Implementing test cases for Artery scenarios #366

Draft: wants to merge 4 commits into master

8 changes: 8 additions & 0 deletions .gitignore
@@ -29,3 +29,11 @@ CMakeUserPresets.json

# Exported compile commands
compile_commands.json

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Clangd language server cache
.cache
Empty file added tools/__init__.py
Empty file.
Empty file added tools/ci/__init__.py
Empty file.
9 changes: 9 additions & 0 deletions tools/ci/test-loader.py
@@ -0,0 +1,9 @@
import yaml

from pathlib import Path
from unittest import TestLoader, TestSuite
from typing import override


class ArteryTestLoader(TestLoader):
...
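
ArteryTestLoader is still an empty stub here. Going by the imports above, one possible direction (a sketch only, not part of this diff; the scenarios.yaml manifest and its layout are assumptions) would be a loader that builds a TestSuite from a YAML list of scenario test modules:

# Sketch only: a possible shape for ArteryTestLoader, assuming a hypothetical
# tools/ci/tests/scenarios.yaml manifest mapping scenario names to test modules.
import yaml

from pathlib import Path
from unittest import TestLoader, TestSuite


class ArteryTestLoader(TestLoader):

    def __init__(self, manifest: Path):
        super().__init__()
        self._manifest = manifest

    def load_scenario_suite(self) -> TestSuite:
        # assumed manifest layout: {scenario name: [dotted test module name, ...]}
        entries = yaml.safe_load(self._manifest.read_text()) or {}
        suite = TestSuite()
        for modules in entries.values():
            for module in modules:
                # loadTestsFromName resolves dotted module names to their test cases
                suite.addTests(self.loadTestsFromName(module))
        return suite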
Empty file added tools/ci/tests/__init__.py
Empty file.
5 changes: 5 additions & 0 deletions tools/ci/tests/stations-test.py
@@ -0,0 +1,5 @@
import unittest


class StationsTest(unittest.TestCase):
...
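
StationsTest is likewise a stub. A rough sketch of the kind of assertion it could make, reusing the 'module'/'type' columns of the scavetool export referenced elsewhere in this PR (the helper name and the expected module naming are assumptions):

# Sketch only: counting stations that produced vector data, assuming the
# exported rows carry modules named like 'World.node[3].wlan[0].radio'.
import unittest

import pandas as pd


class StationsTest(unittest.TestCase):

    def assert_station_count(self, vectors: pd.DataFrame, expected: int):
        # keep only rows of type 'vector' and collect the node part of each module path
        modules = vectors.loc[vectors['type'] == 'vector', 'module'].astype(str)
        stations = {m.split('.')[1] for m in modules if m.startswith('World.node[')}
        self.assertGreaterEqual(len(stations), expected)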
Empty file added tools/requirements.txt
Empty file.
79 changes: 55 additions & 24 deletions tools/run-artery.py → tools/run_artery.py
@@ -5,25 +5,30 @@
import configparser
import pathlib

from pathlib import Path
from typing import Optional, Iterable

def main():
parser = argparse.ArgumentParser()
parser.add_argument('--all', action='store_true', help='sets runall mode')
parser.add_argument('-b', '--batchsize', dest='batch', action='store')
parser.add_argument('-j', '--jobs', dest='jobs', action='store')
parser.add_argument('-l', '--launch-conf', action='store', required=True, type=pathlib.Path)
parser.add_argument('-s', '--scenario', default=pathlib.Path.cwd(), type=pathlib.Path)
parser.add_argument('-v', '--verbose', action='store_true')
args, opp_args = parser.parse_known_args()

# remove '--' from opp_args when used to split run_artery args from opp_run args
if len(opp_args) > 0 and opp_args[0] == '--':
opp_args = opp_args[1:]
def run_artery(
launch_conf: Path,
opp_args: Optional[Iterable[str]] = None,
scenario: Optional[Path] = None,
runall: bool = False,
batchsize: Optional[int] = None,
jobs: Optional[int] = None,
verbose: bool = False
) -> int:

if args.launch_conf.is_file():
config_filename = args.launch_conf
elif args.launch_conf.is_dir():
config_filename = args.launch_conf / 'run-artery.ini'
if opp_args is None:
opp_args = []

if scenario is None:
scenario = Path.cwd()

if launch_conf.is_file():
config_filename = launch_conf
elif launch_conf.is_dir():
config_filename = launch_conf / 'run-artery.ini'
else:
raise ValueError('Argument "launch-conf" must be an existing file or directory')

@@ -41,23 +41,46 @@ def main():
raise ValueError('missing libraries in Artery section')

cmd = []
if args.all:
if runall:
cmd.append(opp_runall)
if args.batch is not None:
cmd.extend(['-b', args.batch])
if args.jobs is not None:
cmd.extend(['-j', args.jobs])
if batchsize is not None:
            cmd.extend(['-b', str(batchsize)])
if jobs is not None:
            cmd.extend(['-j', str(jobs)])

cmd.append(opp_run)
cmd.extend(['-n', ned_folders])
cmd.extend(libraries.split())
cmd.extend(opp_args)

if args.verbose:
if verbose:
print('running command: ', ' '.join(cmd))

process = subprocess.run(cmd, cwd=args.scenario, stderr=sys.stderr, stdout=sys.stdout)
sys.exit(process.returncode)
process = subprocess.run(cmd, cwd=scenario, stderr=sys.stderr, stdout=sys.stdout)
return process.returncode



def main():
parser = argparse.ArgumentParser()
parser.add_argument('--all', action='store_true', help='sets runall mode')
parser.add_argument('-b', '--batchsize', dest='batch', action='store')
parser.add_argument('-j', '--jobs', dest='jobs', action='store')
parser.add_argument('-l', '--launch-conf', action='store', required=True, type=pathlib.Path)
parser.add_argument('-s', '--scenario', default=pathlib.Path.cwd(), type=pathlib.Path)
parser.add_argument('-v', '--verbose', action='store_true')
args, opp_args = parser.parse_known_args()

    # remove the leading '--' used to split run_artery args from opp_run args
if len(opp_args) > 0 and opp_args[0] == '--':
opp_args = opp_args[1:]

sys.exit(run_artery(
args.launch_conf,
opp_args,
args.scenario,
args.all,
args.batch,
args.jobs,
args.verbose
))


if __name__ == '__main__':
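
Since run-artery.py is now an importable module exposing run_artery(), other tools can start simulations without shelling out to the script. A hypothetical caller (the launch config path and the extra opp_run arguments are examples, not taken from this PR):

# Hypothetical caller of the refactored helper; '-u Cmdenv' selects the
# OMNeT++ command-line frontend, and the launch config path is an example only.
import sys
from pathlib import Path

from run_artery import run_artery

if __name__ == '__main__':
    sys.exit(run_artery(
        Path('build/run-artery.ini'),
        opp_args=['-u', 'Cmdenv'],
        scenario=Path('scenarios/highway-police'),
        verbose=True
    ))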
128 changes: 128 additions & 0 deletions tools/sim-results-reader.py
@@ -0,0 +1,128 @@
import rich
import subprocess

import pandas as pd

from pathlib import Path
from typing import Union, Optional, Dict, List

from dataclasses import dataclass


@dataclass
class SimRecordedData:
config_name: str
scalars: Optional[pd.DataFrame] = None
vectors: Optional[pd.DataFrame] = None

def __bool__(self):
return all(data is not None for data in (self.scalars, self.vectors))


class SimResultsReader:

def __init__(self, keep_intermediate_data: bool = False, overwrite_intermediate_data: bool = False):
self._keep_intermediate_data = keep_intermediate_data
self._overwrite_intermediate_data = overwrite_intermediate_data

def _invoke_scavetool(self, results_file: Path, output: Optional[Union[Path, str]] = None) -> Path:
if output is None or isinstance(output, str):
if output is None:
output = results_file.with_suffix('.csv')
if isinstance(output, str):
output = results_file.with_suffix(output)
output = output.with_stem(f'{output.stem}-{results_file.suffix[1:]}')

if not isinstance(output, Path):
raise TypeError
if not isinstance(results_file, Path):
raise TypeError

results_file, output = results_file.resolve(), output.resolve()
if not results_file.is_file():
raise FileNotFoundError(f'simulation results file could not be located: {results_file}')
if output.is_file():
if self._overwrite_intermediate_data:
output.unlink()
else:
raise FileExistsError(f'processed simulation results file already exists: {output}')

process = subprocess.run(['scavetool', 'x', results_file, '-o', output], capture_output=True)
if process.returncode:
raise RuntimeError(f'running scavetool failed, command\'s stderr: {process.stderr}')

return output

    def _scan_results(self, scenario_path: Path) -> Dict[str, Dict[str, Path]]:
        if not isinstance(scenario_path, Path):
            raise TypeError

        scenario_path = scenario_path.resolve()
        if not scenario_path.is_dir():
            raise FileNotFoundError(f'scenario folder was not found: {scenario_path}')
        # sanity check
        results_dir = scenario_path.joinpath('results')
        if not results_dir.is_dir():
            raise RuntimeError(f'sanity check failed for directory: {scenario_path}; results directory was not found')

records = {}
key_mappings = {'.sca': 'scalars', '.vec': 'vectors'}

for child in results_dir.iterdir():
if child.match('*.sca') or child.match('*.vec'):
config_name = child.stem
if config_name not in records:
records[config_name] = {}
records[config_name][key_mappings[child.suffix]] = child

return records

def _handle_intermediate_file(self, filepath: Path):
if filepath.is_file():
if self._keep_intermediate_data:
return
filepath.unlink()
return
raise FileNotFoundError(f'could not find intermediate file: {filepath}')


    def read(self, scenario_path: Path) -> List[SimRecordedData]:
        records = self._scan_results(scenario_path)

data = []
for config, path_mapping in records.items():
recording = SimRecordedData(config)
if 'vectors' in path_mapping:
csv_filepath = self._invoke_scavetool(path_mapping['vectors'], '.csv')
recording.vectors = pd.read_csv(csv_filepath)
self._handle_intermediate_file(csv_filepath)
if 'scalars' in path_mapping:
csv_filepath = self._invoke_scavetool(path_mapping['scalars'], '.csv')
recording.scalars = pd.read_csv(csv_filepath)
self._handle_intermediate_file(csv_filepath)

if recording:
data.append(recording)

return data



if __name__ == '__main__':
    # ad-hoc exploration of the exported highway-police results
    reader = SimResultsReader(overwrite_intermediate_data=True)
    for recording in reader.read(Path('/workspaces/artery/scenarios/highway-police')):
        print(recording.config_name)
        vec = recording.vectors
        sca = recording.scalars
        rich.print(vec.columns)
        # rich.print(*vec['module'].unique(), sep='\n')
        # rich.print(vec[vec['module'] == 'World.node[3].wlan[0].radio'][vec['type'] == 'attr'])
        # rich.print(vec['type'].unique())
        # rich.print(vec[vec['type'] == 'vector'])
        # rich.print(*vec[vec['type'] == 'vector']['vecvalue'].unique(), sep='\n')
        # rich.print(vec[vec['type'] == 'vector'][['vecvalue', 'module']].sample(30))
        rich.print(sca.columns)
        rich.print(sca['type'].unique())
        rich.print(sca[sca['type'] == 'attr']['name'].unique())
        # rich.print(sca[sca['max'].notna()][['max', 'min', 'name']], sep='\n')
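
The reader could also back the planned CI tests directly. A sketch of such a smoke test, assuming the module is importable as sim_results_reader (the hyphenated file name would need renaming) and that the scenario has already been simulated:

# Sketch only: a smoke test built on SimResultsReader; module name and
# scenario path are assumptions, not part of this diff.
import unittest
from pathlib import Path

from sim_results_reader import SimResultsReader


class ResultsSmokeTest(unittest.TestCase):

    SCENARIO = Path('scenarios/highway-police')

    def test_recordings_are_complete(self):
        reader = SimResultsReader(overwrite_intermediate_data=True)
        recordings = reader.read(self.SCENARIO)
        self.assertTrue(recordings, 'no complete .sca/.vec result pairs were found')
        for recording in recordings:
            self.assertFalse(recording.scalars.empty)
            self.assertFalse(recording.vectors.empty)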
120 changes: 120 additions & 0 deletions tools/sim-runner.py
@@ -0,0 +1,120 @@
#!/usr/bin/env python3

import sys
import time
import enum
import logging
import argparse
import subprocess
import configparser

from pathlib import Path
from typing import Dict, Optional, Any

from run_artery import run_artery

logger = logging.getLogger(__file__)


class SimRunner:

RUNNER_BASE = {
'General': {
# switches for data recording
'**.scalar-recording': 'true',
'**.vector-recording': 'true',
'**.statistic-recording': 'true',

            # output format - SQLite database
'outputvectormanager-class': r'"omnetpp::envir::SqliteOutputVectorManager"',
'outputscalarmanager-class': r'"omnetpp::envir::SqliteOutputScalarManager"',

# express mode switch
'cmdenv-express-mode': 'true',

# resulting output files' name templates
'output-vector-file': r'"${resultdir}/${configname}.vec"',
'output-scalar-file': r'"${resultdir}/${configname}.sca"'
}
}

    def __init__(
        self,
        runall: bool = False,
        batch: Optional[int] = None,
        jobs: Optional[int] = None,
        frontend: str = 'Cmdenv',
        keep_runner_config: bool = False
    ):
        self.__runall = runall
        self.__batch = batch
        self.__jobs = jobs
        self.__frontend = frontend
        self.__keep_runner_config = keep_runner_config

def run(
self,
runner_config: Path,
scenario_directory: Path,
scenario_config: Path,
user_options: Optional[Dict[str, Dict[str, Any]]] = None
):
if not isinstance(scenario_directory, Path):
raise TypeError

if not scenario_directory.is_dir():
raise FileNotFoundError(f'scenario directory {scenario_directory} was not found')

scenario_config_path = self.__resolve_scenario_config_path(scenario_directory, scenario_config)
testing_config_path = scenario_directory.joinpath('.omnetpp.test.ini')
self.__make_config(testing_config_path, user_options)

        try:
            # assumption: the generated testing ini, the scenario config and the chosen
            # frontend are forwarded to opp_run via its standard -f/-u options
            returncode = run_artery(
                runner_config,
                opp_args=['-u', self.__frontend, '-f', str(testing_config_path), '-f', str(scenario_config_path)],
                scenario=scenario_directory,
                runall=self.__runall,
                batchsize=self.__batch,
                jobs=self.__jobs
            )
        finally:
            if not self.__keep_runner_config:
                testing_config_path.unlink()

        return returncode

def __resolve_scenario_config_path(self, scenario_directory: Path, scenario_config: Path) -> Path:
if scenario_config.is_absolute():
return scenario_config
try:
resolved_path = scenario_config.relative_to(scenario_directory)
except ValueError:
raise ValueError(
'scenario config path should be either absolute or relative to scenario directory'
)
return resolved_path


def __make_config(self, output_path: Path, user_options: Optional[Dict[str, Dict[str, Any]]] = None):
if not isinstance(output_path, Path):
raise TypeError

config = configparser.ConfigParser(default_section='General')
config.optionxform = str
config.read_dict(SimRunner.RUNNER_BASE)
if user_options is not None:
config.read_dict(user_options)

with open(output_path, 'w') as out:
config.write(out)


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(action='store', dest='script')
parser.add_argument('-s', '--scenario', action='store', dest='scenario', required=True)
parser.add_argument('-f', '--frontend', action='store', dest='frontend', default='Qtenv')
parser.add_argument('--keep-runner-config', action='store_true', dest='keep_runner_config', default=False)

args = parser.parse_args()
    runner = SimRunner(frontend=args.frontend, keep_runner_config=args.keep_runner_config)
    scenario_directory = Path(args.scenario).resolve()

    try:
        # assumption: the positional 'script' argument is the launch configuration for
        # run_artery() and the scenario's own omnetpp.ini is the config to run
        runner.run(Path(args.script), scenario_directory, scenario_directory / 'omnetpp.ini')
except KeyboardInterrupt:
# TODO: wait for spawned process to finish
print('aborted by user! waiting for OmnetPP to finish...')
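
For reference, the ini generation in __make_config amounts to merging RUNNER_BASE with user overrides through configparser and writing the result out. A stand-alone illustration of that merge (the sim-time-limit override is just an example, not something this PR sets):

# Stand-alone illustration of the __make_config merge; the user override below
# is a made-up example.
import configparser
import io

base = {
    'General': {
        '**.scalar-recording': 'true',
        '**.vector-recording': 'true',
        'output-scalar-file': r'"${resultdir}/${configname}.sca"'
    }
}
user_options = {'General': {'sim-time-limit': '60s'}}

config = configparser.ConfigParser(default_section='General')
config.optionxform = str        # preserve the case of OMNeT++ option names
config.read_dict(base)
config.read_dict(user_options)

buffer = io.StringIO()
config.write(buffer)
print(buffer.getvalue())        # contents that would be written to .omnetpp.test.ini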