Skip to content

Commit fc52936

Browse files
committed
change name
1 parent 17ae909 commit fc52936

File tree

3 files changed

+9
-9
lines changed

3 files changed

+9
-9
lines changed

src/vivarium_profiling/tools/cli.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ def make_artifacts(
101101
help="Drop into python debugger if an error occurs.",
102102
)
103103
def run_benchmark(
104-
models: tuple[str, ...],
104+
model_specifications: tuple[str, ...],
105105
model_runs: int,
106106
baseline_model_runs: int,
107107
output_dir: str,
@@ -117,8 +117,8 @@ def run_benchmark(
117117
run_benchmark -m "model_spec_baseline.yaml" -m "model_spec_*.yaml" -r 10 -b 20
118118
"""
119119
# Expand model patterns
120-
model_specs = expand_model_specs(list(models))
120+
model_specifications = expand_model_specs(list(model_specifications))
121121

122122
# Run benchmarks with error handling
123123
main = handle_exceptions(run_benchmark_loop, logger, with_debugger=with_debugger)
124-
main(model_specs, model_runs, baseline_model_runs, output_dir, verbose)
124+
main(model_specifications, model_runs, baseline_model_runs, output_dir, verbose)

src/vivarium_profiling/tools/run_benchmark.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -245,7 +245,7 @@ def run_single_benchmark(
245245

246246

247247
def run_benchmark_loop(
248-
model_specs: list[Path],
248+
model_specifications: list[Path],
249249
model_runs: int,
250250
baseline_model_runs: int,
251251
output_dir: str = ".",
@@ -274,19 +274,19 @@ def run_benchmark_loop(
274274
configure_logging_to_terminal(verbose)
275275

276276
# Validate inputs
277-
validate_baseline_model(model_specs)
277+
validate_baseline_model(model_specifications)
278278

279279
# Create results directory and initialize results file
280280
results_dir = create_results_directory(output_dir)
281281
results_file = initialize_results_file(results_dir)
282282

283283
logger.info("Running benchmarks:")
284-
logger.info(f" Model Specs: {model_specs}")
284+
logger.info(f" Model Specs: {model_specifications}")
285285
logger.info(f" Runs: {model_runs} ({baseline_model_runs} for baseline)")
286286
logger.info(f" Results Directory: {results_dir}")
287287

288288
# Run benchmarks for each specification
289-
for spec in model_specs:
289+
for spec in model_specifications:
290290
logger.info(f"Running {spec}...")
291291

292292
model_spec_name = spec.stem

tests/test_run_benchmark.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def test_run_benchmark_loop_integration(test_model_specs: list[Path], tmp_path:
3333

3434
# Run the benchmark
3535
results_dir = run_benchmark_loop(
36-
model_specs=test_model_specs,
36+
model_specifications=test_model_specs,
3737
model_runs=model_runs,
3838
baseline_model_runs=baseline_runs,
3939
output_dir=output_dir,
@@ -99,7 +99,7 @@ def test_run_benchmark_loop_validation_error(test_model_specs: list[Path], tmp_p
9999
match="Error: One of the model specs must be 'model_spec_baseline.yaml'.",
100100
): # Should raise ClickException about missing baseline
101101
run_benchmark_loop(
102-
model_specs=model_specs,
102+
model_specifications=model_specs,
103103
model_runs=2,
104104
baseline_model_runs=2,
105105
output_dir=output_dir,

0 commit comments

Comments (0)