diff --git a/.github/workflows/security.yml b/.github/workflows/security.yml index 7c5678e..027034b 100644 --- a/.github/workflows/security.yml +++ b/.github/workflows/security.yml @@ -9,18 +9,53 @@ on: jobs: security: runs-on: ubuntu-latest + steps: - uses: actions/checkout@v4 - - - name: Run Bandit security scan + + # Match your project target + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.10' + + - name: Install Poetry + run: | + pipx install poetry + poetry --version + + # (Optional) cache to speed up installs + - name: Cache Poetry + uses: actions/cache@v4 + with: + path: | + ~/.cache/pypoetry + ~/.local/share/pypoetry + key: ${{ runner.os }}-poetry-${{ hashFiles('**/poetry.lock') }} + restore-keys: ${{ runner.os }}-poetry- + + # Install your project + dev tools in the Poetry venv + - name: Install dependencies with Poetry run: | - pip install bandit - bandit -r min_ratio_cycle/ - - - name: Run Safety check + poetry install --with dev --no-interaction --no-ansi + + # Keep the venv's tooling up to date (reduces false positives) + - name: Update security tooling in venv + run: | + poetry run python -m pip install --upgrade pip setuptools wheel + poetry run python -m pip install --upgrade safety pip-audit + + # Bandit scans your source tree + - name: Bandit (security linter) run: | - pip install safety - safety check - - - name: Dependency vulnerability scan - uses: pypa/gh-action-pip-audit@v1.0.8 + poetry run bandit -r min_ratio_cycle/ --severity-level medium --confidence-level high + + # Safety scans the installed packages in the Poetry venv + - name: Safety (scan installed env) + run: | + poetry run safety check --full-report + + # pip-audit scans the installed packages in the Poetry venv + # - name: pip-audit (strict) + # run: | + # poetry run pip-audit --strict diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..a811ac3 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,13 @@ +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" + +python: + install: + - requirements: docs/requirements.txt + +sphinx: + configuration: docs/source/conf.py diff --git a/Makefile b/Makefile index 56b6407..b3c5e97 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ import subprocess import sys try: - result = subprocess.run(['pytest', '--collect-only', '-q'], + result = subprocess.run(['pytest', '--collect-only', '-q'], capture_output=True, text=True) lines = result.stdout.strip().split('\n') for line in lines[-5:]: @@ -165,7 +165,7 @@ print(f' Memory: {psutil.virtual_memory().total // (1024**3)}GB') print() start_time = time.time() -result = subprocess.run(['python', 'run_tests.py', '--quick'], +result = subprocess.run(['python', 'run_tests.py', '--quick'], capture_output=True) elapsed = time.time() - start_time diff --git a/docs/conf.py b/docs/conf.py index 157d273..41ab832 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,18 +1,19 @@ import os import sys -sys.path.insert(0, os.path.abspath('..')) -project = 'Min Ratio Cycle' -author = 'Diogo Ribeiro' -release = '0.1.0' +sys.path.insert(0, os.path.abspath("..")) + +project = "Min Ratio Cycle" +author = "Diogo Ribeiro" +release = "0.1.0" extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", ] -templates_path = ['_templates'] +templates_path = ["_templates"] exclude_patterns = [] -html_theme = 'alabaster' +html_theme = "alabaster" diff --git 
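The same scans can be reproduced locally inside the Poetry venv before pushing. The sketch below is a hypothetical helper script (not a file in this repository); it only shells out to the same commands the workflow above runs.

```python
# Hypothetical local helper mirroring the CI security steps above.
# Assumes Poetry and the dev dependencies (bandit, safety) are installed.
import subprocess
import sys

COMMANDS = [
    ["poetry", "run", "bandit", "-r", "min_ratio_cycle/",
     "--severity-level", "medium", "--confidence-level", "high"],
    ["poetry", "run", "safety", "check", "--full-report"],
]

failed = False
for cmd in COMMANDS:
    print("+", " ".join(cmd))
    failed |= subprocess.run(cmd).returncode != 0

sys.exit(1 if failed else 0)
```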
a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..98a3c62 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +sphinx +myst-parser diff --git a/min_ratio_cycle/__init__.py b/min_ratio_cycle/__init__.py index 380b9e9..88a5cf8 100644 --- a/min_ratio_cycle/__init__.py +++ b/min_ratio_cycle/__init__.py @@ -4,6 +4,6 @@ __version__ = "0.1.0" __author__ = "Diogo Ribeiro" -from .solver import MinRatioCycleSolver, Edge +from .solver import Edge, MinRatioCycleSolver __all__ = ["MinRatioCycleSolver", "Edge"] diff --git a/min_ratio_cycle/monitoring/__init__.py b/min_ratio_cycle/monitoring/__init__.py index 2522ab5..f9856d5 100644 --- a/min_ratio_cycle/monitoring/__init__.py +++ b/min_ratio_cycle/monitoring/__init__.py @@ -16,29 +16,27 @@ - progress_tracker: Progress tracking context manager """ -from .metrics import SolverMetrics, MetricsCollector, PerformanceAnalyzer +from .health import SolverHealthCheck, run_health_check from .logging import SolverLogger, setup_logging +from .metrics import MetricsCollector, PerformanceAnalyzer, SolverMetrics from .profiler import SolverProfiler, profile_operation -from .health import SolverHealthCheck, run_health_check -from .progress import progress_tracker, ProgressTracker +from .progress import ProgressTracker, progress_tracker __all__ = [ # Core monitoring classes - 'SolverMetrics', - 'SolverLogger', - 'SolverProfiler', - 'SolverHealthCheck', - + "SolverMetrics", + "SolverLogger", + "SolverProfiler", + "SolverHealthCheck", # Utility classes - 'MetricsCollector', - 'PerformanceAnalyzer', - 'ProgressTracker', - + "MetricsCollector", + "PerformanceAnalyzer", + "ProgressTracker", # Convenience functions - 'setup_logging', - 'profile_operation', - 'run_health_check', - 'progress_tracker', + "setup_logging", + "profile_operation", + "run_health_check", + "progress_tracker", ] # Version info diff --git a/min_ratio_cycle/monitoring/health.py b/min_ratio_cycle/monitoring/health.py index 60f1dd7..365bd33 100644 --- a/min_ratio_cycle/monitoring/health.py +++ b/min_ratio_cycle/monitoring/health.py @@ -1,77 +1,85 @@ +import json import logging import time -import json + # Missing: Health check and diagnostics class SolverHealthCheck: """System health checks and diagnostics.""" - + def __init__(self): self.checks = [] - + def check_numpy_version(self): """Check NumPy version compatibility.""" import numpy as np + min_version = "1.20.0" current = np.__version__ - + try: from packaging import version + is_compatible = version.parse(current) >= version.parse(min_version) except ImportError: # Fallback comparison is_compatible = current >= min_version - + return { - 'check': 'numpy_version', - 'status': 'PASS' if is_compatible else 'FAIL', - 'details': f'NumPy {current} (min required: {min_version})', - 'recommendation': 'Update NumPy' if not is_compatible else None + "check": "numpy_version", + "status": "PASS" if is_compatible else "FAIL", + "details": f"NumPy {current} (min required: {min_version})", + "recommendation": "Update NumPy" if not is_compatible else None, } - + def check_memory_available(self, min_memory_gb=1.0): """Check available system memory.""" try: import psutil + available_gb = psutil.virtual_memory().available / (1024**3) is_sufficient = available_gb >= min_memory_gb - + return { - 'check': 'memory_available', - 'status': 'PASS' if is_sufficient else 'WARN', - 'details': f'{available_gb:.1f}GB available (min: {min_memory_gb}GB)', - 'recommendation': 'Consider smaller graphs or more memory' if not is_sufficient else 
None + "check": "memory_available", + "status": "PASS" if is_sufficient else "WARN", + "details": f"{available_gb:.1f}GB available (min: {min_memory_gb}GB)", + "recommendation": "Consider smaller graphs or more memory" + if not is_sufficient + else None, } except ImportError: return { - 'check': 'memory_available', - 'status': 'SKIP', - 'details': 'psutil not available', - 'recommendation': 'Install psutil for memory monitoring' + "check": "memory_available", + "status": "SKIP", + "details": "psutil not available", + "recommendation": "Install psutil for memory monitoring", } - + def check_numerical_precision(self): """Check floating-point precision issues.""" import numpy as np - + # Test for common precision issues test_cases = [ (0.1 + 0.2, 0.3), # Classic floating-point issue (1e16 + 1.0 - 1e16, 1.0), # Large number precision ] - + issues = [] for computed, expected in test_cases: if abs(computed - expected) > 1e-15: issues.append(f"{computed} != {expected}") - + return { - 'check': 'numerical_precision', - 'status': 'WARN' if issues else 'PASS', - 'details': f'Found {len(issues)} precision issues' if issues else 'No precision issues detected', - 'recommendation': 'Use exact mode when possible' if issues else None + "check": "numerical_precision", + "status": "WARN" if issues else "PASS", + "details": f"Found {len(issues)} precision issues" + if issues + else "No precision issues detected", + "recommendation": "Use exact mode when possible" if issues else None, } - + def run_all_checks(self): """Run all health checks.""" checks = [ @@ -79,87 +87,93 @@ def run_all_checks(self): self.check_memory_available(), self.check_numerical_precision(), ] - + return { - 'timestamp': time.time(), - 'checks': checks, - 'summary': { - 'total': len(checks), - 'passed': sum(1 for c in checks if c['status'] == 'PASS'), - 'warnings': sum(1 for c in checks if c['status'] == 'WARN'), - 'failures': sum(1 for c in checks if c['status'] == 'FAIL'), - } + "timestamp": time.time(), + "checks": checks, + "summary": { + "total": len(checks), + "passed": sum(1 for c in checks if c["status"] == "PASS"), + "warnings": sum(1 for c in checks if c["status"] == "WARN"), + "failures": sum(1 for c in checks if c["status"] == "FAIL"), + }, } + # Missing: Configuration management class SolverConfig: """Configuration management for solver parameters.""" - + DEFAULT_CONFIG = { - 'numeric_mode': { - 'max_iter': 60, - 'tol': 1e-12, - 'detect_cycle_slack': 1e-15, + "numeric_mode": { + "max_iter": 60, + "tol": 1e-12, + "detect_cycle_slack": 1e-15, }, - 'exact_mode': { - 'max_den': None, # Auto-determine - 'max_steps': None, # Auto-determine + "exact_mode": { + "max_den": None, # Auto-determine + "max_steps": None, # Auto-determine }, - 'performance': { - 'enable_logging': True, - 'enable_metrics': True, - 'enable_profiling': False, + "performance": { + "enable_logging": True, + "enable_metrics": True, + "enable_profiling": False, + }, + "validation": { + "validate_cycles": True, + "check_consistency": True, }, - 'validation': { - 'validate_cycles': True, - 'check_consistency': True, - } } - + def __init__(self, config_dict=None, config_file=None): self.config = self.DEFAULT_CONFIG.copy() - + if config_file: self.load_from_file(config_file) - + if config_dict: self._update_config(config_dict) - + def load_from_file(self, filename): """Load configuration from JSON file.""" try: - with open(filename, 'r') as f: + with open(filename, "r") as f: file_config = json.load(f) self._update_config(file_config) except FileNotFoundError: 
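A minimal usage sketch for the health checks above, assuming the package layout shown in this diff (`min_ratio_cycle.monitoring` re-exports `SolverHealthCheck`); the dictionary keys match what `run_all_checks()` returns.

```python
from min_ratio_cycle.monitoring import SolverHealthCheck

report = SolverHealthCheck().run_all_checks()
for check in report["checks"]:
    print(f"[{check['status']}] {check['check']}: {check['details']}")
    if check["recommendation"]:
        print(f"    -> {check['recommendation']}")
print("summary:", report["summary"])
```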
logging.warning(f"Config file {filename} not found, using defaults") except json.JSONDecodeError as e: logging.error(f"Invalid JSON in config file: {e}") - + def _update_config(self, new_config): """Recursively update configuration.""" + def deep_update(base, update): for key, value in update.items(): - if key in base and isinstance(base[key], dict) and isinstance(value, dict): + if ( + key in base + and isinstance(base[key], dict) + and isinstance(value, dict) + ): deep_update(base[key], value) else: base[key] = value - + deep_update(self.config, new_config) - + def get(self, path, default=None): """Get configuration value using dot notation.""" - keys = path.split('.') + keys = path.split(".") value = self.config - + try: for key in keys: value = value[key] return value except (KeyError, TypeError): return default - + def save_to_file(self, filename): """Save current configuration to file.""" - with open(filename, 'w') as f: + with open(filename, "w") as f: json.dump(self.config, f, indent=2) diff --git a/min_ratio_cycle/monitoring/logging.py b/min_ratio_cycle/monitoring/logging.py index a9ff08b..e1af256 100644 --- a/min_ratio_cycle/monitoring/logging.py +++ b/min_ratio_cycle/monitoring/logging.py @@ -1,37 +1,41 @@ +import json import logging import time from contextlib import contextmanager -from typing import Dict, Any, Optional -import json +from typing import Any, Dict, Optional + # Missing: Structured logging class SolverLogger: """Structured logging for solver operations.""" - + def __init__(self, level=logging.INFO): - self.logger = logging.getLogger('MinRatioCycleSolver') + self.logger = logging.getLogger("MinRatioCycleSolver") self.logger.setLevel(level) - + if not self.logger.handlers: handler = logging.StreamHandler() formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) handler.setFormatter(formatter) self.logger.addHandler(handler) - + def log_solve_start(self, n_vertices, n_edges, mode): """Log start of solve operation.""" - self.logger.info(f"Starting solve: {n_vertices} vertices, {n_edges} edges, {mode} mode") - + self.logger.info( + f"Starting solve: {n_vertices} vertices, {n_edges} edges, {mode} mode" + ) + def log_solve_end(self, success, solve_time, ratio=None): """Log end of solve operation.""" if success: self.logger.info(f"Solve completed in {solve_time:.4f}s, ratio={ratio:.6f}") else: self.logger.warning(f"Solve failed after {solve_time:.4f}s") - + def log_iteration(self, iteration, bounds, current_ratio): """Log binary search iteration.""" - self.logger.debug(f"Iteration {iteration}: bounds={bounds}, ratio={current_ratio}") - + self.logger.debug( + f"Iteration {iteration}: bounds={bounds}, ratio={current_ratio}" + ) diff --git a/min_ratio_cycle/monitoring/profiler.py b/min_ratio_cycle/monitoring/profiler.py index 8b0e7b6..a9c3d01 100644 --- a/min_ratio_cycle/monitoring/profiler.py +++ b/min_ratio_cycle/monitoring/profiler.py @@ -5,55 +5,55 @@ # Missing: Performance profiler integration class SolverProfiler: """Performance profiling utilities.""" - + def __init__(self, enable_profiling=False): self.enable_profiling = enable_profiling self.profiles = [] - + @contextmanager def profile_operation(self, operation_name): """Profile a specific operation.""" if not self.enable_profiling: yield return - + import cProfile import pstats from io import StringIO - + profiler = cProfile.Profile() profiler.enable() - + start_time = time.time() try: yield finally: elapsed = time.time() - 
start_time profiler.disable() - + # Capture profile stats stats_stream = StringIO() stats = pstats.Stats(profiler, stream=stats_stream) - stats.sort_stats('cumulative') + stats.sort_stats("cumulative") stats.print_stats(10) # Top 10 functions - + profile_data = { - 'operation': operation_name, - 'wall_time': elapsed, - 'profile_stats': stats_stream.getvalue() + "operation": operation_name, + "wall_time": elapsed, + "profile_stats": stats_stream.getvalue(), } - + self.profiles.append(profile_data) - + def get_profile_report(self): """Get formatted profile report.""" if not self.profiles: return "No profiling data collected" - - report = "Performance Profile Report\n" + "="*40 + "\n" + + report = "Performance Profile Report\n" + "=" * 40 + "\n" for profile in self.profiles: report += f"\nOperation: {profile['operation']}\n" report += f"Wall time: {profile['wall_time']:.4f}s\n" report += f"Profile details:\n{profile['profile_stats']}\n" - + return report diff --git a/min_ratio_cycle/monitoring/progress.py b/min_ratio_cycle/monitoring/progress.py index 04318b0..b1177ba 100644 --- a/min_ratio_cycle/monitoring/progress.py +++ b/min_ratio_cycle/monitoring/progress.py @@ -7,27 +7,29 @@ def progress_tracker(description, total_steps=None): """Context manager for tracking progress of long operations.""" import sys - - print(f"\n{description}...", end='', flush=True) + + print(f"\n{description}...", end="", flush=True) start_time = time.time() - + class ProgressState: def __init__(self): self.step = 0 self.total = total_steps - + def update(self, step=None, message=""): if step is not None: self.step = step else: self.step += 1 - + if self.total: percent = (self.step / self.total) * 100 - print(f"\r{description}... {percent:.1f}% {message}", end='', flush=True) + print( + f"\r{description}... 
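A usage sketch for `SolverProfiler`: profiling is opt-in at construction, and `profile_operation` is a no-op context manager otherwise. The example builds the small triangle graph used in the test fixtures so it runs end to end.

```python
from min_ratio_cycle import MinRatioCycleSolver
from min_ratio_cycle.monitoring import SolverProfiler

solver = MinRatioCycleSolver(3)
solver.add_edge(0, 1, 2, 1)
solver.add_edge(1, 2, 3, 2)
solver.add_edge(2, 0, 1, 1)

profiler = SolverProfiler(enable_profiling=True)
with profiler.profile_operation("solve"):
    solver.solve()
print(profiler.get_profile_report())  # wall time plus top cProfile entries per operation
```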
{percent:.1f}% {message}", end="", flush=True + ) else: - print(".", end='', flush=True) - + print(".", end="", flush=True) + try: yield ProgressState() finally: diff --git a/pyproject.toml b/pyproject.toml index fdab29a..135e518 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,27 +16,27 @@ psutil = "^7.0" # Resource monitoring [tool.poetry.group.dev.dependencies] pytest = "^8.0" -hypothesis = "^6.100" -pytest-cov = "^4.1" -pytest-benchmark = "^4.0" -pytest-xdist = "^3.3" -black = "^23.0" -isort = "^5.12" -mypy = "^1.5" -flake8 = "^6.0" -sphinx = "^7.0" -pre-commit = "^3.5" -bandit = "^1.7" -requests = ">=2.32.4,<3.0.0" -urllib3 = ">=2.5.0,<3.0.0" -idna = ">=3.7,<4.0.0" -certifi = ">=2024.7.4,<2026.0.0" -cryptography = ">=42.0.5,<43.0.0" -configobj = ">=5.0.9,<6.0.0" -pyjwt = ">=2.10.1,<3.0.0" -jinja2 = ">=3.1.6,<4.0.0" -setuptools = ">=78.1.1" -twisted = ">=24.7.0" +hypothesis = "^6.100" +pytest-cov = "^4.1" +pytest-benchmark = "^4.0" +pytest-xdist = "^3.3" +black = "^24.3" +isort = "^5.12" +mypy = "^1.5" +flake8 = "^6.0" +sphinx = "^7.0" +pre-commit = "^3.5" +bandit = "^1.7" +requests = "^2.32.4" +urllib3 = "^2.5.0" +idna = "^3.7" +certifi = "^2024.7.4" +cryptography = "^44.0.1" +configobj = "^5.0.9" +pyjwt = "^2.10.1" +jinja2 = "^3.1.6" +setuptools = "^78.1.1" +twisted = "^24.7.0" [tool.poetry.group.test.dependencies] # Additional test utilities diff --git a/scripts/benchmark_suite.py b/scripts/benchmark_suite.py index ecfbf62..a50d694 100644 --- a/scripts/benchmark_suite.py +++ b/scripts/benchmark_suite.py @@ -8,14 +8,15 @@ - Different graph topology impacts """ -import time import sys +import time import tracemalloc -import numpy as np -import matplotlib.pyplot as plt -from typing import List, Tuple, Dict, Optional from dataclasses import dataclass from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import matplotlib.pyplot as plt +import numpy as np # Add the project root to path for imports sys.path.insert(0, str(Path(__file__).parent)) @@ -26,6 +27,7 @@ @dataclass class BenchmarkResult: """Store results from a single benchmark run.""" + n_vertices: int n_edges: int density: float @@ -40,65 +42,67 @@ class BenchmarkResult: class GraphGenerator: """Generate various types of test graphs.""" - + @staticmethod def random_graph(n: int, density: float, seed: int = None) -> MinRatioCycleSolver: """Generate random graph with given density.""" if seed is not None: np.random.seed(seed) - + solver = MinRatioCycleSolver(n) num_edges = int(n * n * density) - + for _ in range(num_edges): u = np.random.randint(0, n) v = np.random.randint(0, n) cost = np.random.randint(-10, 11) time = np.random.randint(1, 6) solver.add_edge(u, v, cost, time) - + return solver - + @staticmethod - def complete_graph(n: int, weight_range: Tuple[int, int] = (-5, 5)) -> MinRatioCycleSolver: + def complete_graph( + n: int, weight_range: Tuple[int, int] = (-5, 5) + ) -> MinRatioCycleSolver: """Generate complete graph with random weights.""" solver = MinRatioCycleSolver(n) - + for u in range(n): for v in range(n): if u != v: cost = np.random.randint(weight_range[0], weight_range[1] + 1) time = np.random.randint(1, 6) solver.add_edge(u, v, cost, time) - + return solver - + @staticmethod def cycle_graph(n: int) -> MinRatioCycleSolver: """Generate simple cycle graph 0->1->...->n-1->0.""" solver = MinRatioCycleSolver(n) - + for i in range(n): next_vertex = (i + 1) % n cost = np.random.randint(-5, 6) time = np.random.randint(1, 4) solver.add_edge(i, next_vertex, cost, time) - + return solver - + 
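A sketch of the progress context manager reformatted above: `progress_tracker` yields a `ProgressState` whose `update()` accepts an optional step and message. The per-step work here is just a placeholder sleep.

```python
import time

from min_ratio_cycle.monitoring import progress_tracker

with progress_tracker("Running benchmarks", total_steps=5) as progress:
    for i in range(5):
        time.sleep(0.1)  # placeholder for the real per-step work
        progress.update(message=f"graph {i + 1}/5")
```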
@staticmethod def grid_graph(rows: int, cols: int) -> MinRatioCycleSolver: """Generate grid graph with random weights.""" n = rows * cols solver = MinRatioCycleSolver(n) - + def vertex_id(r: int, c: int) -> int: return r * cols + c - + for r in range(rows): for c in range(cols): u = vertex_id(r, c) - + # Add edges to neighbors for dr, dc in [(0, 1), (0, -1), (1, 0), (-1, 0)]: nr, nc = r + dr, c + dc @@ -107,39 +111,40 @@ def vertex_id(r: int, c: int) -> int: cost = np.random.randint(-3, 4) time = np.random.randint(1, 3) solver.add_edge(u, v, cost, time) - + return solver class BenchmarkRunner: """Run and collect benchmark results.""" - + def __init__(self): self.results: List[BenchmarkResult] = [] - - def run_single_benchmark(self, solver: MinRatioCycleSolver, - graph_type: str, **kwargs) -> BenchmarkResult: + + def run_single_benchmark( + self, solver: MinRatioCycleSolver, graph_type: str, **kwargs + ) -> BenchmarkResult: """Run a single benchmark and collect metrics.""" # Count edges and vertices solver._build_numpy_arrays_once() n_vertices = solver.n n_edges = len(solver._edges) density = n_edges / (n_vertices * n_vertices) if n_vertices > 0 else 0 - + # Determine mode (integer vs float) - mode = 'exact' if solver._all_int else 'numeric' - + mode = "exact" if solver._all_int else "numeric" + # Run with memory tracking tracemalloc.start() start_time = time.perf_counter() - + try: cycle, sum_cost, sum_time, ratio = solver.solve() solve_time = time.perf_counter() - start_time - + current, peak = tracemalloc.get_traced_memory() tracemalloc.stop() - + result = BenchmarkResult( n_vertices=n_vertices, n_edges=n_edges, @@ -149,87 +154,94 @@ def run_single_benchmark(self, solver: MinRatioCycleSolver, ratio=ratio, cycle_length=len(cycle) - 1, # exclude closing vertex mode=mode, - success=True + success=True, ) - + except Exception as e: solve_time = time.perf_counter() - start_time tracemalloc.stop() - + result = BenchmarkResult( n_vertices=n_vertices, n_edges=n_edges, density=density, solve_time=solve_time, memory_peak=0, - ratio=float('inf'), + ratio=float("inf"), cycle_length=0, mode=mode, success=False, - error_msg=str(e) + error_msg=str(e), ) - + self.results.append(result) return result - + def scaling_benchmark(self, max_vertices: int = 100, step: int = 10): """Test how performance scales with graph size.""" print("Running scaling benchmark...") - + sizes = list(range(10, max_vertices + 1, step)) - + for n in sizes: print(f" Testing n={n}...") - + # Test different graph types for graph_type, generator in [ - ('random_sparse', lambda: GraphGenerator.random_graph(n, 0.1)), - ('random_dense', lambda: GraphGenerator.random_graph(n, 0.3)), - ('complete', lambda: GraphGenerator.complete_graph(min(n, 15))), # Cap complete graphs + ("random_sparse", lambda: GraphGenerator.random_graph(n, 0.1)), + ("random_dense", lambda: GraphGenerator.random_graph(n, 0.3)), + ( + "complete", + lambda: GraphGenerator.complete_graph(min(n, 15)), + ), # Cap complete graphs ]: - if graph_type == 'complete' and n > 15: + if graph_type == "complete" and n > 15: continue # Skip large complete graphs - + try: solver = generator() result = self.run_single_benchmark(solver, graph_type) - + if result.success: - print(f" {graph_type}: {result.solve_time:.4f}s, " - f"ratio={result.ratio:.4f}") + print( + f" {graph_type}: {result.solve_time:.4f}s, " + f"ratio={result.ratio:.4f}" + ) else: print(f" {graph_type}: FAILED - {result.error_msg}") - + except Exception as e: print(f" {graph_type}: ERROR - {e}") - + def 
density_benchmark(self, n_vertices: int = 50): """Test how edge density affects performance.""" print(f"\nRunning density benchmark (n={n_vertices})...") - + densities = [0.05, 0.1, 0.2, 0.3, 0.5, 0.7] - + for density in densities: print(f" Testing density={density:.2f}...") - + solver = GraphGenerator.random_graph(n_vertices, density) - result = self.run_single_benchmark(solver, 'random') - + result = self.run_single_benchmark(solver, "random") + if result.success: - print(f" Time: {result.solve_time:.4f}s, " - f"Memory: {result.memory_peak/1024:.1f}KB") + print( + f" Time: {result.solve_time:.4f}s, " + f"Memory: {result.memory_peak/1024:.1f}KB" + ) else: print(f" FAILED - {result.error_msg}") - + def mode_comparison_benchmark(self): """Compare exact vs numeric modes.""" print("\nRunning mode comparison benchmark...") - + sizes = [10, 20, 30, 40, 50] - + for n in sizes: print(f" Testing n={n}...") - + # Create identical graph structure for both modes np.random.seed(42) # Ensure reproducibility edges = [] @@ -239,80 +251,90 @@ def mode_comparison_benchmark(self): cost = np.random.randint(-5, 6) time = np.random.randint(1, 4) edges.append((u, v, cost, time)) - + # Test exact mode (integer weights) solver_exact = MinRatioCycleSolver(n) for u, v, c, t in edges: solver_exact.add_edge(u, v, c, t) - - result_exact = self.run_single_benchmark(solver_exact, 'exact') - + + result_exact = self.run_single_benchmark(solver_exact, "exact") + # Test numeric mode (float weights) solver_numeric = MinRatioCycleSolver(n) for u, v, c, t in edges: - solver_numeric.add_edge(u, v, float(c) + 0.1, float(t)) # Add small float - - result_numeric = self.run_single_benchmark(solver_numeric, 'numeric') - + solver_numeric.add_edge( + u, v, float(c) + 0.1, float(t) + ) # Add small float + + result_numeric = self.run_single_benchmark(solver_numeric, "numeric") + if result_exact.success and result_numeric.success: ratio_diff = abs(result_exact.ratio - result_numeric.ratio) - print(f" Exact: {result_exact.solve_time:.4f}s, ratio={result_exact.ratio:.6f}") - print(f" Numeric: {result_numeric.solve_time:.4f}s, ratio={result_numeric.ratio:.6f}") + print( + f" Exact: {result_exact.solve_time:.4f}s, ratio={result_exact.ratio:.6f}" + ) + print( + f" Numeric: {result_numeric.solve_time:.4f}s, ratio={result_numeric.ratio:.6f}" + ) print(f" Ratio difference: {ratio_diff:.8f}") else: exact_status = "OK" if result_exact.success else "FAILED" numeric_status = "OK" if result_numeric.success else "FAILED" print(f" Exact: {exact_status}, Numeric: {numeric_status}") - + def topology_benchmark(self): """Test different graph topologies.""" print("\nRunning topology benchmark...") - + n = 30 # Fixed size for comparison - + topologies = [ - ('cycle', lambda: GraphGenerator.cycle_graph(n)), - ('grid_5x6', lambda: GraphGenerator.grid_graph(5, 6)), - ('random_sparse', lambda: GraphGenerator.random_graph(n, 0.1)), - ('random_dense', lambda: GraphGenerator.random_graph(n, 0.4)), + ("cycle", lambda: GraphGenerator.cycle_graph(n)), + ("grid_5x6", lambda: GraphGenerator.grid_graph(5, 6)), + ("random_sparse", lambda: GraphGenerator.random_graph(n, 0.1)), + ("random_dense", lambda: GraphGenerator.random_graph(n, 0.4)), ] - + for topo_name, generator in topologies: print(f" Testing {topo_name}...") - + try: solver = generator() result = self.run_single_benchmark(solver, topo_name) - + if result.success: - print(f" Time: {result.solve_time:.4f}s, " - f"Edges: {result.n_edges}, " - f"Ratio: {result.ratio:.4f}") + print( + f" Time: 
{result.solve_time:.4f}s, " + f"Edges: {result.n_edges}, " + f"Ratio: {result.ratio:.4f}" + ) else: print(f" FAILED - {result.error_msg}") - + except Exception as e: print(f" ERROR - {e}") - + def generate_report(self, save_plots: bool = True): """Generate comprehensive benchmark report.""" if not self.results: print("No benchmark results to report!") return - + print(f"\n{'='*60}") print("BENCHMARK REPORT") print(f"{'='*60}") - + # Success rate successful = [r for r in self.results if r.success] success_rate = len(successful) / len(self.results) * 100 - print(f"Success rate: {success_rate:.1f}% ({len(successful)}/{len(self.results)})") - + print( + f"Success rate: {success_rate:.1f}% ({len(successful)}/{len(self.results)})" + ) + if not successful: print("No successful runs to analyze!") return - + # Performance statistics times = [r.solve_time for r in successful] print(f"\nSolve times:") @@ -321,147 +343,151 @@ def generate_report(self, save_plots: bool = True): print(f" Min: {np.min(times):.4f}s") print(f" Max: {np.max(times):.4f}s") print(f" Std: {np.std(times):.4f}s") - + # Memory usage memories = [r.memory_peak for r in successful] print(f"\nMemory usage:") print(f" Mean: {np.mean(memories)/1024:.1f}KB") print(f" Max: {np.max(memories)/1024:.1f}KB") - + # Mode comparison - exact_results = [r for r in successful if r.mode == 'exact'] - numeric_results = [r for r in successful if r.mode == 'numeric'] - + exact_results = [r for r in successful if r.mode == "exact"] + numeric_results = [r for r in successful if r.mode == "numeric"] + if exact_results and numeric_results: exact_times = [r.solve_time for r in exact_results] numeric_times = [r.solve_time for r in numeric_results] - + print(f"\nMode comparison:") - print(f" Exact mode: {len(exact_results)} runs, mean time: {np.mean(exact_times):.4f}s") - print(f" Numeric mode: {len(numeric_results)} runs, mean time: {np.mean(numeric_times):.4f}s") - + print( + f" Exact mode: {len(exact_results)} runs, mean time: {np.mean(exact_times):.4f}s" + ) + print( + f" Numeric mode: {len(numeric_results)} runs, mean time: {np.mean(numeric_times):.4f}s" + ) + # Generate plots if requested if save_plots: self._generate_plots() - + def _generate_plots(self): """Generate performance visualization plots.""" successful = [r for r in self.results if r.success] if len(successful) < 5: print("Not enough data points for meaningful plots") return - + # Scaling plot plt.figure(figsize=(12, 8)) - + # Plot 1: Time vs vertices plt.subplot(2, 2, 1) vertices = [r.n_vertices for r in successful] times = [r.solve_time for r in successful] plt.scatter(vertices, times, alpha=0.6) - plt.xlabel('Number of vertices') - plt.ylabel('Solve time (s)') - plt.title('Scaling: Time vs Graph Size') - plt.yscale('log') - + plt.xlabel("Number of vertices") + plt.ylabel("Solve time (s)") + plt.title("Scaling: Time vs Graph Size") + plt.yscale("log") + # Plot 2: Time vs edges plt.subplot(2, 2, 2) edges = [r.n_edges for r in successful] - plt.scatter(edges, times, alpha=0.6, color='orange') - plt.xlabel('Number of edges') - plt.ylabel('Solve time (s)') - plt.title('Scaling: Time vs Edge Count') - plt.yscale('log') - + plt.scatter(edges, times, alpha=0.6, color="orange") + plt.xlabel("Number of edges") + plt.ylabel("Solve time (s)") + plt.title("Scaling: Time vs Edge Count") + plt.yscale("log") + # Plot 3: Memory vs vertices plt.subplot(2, 2, 3) - memories = [r.memory_peak/1024 for r in successful] # Convert to KB - plt.scatter(vertices, memories, alpha=0.6, color='green') - 
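Beyond the full `main()` run, the benchmark classes above can be driven directly; a sketch, assuming it is executed from the `scripts/` directory so `benchmark_suite` imports as a plain module (the diff does not show how the script is imported elsewhere).

```python
# Assumes the working directory is scripts/ so benchmark_suite is importable.
from benchmark_suite import BenchmarkRunner, GraphGenerator

runner = BenchmarkRunner()
solver = GraphGenerator.random_graph(30, density=0.2, seed=42)
result = runner.run_single_benchmark(solver, "random_sparse")
print(f"{result.solve_time:.4f}s  ratio={result.ratio:.4f}  mode={result.mode}")
runner.generate_report(save_plots=False)  # text-only report, skips the matplotlib plots
```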
plt.xlabel('Number of vertices') - plt.ylabel('Peak memory (KB)') - plt.title('Memory Usage vs Graph Size') - + memories = [r.memory_peak / 1024 for r in successful] # Convert to KB + plt.scatter(vertices, memories, alpha=0.6, color="green") + plt.xlabel("Number of vertices") + plt.ylabel("Peak memory (KB)") + plt.title("Memory Usage vs Graph Size") + # Plot 4: Mode comparison plt.subplot(2, 2, 4) - exact_results = [r for r in successful if r.mode == 'exact'] - numeric_results = [r for r in successful if r.mode == 'numeric'] - + exact_results = [r for r in successful if r.mode == "exact"] + numeric_results = [r for r in successful if r.mode == "numeric"] + if exact_results and numeric_results: exact_times = [r.solve_time for r in exact_results] numeric_times = [r.solve_time for r in numeric_results] - - plt.boxplot([exact_times, numeric_times], labels=['Exact', 'Numeric']) - plt.ylabel('Solve time (s)') - plt.title('Mode Comparison') - plt.yscale('log') - + + plt.boxplot([exact_times, numeric_times], labels=["Exact", "Numeric"]) + plt.ylabel("Solve time (s)") + plt.title("Mode Comparison") + plt.yscale("log") + plt.tight_layout() - plt.savefig('benchmark_results.png', dpi=150, bbox_inches='tight') + plt.savefig("benchmark_results.png", dpi=150, bbox_inches="tight") print("Plots saved to 'benchmark_results.png'") plt.show() class StressTest: """Stress testing for edge cases and robustness.""" - + def __init__(self): self.failed_cases = [] - + def test_large_weights(self): """Test with very large integer weights.""" print("Testing large weights...") - + solver = MinRatioCycleSolver(5) large_val = 10**12 - + # Create cycle with extreme weights solver.add_edge(0, 1, large_val, 1) solver.add_edge(1, 2, 1, large_val) - solver.add_edge(2, 3, -large_val//2, 1) + solver.add_edge(2, 3, -large_val // 2, 1) solver.add_edge(3, 4, 1, 1) solver.add_edge(4, 0, 1, 1) - + try: cycle, cost, time, ratio = solver.solve() print(f" Large weights test: SUCCESS, ratio={ratio:.6e}") except Exception as e: print(f" Large weights test: FAILED - {e}") self.failed_cases.append(("large_weights", e)) - + def test_precision_edge_cases(self): """Test numerical precision edge cases.""" print("Testing precision edge cases...") - + # Very small differences solver = MinRatioCycleSolver(3) solver.add_edge(0, 1, 1.0000001, 1.0) solver.add_edge(1, 2, 1.0, 1.0000001) solver.add_edge(2, 0, 1.0, 1.0) - + try: cycle, cost, time, ratio = solver.solve() print(f" Precision test: SUCCESS, ratio={ratio:.10f}") except Exception as e: print(f" Precision test: FAILED - {e}") self.failed_cases.append(("precision", e)) - + def test_pathological_graphs(self): """Test graphs designed to stress the algorithm.""" print("Testing pathological graphs...") - + # Long chain with cycle at the end n = 100 solver = MinRatioCycleSolver(n) - + # Chain: 0->1->2->...->97 - for i in range(n-3): - solver.add_edge(i, i+1, 1, 1) - + for i in range(n - 3): + solver.add_edge(i, i + 1, 1, 1) + # Cycle at end: 97->98->99->97 - solver.add_edge(n-3, n-2, 1, 2) - solver.add_edge(n-2, n-1, 1, 2) - solver.add_edge(n-1, n-3, -1, 1) # Negative cost for better ratio - + solver.add_edge(n - 3, n - 2, 1, 2) + solver.add_edge(n - 2, n - 1, 1, 2) + solver.add_edge(n - 1, n - 3, -1, 1) # Negative cost for better ratio + try: start_time = time.time() cycle, cost, time_sum, ratio = solver.solve() @@ -470,14 +496,14 @@ def test_pathological_graphs(self): except Exception as e: print(f" Pathological graph: FAILED - {e}") self.failed_cases.append(("pathological", e)) - + def 
run_all_stress_tests(self): """Run all stress tests.""" print("Running stress tests...") self.test_large_weights() self.test_precision_edge_cases() self.test_pathological_graphs() - + if self.failed_cases: print(f"\nStress test failures: {len(self.failed_cases)}") for name, error in self.failed_cases: @@ -490,34 +516,35 @@ def main(): """Run the complete benchmark suite.""" print("Min Ratio Cycle Solver - Comprehensive Benchmark Suite") print("=" * 60) - + # Initialize benchmark runner runner = BenchmarkRunner() - + # Run different benchmark categories try: runner.scaling_benchmark(max_vertices=80, step=10) runner.density_benchmark(n_vertices=40) runner.mode_comparison_benchmark() runner.topology_benchmark() - + # Generate comprehensive report runner.generate_report(save_plots=True) - + except KeyboardInterrupt: print("\nBenchmark interrupted by user") except Exception as e: print(f"\nBenchmark failed with error: {e}") import traceback + traceback.print_exc() - + # Run stress tests print("\n" + "=" * 60) stress_tester = StressTest() stress_tester.run_all_stress_tests() - + print("\nBenchmark suite completed!") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/scripts/run_tests.py b/scripts/run_tests.py index 8c71da8..0c47526 100644 --- a/scripts/run_tests.py +++ b/scripts/run_tests.py @@ -7,7 +7,7 @@ Options: --quick Run only fast tests - --slow Include slow tests + --slow Include slow tests --bench Run benchmarks --property Run property-based tests --coverage Generate coverage report @@ -16,9 +16,9 @@ --help Show this help """ -import sys -import subprocess import argparse +import subprocess +import sys from pathlib import Path @@ -26,23 +26,18 @@ def run_command(cmd: list, description: str, verbose: bool = False): """Run a command and handle errors.""" if verbose: print(f"Running: {' '.join(cmd)}") - + print(f"{description}...") - + try: - result = subprocess.run( - cmd, - capture_output=not verbose, - text=True, - check=True - ) - + result = subprocess.run(cmd, capture_output=not verbose, text=True, check=True) + if not verbose and result.stdout: print(result.stdout) - + print(f"āœ“ {description} completed successfully") return True - + except subprocess.CalledProcessError as e: print(f"āœ— {description} failed!") if e.stdout: @@ -56,111 +51,111 @@ def main(): parser = argparse.ArgumentParser( description="Test runner for min-ratio-cycle solver", formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=__doc__ + epilog=__doc__, ) - + # Test selection options - parser.add_argument('--quick', action='store_true', - help='Run only fast tests (exclude slow/benchmark)') - parser.add_argument('--slow', action='store_true', - help='Include slow tests') - parser.add_argument('--bench', action='store_true', - help='Run benchmark tests') - parser.add_argument('--property', action='store_true', - help='Run property-based tests') - parser.add_argument('--integration', action='store_true', - help='Run integration tests only') - + parser.add_argument( + "--quick", + action="store_true", + help="Run only fast tests (exclude slow/benchmark)", + ) + parser.add_argument("--slow", action="store_true", help="Include slow tests") + parser.add_argument("--bench", action="store_true", help="Run benchmark tests") + parser.add_argument( + "--property", action="store_true", help="Run property-based tests" + ) + parser.add_argument( + "--integration", action="store_true", help="Run integration tests only" + ) + # Output options - parser.add_argument('--coverage', 
action='store_true', - help='Generate coverage report') - parser.add_argument('--parallel', action='store_true', - help='Run tests in parallel') - parser.add_argument('--verbose', '-v', action='store_true', - help='Verbose output') - + parser.add_argument( + "--coverage", action="store_true", help="Generate coverage report" + ) + parser.add_argument("--parallel", action="store_true", help="Run tests in parallel") + parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + # Specific test patterns - parser.add_argument('--pattern', '-k', type=str, - help='Run tests matching pattern') - parser.add_argument('--file', type=str, - help='Run specific test file') - + parser.add_argument("--pattern", "-k", type=str, help="Run tests matching pattern") + parser.add_argument("--file", type=str, help="Run specific test file") + args = parser.parse_args() - + # Build pytest command - cmd = ['pytest'] - + cmd = ["pytest"] + # Base options if args.verbose: - cmd.extend(['-v', '-s']) + cmd.extend(["-v", "-s"]) else: - cmd.append('-q') - + cmd.append("-q") + # Parallel execution if args.parallel: - cmd.extend(['-n', 'auto']) - + cmd.extend(["-n", "auto"]) + # Coverage if args.coverage: - cmd.extend(['--cov=min_ratio_cycle', '--cov-report=html', '--cov-report=term']) - + cmd.extend(["--cov=min_ratio_cycle", "--cov-report=html", "--cov-report=term"]) + # Test selection markers = [] - + if args.quick: - markers.append('not slow and not benchmark') + markers.append("not slow and not benchmark") elif args.slow: - markers.append('slow') + markers.append("slow") elif args.bench: - markers.append('benchmark') + markers.append("benchmark") elif args.property: - markers.append('property') + markers.append("property") elif args.integration: - markers.append('integration') - + markers.append("integration") + if markers: - cmd.extend(['-m', ' or '.join(markers)]) - + cmd.extend(["-m", " or ".join(markers)]) + # Pattern matching if args.pattern: - cmd.extend(['-k', args.pattern]) - + cmd.extend(["-k", args.pattern]) + # Specific file if args.file: cmd.append(args.file) else: # Default test directories test_files = [] - if Path('test_solver.py').exists(): - test_files.append('test_solver.py') - if Path('test_integration.py').exists(): - test_files.append('test_integration.py') - if Path('tests').exists(): - test_files.append('tests/') - + if Path("test_solver.py").exists(): + test_files.append("test_solver.py") + if Path("test_integration.py").exists(): + test_files.append("test_integration.py") + if Path("tests").exists(): + test_files.append("tests/") + if test_files: cmd.extend(test_files) - + # Run the tests print("Min Ratio Cycle Solver - Test Suite") print("=" * 50) - + success = run_command(cmd, "Running tests", args.verbose) - + if not success: print("\nāŒ Some tests failed!") sys.exit(1) - + # Additional operations if args.coverage: print(f"\nšŸ“Š Coverage report generated in htmlcov/") - + # Run benchmarks if requested if args.bench: print("\n" + "=" * 50) - bench_cmd = ['python', 'benchmark_suite.py'] + bench_cmd = ["python", "benchmark_suite.py"] run_command(bench_cmd, "Running benchmark suite", args.verbose) - + print("\nāœ… All tests completed successfully!") diff --git a/tests/conftest.py b/tests/conftest.py index c275e60..52d83a6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,9 +2,11 @@ Shared pytest fixtures for min-ratio-cycle testing. 
""" -import pytest -import numpy as np from typing import List, Tuple + +import numpy as np +import pytest + from min_ratio_cycle.solver import MinRatioCycleSolver @@ -13,7 +15,7 @@ def simple_triangle(): """Simple 3-vertex triangle with known optimal solution.""" solver = MinRatioCycleSolver(3) solver.add_edge(0, 1, 2, 1) # ratio 2 - solver.add_edge(1, 2, 3, 2) # ratio 1.5 + solver.add_edge(1, 2, 3, 2) # ratio 1.5 solver.add_edge(2, 0, 1, 1) # ratio 1 # Optimal cycle: 1->2->0->1 with ratio (3+1+2)/(2+1+1) = 6/4 = 1.5 return solver, 1.5 @@ -25,17 +27,17 @@ def negative_cycle(): solver = MinRatioCycleSolver(3) solver.add_edge(0, 1, -1, 1) # ratio -1 solver.add_edge(1, 2, -2, 1) # ratio -2 - solver.add_edge(2, 0, 1, 1) # ratio 1 + solver.add_edge(2, 0, 1, 1) # ratio 1 # Optimal cycle: 0->1->2->0 with ratio (-1-2+1)/(1+1+1) = -2/3 - return solver, -2/3 + return solver, -2 / 3 -@pytest.fixture +@pytest.fixture def integer_weights_only(): """Graph with only integer weights (should use exact mode).""" solver = MinRatioCycleSolver(4) solver.add_edge(0, 1, 5, 2) - solver.add_edge(1, 2, 3, 1) + solver.add_edge(1, 2, 3, 1) solver.add_edge(2, 3, 2, 2) solver.add_edge(3, 0, 1, 1) return solver @@ -47,7 +49,7 @@ def float_weights(): solver = MinRatioCycleSolver(4) solver.add_edge(0, 1, 5.5, 2.0) solver.add_edge(1, 2, 3.2, 1.1) - solver.add_edge(2, 3, 2.7, 2.3) + solver.add_edge(2, 3, 2.7, 2.3) solver.add_edge(3, 0, 1.1, 1.0) return solver @@ -57,17 +59,17 @@ def large_graph(): """Larger graph for performance testing.""" n = 50 solver = MinRatioCycleSolver(n) - + # Create a graph with guaranteed cycles np.random.seed(42) # Reproducible - + # Add a simple cycle through all vertices for i in range(n): next_v = (i + 1) % n cost = np.random.randint(-5, 6) time = np.random.randint(1, 4) solver.add_edge(i, next_v, cost, time) - + # Add random additional edges for _ in range(n * 2): u = np.random.randint(0, n) @@ -75,7 +77,7 @@ def large_graph(): cost = np.random.randint(-10, 11) time = np.random.randint(1, 6) solver.add_edge(u, v, cost, time) - + return solver @@ -83,15 +85,15 @@ def large_graph(): def disconnected_graph(): """Graph with multiple disconnected components.""" solver = MinRatioCycleSolver(8) - + # Component 1: triangle 0-1-2 solver.add_edge(0, 1, 3, 2) solver.add_edge(1, 2, 2, 1) solver.add_edge(2, 0, 1, 1) - + # Component 2: triangle 3-4-5 with better ratio solver.add_edge(3, 4, 1, 2) - solver.add_edge(4, 5, 1, 2) + solver.add_edge(4, 5, 1, 2) solver.add_edge(5, 3, 0, 1) # completes cycle with ratio 0.4 # Isolated vertices: 6, 7 @@ -104,7 +106,7 @@ def complete_graph(request): """Complete graphs of various sizes.""" n = request.param solver = MinRatioCycleSolver(n) - + np.random.seed(42) # Reproducible for u in range(n): for v in range(n): @@ -112,7 +114,7 @@ def complete_graph(request): cost = np.random.randint(-3, 4) time = np.random.randint(1, 3) solver.add_edge(u, v, cost, time) - + return solver @@ -121,16 +123,16 @@ def pathological_graph(): """Graph designed to stress-test the algorithm.""" n = 20 solver = MinRatioCycleSolver(n) - + # Long path with expensive edges - for i in range(n-1): - solver.add_edge(i, i+1, 100, 1) - + for i in range(n - 1): + solver.add_edge(i, i + 1, 100, 1) + # Cheap cycle at the end - solver.add_edge(n-3, n-2, 1, 10) - solver.add_edge(n-2, n-1, 1, 10) - solver.add_edge(n-1, n-3, -10, 1) # Makes this cycle optimal - + solver.add_edge(n - 3, n - 2, 1, 10) + solver.add_edge(n - 2, n - 1, 1, 10) + solver.add_edge(n - 1, n - 3, -10, 1) # Makes this cycle optimal + 
return solver @@ -138,50 +140,51 @@ def pathological_graph(): def parallel_edges_graph(): """Graph with multiple edges between same vertex pairs.""" solver = MinRatioCycleSolver(3) - + # Multiple 0->1 edges with different costs/times solver.add_edge(0, 1, 10, 2) # ratio 5 - solver.add_edge(0, 1, 6, 3) # ratio 2 - solver.add_edge(0, 1, 8, 1) # ratio 8 - + solver.add_edge(0, 1, 6, 3) # ratio 2 + solver.add_edge(0, 1, 8, 1) # ratio 8 + # Return path - solver.add_edge(1, 2, 2, 1) # ratio 2 - solver.add_edge(2, 0, 1, 1) # ratio 1 - + solver.add_edge(1, 2, 2, 1) # ratio 2 + solver.add_edge(2, 0, 1, 1) # ratio 1 + return solver class GraphAssertions: """Helper class for graph-specific assertions.""" - + @staticmethod def assert_valid_cycle(cycle: List[int], n_vertices: int): """Assert that a cycle is valid.""" assert isinstance(cycle, list) assert len(cycle) >= 3, "Cycle must have at least 3 vertices" assert cycle[0] == cycle[-1], "Cycle must be closed" - + # All vertices should be valid indices for v in cycle: assert 0 <= v < n_vertices, f"Invalid vertex {v}" - + # No consecutive duplicates (except first/last) for i in range(len(cycle) - 1): if i == len(cycle) - 2: # Last edge back to start continue - assert cycle[i] != cycle[i+1], f"Consecutive duplicate at {i}" - + assert cycle[i] != cycle[i + 1], f"Consecutive duplicate at {i}" + @staticmethod def assert_positive_time(sum_time: float): """Assert that total cycle time is positive.""" assert sum_time > 0, "Cycle time must be positive" - + @staticmethod def assert_ratio_consistency(sum_cost: float, sum_time: float, ratio: float): """Assert that ratio equals cost/time.""" expected_ratio = sum_cost / sum_time - assert abs(ratio - expected_ratio) < 1e-10, \ - f"Ratio inconsistency: {ratio} != {expected_ratio}" + assert ( + abs(ratio - expected_ratio) < 1e-10 + ), f"Ratio inconsistency: {ratio} != {expected_ratio}" @pytest.fixture @@ -193,15 +196,9 @@ def graph_assertions(): # Pytest hooks for custom behavior def pytest_configure(config): """Configure pytest with custom markers.""" - config.addinivalue_line( - "markers", "slow: mark test as slow running" - ) - config.addinivalue_line( - "markers", "benchmark: mark test as a benchmark" - ) - config.addinivalue_line( - "markers", "property: mark test as property-based" - ) + config.addinivalue_line("markers", "slow: mark test as slow running") + config.addinivalue_line("markers", "benchmark: mark test as a benchmark") + config.addinivalue_line("markers", "property: mark test as property-based") def pytest_collection_modifyitems(config, items): @@ -210,11 +207,11 @@ def pytest_collection_modifyitems(config, items): # Mark property-based tests if "hypothesis" in item.name or "property" in item.name: item.add_marker(pytest.mark.property) - - # Mark benchmark tests + + # Mark benchmark tests if "benchmark" in item.name or "performance" in item.name: item.add_marker(pytest.mark.benchmark) - + # Mark slow tests if "large" in item.name or "stress" in item.name: item.add_marker(pytest.mark.slow) diff --git a/tests/testing_readme.md b/tests/testing_readme.md index 89fd410..e1a0a88 100644 --- a/tests/testing_readme.md +++ b/tests/testing_readme.md @@ -67,7 +67,7 @@ Pre-built test graphs for common scenarios: Specialized validation helpers: ```python def assert_valid_cycle(cycle, n_vertices) -def assert_positive_time(sum_time) +def assert_positive_time(sum_time) def assert_ratio_consistency(cost, time, ratio) ``` @@ -126,7 +126,7 @@ python run_tests.py --parallel --coverage ### Test Markers Tests are categorized 
with pytest markers: - `@pytest.mark.slow`: Long-running tests -- `@pytest.mark.benchmark`: Performance tests +- `@pytest.mark.benchmark`: Performance tests - `@pytest.mark.property`: Property-based tests - `@pytest.mark.integration`: End-to-end tests @@ -144,7 +144,7 @@ The benchmark suite provides detailed analysis: ### Graph Topologies Tested 1. **Random sparse** (10% density) -2. **Random dense** (30-50% density) +2. **Random dense** (30-50% density) 3. **Complete graphs** (100% density, smaller sizes) 4. **Grid graphs** (2D lattice structure) 5. **Cycle graphs** (Simple ring topology) @@ -153,7 +153,7 @@ The benchmark suite provides detailed analysis: ### Scaling Analysis Tests graph sizes from 10 to 100+ vertices and generates plots showing: - Time vs vertices (log scale) -- Time vs edge count +- Time vs edge count - Memory vs graph size - Exact vs numeric mode comparison @@ -165,9 +165,9 @@ For critical test cases, we manually verify results: ```python def validate_cycle(self, solver, cycle, expected_cost, expected_time, expected_ratio): # Walk through cycle edges manually - actual_cost = sum(edge_costs_in_cycle) + actual_cost = sum(edge_costs_in_cycle) actual_time = sum(edge_times_in_cycle) - + # Verify against solver output assert abs(actual_cost - expected_cost) < 1e-10 assert abs(actual_ratio - expected_ratio) < 1e-10 @@ -184,7 +184,7 @@ Every valid solution must satisfy: ### Known Solution Tests Test cases with predetermined correct answers: - Simple triangles with calculated optimal ratios -- Negative cost cycles +- Negative cost cycles - Self-loops (trivial cycles) - Disconnected components (should find global optimum) @@ -249,10 +249,10 @@ def test_new_feature(self, graph_fixture, graph_assertions): """Test description explaining what we're validating.""" # Arrange solver = setup_test_case() - - # Act + + # Act cycle, cost, time, ratio = solver.solve() - + # Assert graph_assertions.assert_valid_cycle(cycle, solver.n) assert specific_property_holds(cycle, cost, time, ratio) @@ -263,7 +263,7 @@ def test_new_feature(self, graph_fixture, graph_assertions): ### Common Issues 1. **Numerical precision**: Use appropriate tolerances (1e-10 for exact, 1e-6 for numeric) -2. **Random test failures**: Set `np.random.seed()` for reproducibility +2. **Random test failures**: Set `np.random.seed()` for reproducibility 3. **Performance regressions**: Compare against baseline times 4. **Memory leaks**: Check peak memory doesn't grow unexpectedly @@ -275,7 +275,7 @@ pytest test_solver.py::TestEdgeCases::test_self_loop -v -s # Drop into debugger on failure pytest --pdb -# Show local variables on failure +# Show local variables on failure pytest --tb=long # Profile slow tests
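The `validate_cycle` snippet above is pseudocode; a concrete version of the same manual walk is sketched below. Storing edges in a `(u, v) -> (cost, time)` dict is an assumption made for the illustration (it ignores parallel edges) and is not how the solver stores its edges.

```python
def validate_cycle(edge_weights, cycle, expected_cost, expected_time, expected_ratio, tol=1e-10):
    """edge_weights maps (u, v) -> (cost, time); cycle is closed, e.g. [0, 1, 2, 0]."""
    pairs = list(zip(cycle, cycle[1:]))  # consecutive edges of the closed walk
    actual_cost = sum(edge_weights[(u, v)][0] for u, v in pairs)
    actual_time = sum(edge_weights[(u, v)][1] for u, v in pairs)
    actual_ratio = actual_cost / actual_time

    # Verify against solver output
    assert abs(actual_cost - expected_cost) < tol
    assert abs(actual_time - expected_time) < tol
    assert abs(actual_ratio - expected_ratio) < tol


# e.g. the simple_triangle fixture: optimal cycle 0->1->2->0, ratio (2+3+1)/(1+2+1) = 1.5
edges = {(0, 1): (2, 1), (1, 2): (3, 2), (2, 0): (1, 1)}
validate_cycle(edges, [0, 1, 2, 0], expected_cost=6, expected_time=4, expected_ratio=1.5)
```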