#!/usr/bin/env python3
"""
Unified Simulation Framework for Multi-Heart-Model
Automatically saves all results to Google Drive.

All simulations write to: ~/drive_links/ALL_MY_WORK/SimResults/
Organized by simulation type and run ID.

Author: Multi-Heart-Model Team
Date: 2025-11-26
"""

import csv
import json
import os
import subprocess
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional

# ============================================================================
# GOOGLE DRIVE CONFIGURATION
# ============================================================================

# Everything goes into Google Drive → "All My Work" → "SimResults"
BASE_RESULTS_DIR = os.path.expanduser(
    "~/drive_links/ALL_MY_WORK/SimResults"
)

# Fallback to a local directory if Drive is not available
LOCAL_FALLBACK_DIR = os.path.expanduser(
    "~/Multi-Heart-Model-Results"
)
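
# A RunLogger (defined below) lays results out roughly like this
# (illustrative names; run IDs are timestamped):
#
#   SimResults/
#     <sim_name>/
#       <YYYYMMDD_HHMMSS>_<tag>/
#         parameters.json
#         metadata.json
#         REPORT.md
#         raw/result_000001.json, ...
#         summary/summary.csv        (plus any checkpoint CSVs)
#         plots/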


def ensure_dir(path: str) -> str:
    """Ensure a directory exists, creating it if needed."""
    Path(path).mkdir(parents=True, exist_ok=True)
    return path


def get_results_base_dir() -> str:
    """Get the base results directory, falling back to a local path."""
    if os.path.exists(os.path.dirname(BASE_RESULTS_DIR)):
        try:
            ensure_dir(BASE_RESULTS_DIR)
            return BASE_RESULTS_DIR
        except Exception as e:
            print(f"⚠️ Warning: Cannot access Drive ({e})")
            print(f"   Falling back to: {LOCAL_FALLBACK_DIR}")
            ensure_dir(LOCAL_FALLBACK_DIR)
            return LOCAL_FALLBACK_DIR
    else:
        print("⚠️ Warning: Drive not mounted")
        print(f"   Using local: {LOCAL_FALLBACK_DIR}")
        ensure_dir(LOCAL_FALLBACK_DIR)
        return LOCAL_FALLBACK_DIR


def timestamp_tag() -> str:
    """Generate a timestamp tag for the run ID, e.g. '20251126_143000'."""
    return datetime.now().strftime("%Y%m%d_%H%M%S")


# ============================================================================
# RUN LOGGER - Automatic Google Drive Integration
# ============================================================================

@dataclass
class RunMetadata:
    """Metadata for a simulation run"""
    run_id: str
    sim_name: str
    tag: str
    start_time: str
    base_dir: str
    parameters: Dict[str, Any]
    git_commit: Optional[str] = None
    git_branch: Optional[str] = None
    # Filled in by RunLogger.finalize(); kept separate from `parameters`
    end_time: Optional[str] = None
    total_results: Optional[int] = None
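
# finalize() serializes this dataclass with asdict() into metadata.json;
# an illustrative example (all values made up):
#
#   {
#     "run_id": "20251126_143000_full_sweep",
#     "sim_name": "primal_kernel",
#     "tag": "full_sweep",
#     "start_time": "2025-11-26T14:30:00.000000",
#     "base_dir": ".../SimResults/primal_kernel/20251126_143000_full_sweep",
#     "parameters": {"mu": 1.5, "omega": 1.0},
#     "git_commit": "0a1b2c3d",
#     "git_branch": "main",
#     "end_time": "2025-11-26T15:05:12.000000",
#     "total_results": 900
#   }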


class RunLogger:
    """
    Unified logger for all simulation runs.
    Automatically saves to Google Drive.

    Usage:
        logger = RunLogger("primal_kernel", tag="full_sweep")
        logger.log_parameters({"mu": 1.5, "omega": 1.0})

        for params in parameter_space:
            result = run_simulation(params)
            logger.add_result(params, result)

        logger.finalize(generate_report=True)
    """

    def __init__(self, sim_name: str, tag: str = "sweep"):
        self.sim_name = sim_name
        self.tag = tag
        self.run_id = f"{timestamp_tag()}_{tag}"
        self.start_time = datetime.now().isoformat()

        # All results go under Google Drive / All My Work / SimResults
        results_base = get_results_base_dir()
        self.base_dir = os.path.join(
            results_base,
            sim_name,
            self.run_id,
        )

        # Directory structure
        self.raw_dir = os.path.join(self.base_dir, "raw")
        self.summary_dir = os.path.join(self.base_dir, "summary")
        self.plots_dir = os.path.join(self.base_dir, "plots")

        # Create directories
        ensure_dir(self.raw_dir)
        ensure_dir(self.summary_dir)
        ensure_dir(self.plots_dir)

        # Storage
        self.summary_rows: List[Dict[str, Any]] = []
        self.metadata = RunMetadata(
            run_id=self.run_id,
            sim_name=sim_name,
            tag=tag,
            start_time=self.start_time,
            base_dir=self.base_dir,
            parameters={},
        )

        # Try to capture git info (best-effort)
        self._capture_git_info()

        print("=" * 80)
        print(f"🚀 Starting {sim_name} sweep: {self.run_id}")
        print("=" * 80)
        print("📁 Results will be saved to:")
        print(f"   {self.base_dir}")
        if self.base_dir.startswith(BASE_RESULTS_DIR):
            print("   ✓ Google Drive sync active!")
        print("=" * 80)

    def _capture_git_info(self):
        """Capture git commit and branch info (best-effort)."""
        try:
            commit = subprocess.check_output(
                ['git', 'rev-parse', 'HEAD'],
                stderr=subprocess.DEVNULL
            ).decode().strip()[:8]
            branch = subprocess.check_output(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                stderr=subprocess.DEVNULL
            ).decode().strip()
            self.metadata.git_commit = commit
            self.metadata.git_branch = branch
        except (OSError, subprocess.CalledProcessError):
            # Not a git repo, or git not installed; leave fields as None.
            pass

    def log_parameters(self, params: Dict[str, Any]):
        """Log sweep parameters"""
        self.metadata.parameters = params
        params_file = os.path.join(self.base_dir, "parameters.json")
        with open(params_file, 'w') as f:
            json.dump(params, f, indent=2)

    def add_result(self, params: Dict[str, Any], metrics: Dict[str, Any]):
        """Add a single result row"""
        row = {**params, **metrics}
        self.summary_rows.append(row)

        # Also save the individual result as JSON
        result_id = len(self.summary_rows)
        result_file = os.path.join(
            self.raw_dir,
            f"result_{result_id:06d}.json"
        )
        with open(result_file, 'w') as f:
            json.dump(row, f, indent=2)

    def save_checkpoint(self, checkpoint_name: str = "checkpoint"):
        """Save the current results as a checkpoint CSV"""
        checkpoint_file = os.path.join(
            self.summary_dir,
            f"{checkpoint_name}.csv"
        )
        self._write_csv(checkpoint_file, self.summary_rows)
        print(f"   💾 Checkpoint saved: {checkpoint_name} ({len(self.summary_rows)} results)")

    def _write_csv(self, filepath: str, rows: List[Dict[str, Any]]):
        """Write rows to CSV"""
        if not rows:
            return

        # Union of keys across all rows (first-seen order), so rows with
        # extra metric columns don't make DictWriter raise ValueError.
        fieldnames: List[str] = []
        for row in rows:
            for key in row:
                if key not in fieldnames:
                    fieldnames.append(key)

        with open(filepath, 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames, restval="")
            writer.writeheader()
            writer.writerows(rows)

    def finalize(self, generate_report: bool = True):
        """Finalize the run and generate outputs"""
        self.metadata.end_time = datetime.now().isoformat()
        self.metadata.total_results = len(self.summary_rows)

        print(f"\n{'=' * 80}")
        print(f"✅ Sweep complete: {len(self.summary_rows)} results")
        print(f"{'=' * 80}")

        # Save final summary CSV
        summary_file = os.path.join(self.summary_dir, "summary.csv")
        self._write_csv(summary_file, self.summary_rows)
        print("   📊 Summary saved: summary.csv")

        # Save metadata (kept separate from the user's sweep parameters)
        metadata_file = os.path.join(self.base_dir, "metadata.json")
        with open(metadata_file, 'w') as f:
            json.dump(asdict(self.metadata), f, indent=2)
        print("   📋 Metadata saved: metadata.json")

        # Generate report if requested
        if generate_report:
            self._generate_report()

        print(f"\n{'=' * 80}")
        print("📁 All results saved to:")
        print(f"   {self.base_dir}")
        if self.base_dir.startswith(BASE_RESULTS_DIR):
            print("   ✓ Synced to Google Drive!")
        print(f"{'=' * 80}\n")

    def _generate_report(self):
        """Generate a markdown report"""
        report_file = os.path.join(self.base_dir, "REPORT.md")

        with open(report_file, 'w') as f:
            f.write(f"# {self.sim_name} - {self.tag}\n\n")
            f.write(f"**Run ID:** {self.run_id}\n\n")
            f.write(f"**Started:** {self.metadata.start_time}\n\n")

            if self.metadata.git_commit:
                f.write(f"**Git Commit:** {self.metadata.git_commit}\n\n")
                f.write(f"**Git Branch:** {self.metadata.git_branch}\n\n")

            f.write("## Summary\n\n")
            f.write(f"- Total Results: {len(self.summary_rows)}\n")
            f.write(f"- Output Directory: `{self.base_dir}`\n\n")

            f.write("## Parameters\n\n")
            f.write("```json\n")
            json.dump(self.metadata.parameters, f, indent=2)
            f.write("\n```\n\n")

            f.write("## Files\n\n")
            f.write("- `summary/summary.csv` - Complete results\n")
            f.write("- `raw/result_*.json` - Individual results\n")
            f.write("- `plots/` - Visualizations\n")
            f.write("- `metadata.json` - Run metadata\n\n")

        print("   📄 Report generated: REPORT.md")
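
# Sketch: periodic checkpointing in a long sweep (`run_simulation`,
# `parameter_space`, and the every-100 cadence are hypothetical, not part of
# this module):
#
#     logger = RunLogger("primal_kernel", tag="full_sweep")
#     for i, params in enumerate(parameter_space):
#         logger.add_result(params, run_simulation(params))
#         if (i + 1) % 100 == 0:
#             logger.save_checkpoint(f"checkpoint_{i + 1:06d}")
#     logger.finalize()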


# ============================================================================
# HELPER FUNCTIONS
# ============================================================================

def test_drive_access():
    """Test whether Google Drive is accessible and writable"""
    drive_path = os.path.expanduser("~/drive_links/ALL_MY_WORK")

    print("Testing Google Drive Access...")
    print(f"   Checking: {drive_path}")

    if os.path.exists(drive_path):
        print("   ✓ Drive accessible!")

        # Try to create a test file to confirm write access
        test_file = os.path.join(drive_path, ".test_write_access")
        try:
            with open(test_file, 'w') as f:
                f.write("test")
            os.remove(test_file)
            print("   ✓ Write access confirmed!")
            return True
        except Exception as e:
            print(f"   ✗ Cannot write to Drive: {e}")
            return False
    else:
        print("   ✗ Drive not accessible")
        print("   Run: bash setup_drive_symlink.sh")
        return False


def list_recent_runs(sim_name: Optional[str] = None, limit: int = 10):
    """List recent simulation runs"""
    results_base = get_results_base_dir()

    print(f"\n{'=' * 80}")
    print("Recent Simulation Runs")
    print(f"{'=' * 80}")
    print(f"Base Directory: {results_base}\n")

    if sim_name:
        sim_dirs = [os.path.join(results_base, sim_name)]
    else:
        sim_dirs = [
            os.path.join(results_base, d)
            for d in os.listdir(results_base)
            if os.path.isdir(os.path.join(results_base, d))
        ]

    all_runs = []
    for sim_dir in sim_dirs:
        if not os.path.exists(sim_dir):
            continue

        sim_name_local = os.path.basename(sim_dir)

        for run_id in os.listdir(sim_dir):
            run_path = os.path.join(sim_dir, run_id)
            if not os.path.isdir(run_path):
                continue
            metadata_file = os.path.join(run_path, "metadata.json")
            if not os.path.exists(metadata_file):
                continue
            try:
                with open(metadata_file) as f:
                    metadata = json.load(f)
            except (OSError, json.JSONDecodeError):
                # Skip runs with unreadable or corrupt metadata
                continue
            all_runs.append((sim_name_local, run_id, metadata, run_path))

    # Sort by start time, newest first
    all_runs.sort(key=lambda x: x[2].get('start_time', ''), reverse=True)

    # Display
    for i, (sim, run_id, meta, path) in enumerate(all_runs[:limit]):
        print(f"{i+1}. {sim} / {run_id}")
        print(f"   Started: {meta.get('start_time', 'Unknown')}")
        print(f"   Path: {path}")
        if meta.get('total_results') is not None:
            print(f"   Results: {meta['total_results']}")
        print()

    print(f"{'=' * 80}\n")


# ============================================================================
# MAIN - Demo/Test
# ============================================================================

def main():
    """Demo the framework"""
    print("\n" + "=" * 80)
    print("UNIFIED SIMULATION FRAMEWORK")
    print("=" * 80 + "\n")

    # Test Drive access
    test_drive_access()

    print("\n" + "=" * 80)
    print("DEMO: Creating Test Run")
    print("=" * 80 + "\n")

    # Create demo logger
    logger = RunLogger("demo_simulation", tag="test")

    # Log parameters
    logger.log_parameters({
        "param1_range": [1.0, 2.0, 3.0],
        "param2_range": [0.5, 1.0, 1.5],
    })

    # Add some results (stdlib `random` keeps the demo dependency-free)
    import random
    for i in range(10):
        logger.add_result(
            params={"param1": 1.0 + i * 0.1, "param2": 0.5 + i * 0.05},
            metrics={
                "output": random.gauss(0.0, 1.0),
                "success": True,
                "iteration": i,
            },
        )

    # Save checkpoint
    logger.save_checkpoint("midpoint")

    # Finalize
    logger.finalize(generate_report=True)

    # List recent runs
    list_recent_runs(limit=5)


if __name__ == "__main__":
    main()