|
| 1 | +#!/usr/bin/env python3 |
| 2 | +"""CCv3 Hackathon Demo - Prolonged Coordination with MongoDB Atlas. |
| 3 | +
|
| 4 | +This demo showcases Statement 1: Prolonged Coordination |
| 5 | +- Multi-step workflow spanning "hours" (simulated) |
| 6 | +- MongoDB Atlas as the context engine |
| 7 | +- Failure recovery and task resumption |
| 8 | +- All sponsor integrations |
| 9 | +
|
| 10 | +Run: python -m opc.ccv3.demo_hackathon |
| 11 | +""" |
| 12 | + |
| 13 | +import asyncio |
| 14 | +import os |
| 15 | +import sys |
| 16 | +from datetime import datetime, timezone |
| 17 | +from uuid import uuid4 |
| 18 | + |
# ANSI escape sequences for colored terminal output.
class C:
    """ANSI color/style codes used by the demo's print helpers."""

    HEADER, BLUE, CYAN, GREEN, YELLOW, RED = (
        '\033[95m',  # bright magenta
        '\033[94m',  # bright blue
        '\033[96m',  # bright cyan
        '\033[92m',  # bright green
        '\033[93m',  # bright yellow
        '\033[91m',  # bright red
    )
    END = '\033[0m'   # reset all attributes
    BOLD = '\033[1m'  # bold text
| 29 | + |
| 30 | + |
def print_step(step: int, total: int, msg: str):
    """Print a bold step header of the form ``[step/total] msg``."""
    counter = f"{C.CYAN}[{step}/{total}]{C.END}"
    print(f"\n{counter} {C.BOLD}{msg}{C.END}")
| 33 | + |
| 34 | + |
def print_sponsor(sponsor: str, action: str):
    """Announce a sponsor-integration action under the current step."""
    label = f"{C.YELLOW}► {sponsor}:{C.END}"
    print(f" {label} {action}")
| 37 | + |
| 38 | + |
def print_success(msg: str):
    """Print *msg* prefixed with a green check mark."""
    check = f"{C.GREEN}✓{C.END}"
    print(f" {check} {msg}")
| 41 | + |
| 42 | + |
def print_data(key: str, value: str):
    """Print an indented ``key: value`` detail line (key in blue)."""
    prefix = f"{C.BLUE}{key}:{C.END}"
    print(" " + prefix + " " + value)
| 45 | + |
| 46 | + |
async def demo_prolonged_coordination():
    """Demo: Multi-step workflow with MongoDB Atlas context engine.

    Runs a seven-step scripted workflow: connect to Atlas, register a repo,
    create a tracked run, store code embeddings, simulate a failure and
    recovery, run a Galileo quality gate, and persist a handoff pack so a
    later session can resume. All workflow state is written to MongoDB
    (or Atlas's in-memory fallback), which is what makes the recovery and
    handoff steps possible.
    """

    print(f"\n{C.HEADER}{'='*60}{C.END}")
    print(f"{C.HEADER} CCv3: Prolonged Coordination Demo{C.END}")
    print(f"{C.HEADER} MongoDB Atlas as Context Engine{C.END}")
    print(f"{C.HEADER}{'='*60}{C.END}")

    # Import our modules (function-scope so the banner prints first).
    # Note: the unused `HandoffPack`/`Citation` imports were removed.
    from .atlas import Atlas
    from .embeddings import EmbeddingsRouter
    from .galileo import GalileoEval

    total_steps = 7

    # =========================================================================
    # STEP 1: Connect to MongoDB Atlas
    # =========================================================================
    print_step(1, total_steps, "Connecting to MongoDB Atlas (Context Engine)")
    print_sponsor("MongoDB Atlas", "Establishing connection...")

    atlas = Atlas()
    await atlas.connect()

    # Atlas falls back to an in-memory store when no cluster is configured.
    if atlas.is_in_memory:
        print_success("Using in-memory mode (set MONGODB_URI for persistence)")
    else:
        print_success("Connected to MongoDB Atlas cluster")

    # =========================================================================
    # STEP 2: Initialize Repository Context
    # =========================================================================
    print_step(2, total_steps, "Initializing Repository Context")
    print_sponsor("MongoDB Atlas", "Creating repo document in 'repos' collection")

    repo_id = await atlas.register_repo(
        name="hackathon-project",
        root_path="/workspace/agentic-app",
        languages=["python", "typescript"],
    )
    print_success(f"Repository registered: {repo_id}")
    print_data("Collections", "repos, files, symbols, graphs, handoffs, runs, embeddings")

    # =========================================================================
    # STEP 3: Create Long-Running Workflow (Simulated)
    # =========================================================================
    print_step(3, total_steps, "Starting Long-Running Workflow")
    print_sponsor("MongoDB Atlas", "Creating run document to track workflow state")

    run_id = await atlas.create_run(
        repo_id=repo_id,
        command="/build feature-auth",
        description="Implement authentication system with OAuth2",
    )
    print_success(f"Workflow started: {run_id}")
    print_data("Status", "running")
    print_data("Task", "Implement authentication system")

    # Simulated workflow plan; persisted to MongoDB and mutated during the
    # failure/recovery step below.
    workflow_steps = [
        {"step": 1, "action": "analyze_requirements", "status": "completed"},
        {"step": 2, "action": "design_architecture", "status": "completed"},
        {"step": 3, "action": "implement_oauth2", "status": "running"},
        {"step": 4, "action": "write_tests", "status": "pending"},
        {"step": 5, "action": "validate_security", "status": "pending"},
    ]

    await atlas.update_run(run_id, plan={"steps": workflow_steps})
    print_success("Workflow plan stored in MongoDB Atlas")
    for step in workflow_steps:
        # Green = completed, yellow = running, blue = pending.
        status_color = C.GREEN if step["status"] == "completed" else (C.YELLOW if step["status"] == "running" else C.BLUE)
        print(f" {status_color}[{step['status']}]{C.END} Step {step['step']}: {step['action']}")

    # =========================================================================
    # STEP 4: Generate Embeddings (Jina v3)
    # =========================================================================
    print_step(4, total_steps, "Generating Context Embeddings")
    print_sponsor("Jina AI", "Using jina-embeddings-v3 with task adapters")

    embeddings = EmbeddingsRouter()

    # Code snippets standing in for real repository context.
    code_samples = [
        "class AuthService:\n def login(self, username, password):\n return self.oauth2_provider.authenticate(username, password)",
        "def verify_token(token: str) -> User:\n payload = jwt.decode(token, SECRET_KEY)\n return User.from_payload(payload)",
        "async def refresh_token(refresh_token: str) -> TokenPair:\n if not validate_refresh(refresh_token):\n raise InvalidTokenError()",
    ]

    for i, code in enumerate(code_samples):
        # Embed for storage (retrieval.passage adapter).
        emb = await embeddings.embed_for_storage(code)
        await atlas.store_embedding(
            repo_id=repo_id,
            object_type="code",
            object_id=f"auth_code_{i}",
            vector=emb,
            content=code,
            metadata={"file": "auth_service.py", "line": i * 10 + 1},
        )

    print_success(f"Stored {len(code_samples)} code embeddings")
    # `emb` is the vector from the last loop iteration; code_samples is a
    # non-empty literal, so it is always bound here.
    print_data("Embedding dim", str(len(emb)))
    print_data("Task adapter", "retrieval.passage (for storage)")
    print_data("Provider", embeddings.provider_name)

    # =========================================================================
    # STEP 5: Simulate Failure & Recovery
    # =========================================================================
    print_step(5, total_steps, "Simulating Failure & Recovery")
    print_sponsor("MongoDB Atlas", "Workflow state persisted - can recover from failure")

    # Mark the run as interrupted to simulate a crash mid-step.
    await atlas.update_run(run_id, status="interrupted")
    print(f" {C.RED}✗{C.END} Simulated failure during step 3 (implement_oauth2)")
    print_data("Run status", "interrupted")

    # Simulate session restart (small pause for demo pacing).
    print(f"\n {C.YELLOW}... Session restart ...{C.END}\n")
    await asyncio.sleep(0.5)

    # Recovery: Load workflow state from MongoDB.
    print_sponsor("MongoDB Atlas", "Recovering workflow state from 'runs' collection")
    # In real implementation: run_data = await atlas.get_run(run_id)

    # Resume from the last completed step and advance the plan.
    await atlas.update_run(run_id, status="running")
    workflow_steps[2]["status"] = "completed"  # Mark oauth2 as done
    workflow_steps[3]["status"] = "running"    # Move to tests
    await atlas.update_run(run_id, plan={"steps": workflow_steps})

    print_success("Workflow recovered and resumed")
    print_data("Resumed from", "step 3 (implement_oauth2)")
    print_data("Now running", "step 4 (write_tests)")

    # =========================================================================
    # STEP 6: Quality Gate (Galileo)
    # =========================================================================
    print_step(6, total_steps, "Evaluating Output Quality")
    print_sponsor("Galileo AI", "RAG Triad evaluation for context quality")

    galileo = GalileoEval()

    # Evaluate the generated code against requirements.
    eval_result = await galileo.evaluate(
        query="Implement OAuth2 authentication with token refresh",
        response="AuthService class with login(), verify_token(), and refresh_token() methods using JWT",
        context="OAuth2 authentication requires: 1) Token-based auth 2) Refresh token rotation 3) JWT validation",
    )

    print_success(f"Quality gate: {'PASSED' if eval_result.passed else 'NEEDS REVIEW'}")
    print_data("Context Adherence", f"{eval_result.scores.get('context_adherence', 0):.2f}")
    print_data("Chunk Relevance", f"{eval_result.scores.get('chunk_relevance', 0):.2f}")
    print_data("Correctness", f"{eval_result.scores.get('correctness', 0):.2f}")

    # =========================================================================
    # STEP 7: Create Handoff Pack for Next Session
    # =========================================================================
    print_step(7, total_steps, "Creating Handoff Pack")
    print_sponsor("MongoDB Atlas", "Storing handoff in 'handoffs' collection")

    # Machine-readable handoff (YAML) mirroring the workflow plan above.
    handoff_yaml = f"""task: Complete authentication implementation
resumed_from: {run_id}
completed_steps:
 - analyze_requirements
 - design_architecture
 - implement_oauth2
pending_steps:
 - write_tests
 - validate_security
context:
 files_modified:
 - auth_service.py
 - auth_routes.py
 key_decisions:
 - Using JWT for stateless tokens
 - Refresh token rotation for security
 blockers: none
"""

    # Human-readable handoff (Markdown). Fix: 3 of 5 steps are complete at
    # this point (the original said "4/5", contradicting its own lists).
    handoff_md = f"""# Handoff: Authentication Implementation

**Run ID:** {run_id}
**Status:** In Progress (3/5 steps complete)

## Completed
- ✓ Requirements analysis
- ✓ Architecture design
- ✓ OAuth2 implementation

## Next Steps
- [ ] Write unit tests for AuthService
- [ ] Security validation (OWASP checklist)

## Key Context
- Using JWT tokens with 15-min expiry
- Refresh tokens stored in MongoDB
- Rate limiting on auth endpoints
"""

    # Rough 4-characters-per-token heuristic, computed once and reused below.
    token_estimate = len(handoff_yaml + handoff_md) // 4

    await atlas.store_handoff(
        repo_id=repo_id,
        task="authentication-implementation",
        yaml_content=handoff_yaml,
        markdown_content=handoff_md,
        citations=[{"file": "auth_service.py", "lines": "1-50"}],
        token_estimate=token_estimate,
    )

    print_success("Handoff pack stored in MongoDB Atlas")
    print_data("Task ID", "authentication-implementation")
    print_data("Token estimate", str(token_estimate))

    # Mark workflow as paused (for next session).
    await atlas.update_run(run_id, status="paused")

    # =========================================================================
    # Summary
    # =========================================================================
    print(f"\n{C.HEADER}{'='*60}{C.END}")
    print(f"{C.HEADER} Demo Complete - Prolonged Coordination{C.END}")
    print(f"{C.HEADER}{'='*60}{C.END}")

    print(f"""
{C.BOLD}Key Achievements:{C.END}

 {C.GREEN}✓{C.END} Multi-step workflow tracked in MongoDB Atlas
 {C.GREEN}✓{C.END} Failure recovery with state persistence
 {C.GREEN}✓{C.END} Context embeddings stored for retrieval
 {C.GREEN}✓{C.END} Quality evaluation before proceeding
 {C.GREEN}✓{C.END} Handoff pack for session continuity

{C.BOLD}Sponsors Used:{C.END}

 {C.YELLOW}MongoDB Atlas{C.END} - Context engine (repos, runs, handoffs, embeddings)
 {C.YELLOW}Jina AI{C.END} - Embeddings with task adapters (v3)
 {C.YELLOW}Galileo AI{C.END} - RAG Triad quality evaluation
 {C.YELLOW}Fireworks AI{C.END} - LLM inference (when API key set)

{C.BOLD}Problem Statement:{C.END} Prolonged Coordination

 This demo shows how CCv3 enables agentic workflows that:
 - Span multiple sessions (hours/days)
 - Survive failures and restarts
 - Maintain reasoning state in MongoDB
 - Ensure task consistency through handoffs
""")

    # Cleanup: release network clients/connections.
    await embeddings.close()
    await galileo.close()
    await atlas.close()
| 300 | + |
| 301 | + |
async def demo_adaptive_retrieval():
    """Demo: Adaptive retrieval with hybrid search.

    Indexes three small documents with storage-task embeddings, then runs a
    hybrid (full-text + vector) search against MongoDB Atlas using a
    query-task embedding, fusing results with reciprocal rank fusion.
    """

    print(f"\n{C.HEADER}{'='*60}{C.END}")
    print(f"{C.HEADER} CCv3: Adaptive Retrieval Demo{C.END}")
    print(f"{C.HEADER}{'='*60}{C.END}")

    from .atlas import Atlas
    from .embeddings import EmbeddingsRouter

    atlas = Atlas()
    await atlas.connect()

    embeddings = EmbeddingsRouter()

    # Create test data
    repo_id = await atlas.register_repo("test-retrieval", "/tmp", ["python"])

    # Store some documents
    docs = [
        ("auth.py", "def authenticate(user, password): return check_credentials(user, password)"),
        ("routes.py", "app.post('/login', authenticate_handler)"),
        ("models.py", "class User: username: str; password_hash: str"),
    ]

    print_step(1, 3, "Indexing documents with embeddings")
    for filename, content in docs:
        emb = await embeddings.embed_for_storage(content)
        await atlas.store_embedding(
            repo_id=repo_id,
            object_type="file",
            object_id=filename,
            vector=emb,
            content=content,
            metadata={"filename": filename},
        )
        # Fix: report the file that was just indexed — the original printed a
        # literal placeholder ("(unknown)") instead of the loop variable.
        print_success(f"Indexed: {filename}")

    print_step(2, 3, "Hybrid search with RRF fusion")
    query = "how does authentication work"
    # Query embeddings use a different task adapter than storage embeddings.
    query_emb = await embeddings.embed_for_search(query)

    print_sponsor("MongoDB Atlas", "Combining text search + vector search")
    results = await atlas.hybrid_search(
        repo_id=repo_id,
        query=query,
        query_vector=query_emb,
        limit=3,
    )

    print_success(f"Found {len(results)} results")
    for r in results:
        print_data(r.get("object_id", "?"), f"score={r.get('rrf_score', 0):.4f}")

    print_step(3, 3, "Adaptive chunking based on query")
    print_sponsor("Jina AI", "Task-specific adapter: retrieval.query")
    print_success("Query embedding uses different adapter than storage")

    # Cleanup: release clients/connections.
    await embeddings.close()
    await atlas.close()
| 362 | + |
| 363 | + |
async def main():
    """Run hackathon demos: prolonged coordination, then adaptive retrieval."""

    # Banner is a single literal so the ASCII art stays exactly as authored.
    banner = f"""
{C.BOLD}{C.CYAN}
 ██████╗ ██████╗██╗ ██╗██████╗
 ██╔════╝██╔════╝██║ ██║╚════██╗
 ██║ ██║ ██║ ██║ █████╔╝
 ██║ ██║ ╚██╗ ██╔╝ ╚═══██╗
 ╚██████╗╚██████╗ ╚████╔╝ ██████╔╝
 ╚═════╝ ╚═════╝ ╚═══╝ ╚═════╝
{C.END}
 {C.BOLD}Continuous Context Engineering{C.END}
 {C.BLUE}Hackathon Edition - January 2026{C.END}
"""
    print(banner)

    # Demo 1: the headline problem statement.
    await demo_prolonged_coordination()

    # Pause so the audience can read the summary before the next demo.
    print(f"\n{C.CYAN}Press Enter to run Adaptive Retrieval demo...{C.END}")
    input()

    # Demo 2: hybrid / adaptive retrieval.
    await demo_adaptive_retrieval()
| 387 | + |
| 388 | + |
| 389 | +if __name__ == "__main__": |
| 390 | + try: |
| 391 | + asyncio.run(main()) |
| 392 | + except KeyboardInterrupt: |
| 393 | + print(f"\n{C.YELLOW}Demo cancelled{C.END}") |
| 394 | + except Exception as e: |
| 395 | + print(f"\n{C.RED}Error: {e}{C.END}") |
| 396 | + sys.exit(1) |
0 commit comments