diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..9cf84074e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,94 @@
+name: Bug Report
+description: Report a bug in Lighthouse AI (Dream Server, Guardian, Memory Shepherd, Token Spy)
+labels: ["bug"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to report a bug. Please fill out the sections below so we can reproduce and fix the issue.
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: A clear summary of the bug.
+      placeholder: What went wrong?
+    validations:
+      required: true
+
+  - type: textarea
+    id: steps-to-reproduce
+    attributes:
+      label: Steps to Reproduce
+      description: Step-by-step instructions to trigger the bug.
+      placeholder: |
+        1. Run `./install.sh`
+        2. Select option X
+        3. ...
+    validations:
+      required: true
+
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: Expected Behavior
+      description: What you expected to happen.
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual-behavior
+    attributes:
+      label: Actual Behavior
+      description: What actually happened instead.
+    validations:
+      required: true
+
+  - type: input
+    id: os
+    attributes:
+      label: Operating System
+      placeholder: "e.g. Ubuntu 24.04, Windows 11, macOS 14"
+    validations:
+      required: true
+
+  - type: input
+    id: gpu
+    attributes:
+      label: GPU
+      placeholder: "e.g. NVIDIA RTX 4090 24 GB, AMD RX 7900 XTX, None (CPU only)"
+    validations:
+      required: true
+
+  - type: input
+    id: docker-version
+    attributes:
+      label: Docker Version
+      placeholder: "e.g. Docker 27.1.1, Podman 5.0"
+    validations:
+      required: true
+
+  - type: input
+    id: vram
+    attributes:
+      label: VRAM
+      placeholder: "e.g. 24 GB, 8 GB"
+    validations:
+      required: true
+
+  - type: textarea
+    id: logs
+    attributes:
+      label: Logs
+      description: Paste any relevant log output. This will be rendered as code.
+      render: shell
+    validations:
+      required: false
+
+  - type: textarea
+    id: screenshots
+    attributes:
+      label: Screenshots
+      description: If applicable, add screenshots to help illustrate the problem.
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..4ab38d7e0
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: true
+contact_links:
+  - name: "Question / Help"
+    url: "https://github.com/Light-Heart-Labs/Lighthouse-AI/discussions"
+    about: "Ask questions and get help from the community"
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..d48d3b6fd
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,42 @@
+name: Feature Request
+description: Suggest a new feature or improvement for Lighthouse AI
+labels: ["enhancement"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Have an idea that would make Lighthouse AI better? We'd love to hear it.
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: A clear description of the feature you'd like.
+      placeholder: What should Lighthouse AI do?
+    validations:
+      required: true
+
+  - type: textarea
+    id: use-case
+    attributes:
+      label: Use Case
+      description: Why do you need this? What problem does it solve for you?
+      placeholder: I want this because...
+    validations:
+      required: true
+
+  - type: textarea
+    id: proposed-solution
+    attributes:
+      label: Proposed Solution
+      description: If you have an idea for how this could work, describe it here.
+    validations:
+      required: false
+
+  - type: textarea
+    id: alternatives-considered
+    attributes:
+      label: Alternatives Considered
+      description: Have you considered other approaches or workarounds?
+    validations:
+      required: false
diff --git a/.github/workflows/lint-python.yml b/.github/workflows/lint-python.yml
new file mode 100644
index 000000000..2f31a6a69
--- /dev/null
+++ b/.github/workflows/lint-python.yml
@@ -0,0 +1,31 @@
+name: Python Lint
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+permissions:
+  contents: read
+
+jobs:
+  ruff:
+    name: Lint Python with Ruff
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - name: Install Ruff
+        run: pip install ruff
+
+      - name: Run Ruff on dream-server Python files
+        run: |
+          ruff check dream-server/ \
+            --select E,F,W \
+            --ignore E501
diff --git a/.github/workflows/lint-shell.yml b/.github/workflows/lint-shell.yml
new file mode 100644
index 000000000..fc809e52e
--- /dev/null
+++ b/.github/workflows/lint-shell.yml
@@ -0,0 +1,62 @@
+name: ShellCheck
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+permissions:
+  contents: read
+
+jobs:
+  shellcheck:
+    name: Lint shell scripts
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install ShellCheck
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y shellcheck
+          shellcheck --version
+
+      - name: Run ShellCheck on dream-server shell scripts
+        run: |
+          # Find all .sh files under dream-server/
+          shfiles=$(find dream-server/ -name '*.sh' -type f)
+
+          if [ -z "$shfiles" ]; then
+            echo "No .sh files found under dream-server/"
+            exit 0
+          fi
+
+          echo "Found $(echo "$shfiles" | wc -l) shell scripts"
+          echo ""
+
+          # Run shellcheck:
+          #   -e SC1091  exclude "can't follow sourced files"
+          #   -e SC2034  exclude "unused variables" (many are used by sourced files)
+          #   -S warning treat warnings and above as reportable
+          # shellcheck returns:
+          #   0 = no issues
+          #   1 = errors or warnings found
+          # We fail the job only on error-severity issues by using -S error,
+          # but still display warnings for visibility.
+
+          # First pass: display all warnings and errors for visibility
+          echo "=== ShellCheck results (warnings + errors) ==="
+          echo "$shfiles" | xargs shellcheck \
+            --exclude=SC1091,SC2034 \
+            --severity=warning \
+            --format=gcc \
+            || true
+
+          echo ""
+          echo "=== Checking for error-severity issues (will fail if found) ==="
+
+          # Second pass: fail only on error severity
+          echo "$shfiles" | xargs shellcheck \
+            --exclude=SC1091,SC2034 \
+            --severity=error
diff --git a/.github/workflows/validate-compose.yml b/.github/workflows/validate-compose.yml
new file mode 100644
index 000000000..5ffa7d309
--- /dev/null
+++ b/.github/workflows/validate-compose.yml
@@ -0,0 +1,61 @@
+name: Validate Docker Compose
+
+on:
+  push:
+    branches: [main]
+    paths:
+      - "dream-server/docker-compose*.yml"
+      - "dream-server/compose/**"
+  pull_request:
+    branches: [main]
+    paths:
+      - "dream-server/docker-compose*.yml"
+      - "dream-server/compose/**"
+
+permissions:
+  contents: read
+
+jobs:
+  validate-compose:
+    name: Validate Docker Compose files
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Validate main docker-compose.yml
+        run: |
+          echo "Validating dream-server/docker-compose.yml"
+          docker compose -f dream-server/docker-compose.yml config --quiet
+
+      - name: Validate compose files in dream-server/compose/
+        run: |
+          compose_files=$(find dream-server/compose/ -name '*.yml' -type f 2>/dev/null || true)
+
+          if [ -z "$compose_files" ]; then
+            echo "No compose files found in dream-server/compose/"
+            exit 0
+          fi
+
+          echo "Found compose files:"
+          echo "$compose_files"
+          echo ""
+
+          failed=0
+          for f in $compose_files; do
+            echo "Validating $f ..."
+            if docker compose -f "$f" config --quiet 2>&1; then
+              echo "  OK"
+            else
+              echo "  FAILED"
+              failed=1
+            fi
+          done
+
+          if [ "$failed" -ne 0 ]; then
+            echo ""
+            echo "One or more compose files failed validation."
+            exit 1
+          fi
+
+          echo ""
+          echo "All compose files validated successfully."
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..80a2f673c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,54 @@
+# Contributing to Lighthouse AI
+
+First off, thanks for wanting to contribute! Lighthouse AI is an open source project and we welcome help from everyone -- whether you're fixing a typo, adding a cookbook recipe, or tackling a full feature.
+
+## Quick Start
+
+1. **Fork** this repository and **clone** your fork locally.
+2. Create a **branch** for your work:
+ ```bash
+ git checkout -b my-change
+ ```
+3. Make your changes, test them locally, and commit.
+4. Open a **pull request** against `main`.
+
+That's it. No CLA, no hoops.
+
+## What We're Looking For
+
+All kinds of contributions are valuable. Here are some great places to start:
+
+- **Bug fixes** -- something broken? Fix it and send a PR.
+- **Documentation improvements** -- clearer install instructions, better troubleshooting guides, typo fixes.
+- **New cookbook recipes** -- workflows, prompt templates, or integration examples that help other users.
+- **Test coverage** -- more tests means fewer surprises.
+- **Feature work** -- check the issue tracker for ideas, or propose your own.
+
+If you're new here, look for issues labeled **`good first issue`**. These are scoped, well-defined tasks that are a great way to get familiar with the codebase.
+
+## Code Style
+
+Nothing exotic:
+
+- **Shell scripts** are written in Bash. Use `shellcheck` if you can.
+- **Python** uses standard formatting (we're not picky -- just be consistent with the file you're editing).
+- Keep things readable. Comments are welcome where intent isn't obvious.
+
+## Pull Request Process
+
+1. **Describe your changes** in the PR description. A sentence or two is fine for small changes; more detail helps for larger ones.
+2. **Link related issues** if they exist (e.g. "Fixes #42").
+3. Make sure existing functionality isn't broken.
+4. A maintainer will review your PR and may suggest changes. We try to be responsive.
+
+## Where to Ask Questions
+
+Not sure about something? Open a thread in [GitHub Discussions](https://github.com/Light-Heart-Labs/Lighthouse-AI/discussions). We're happy to help you figure out the best approach before you write any code.
+
+## License
+
+By contributing to Lighthouse AI, you agree that your contributions will be licensed under the [Apache License 2.0](LICENSE), the same license that covers this project.
+
+---
+
+Thanks for helping make local AI infrastructure better for everyone.
diff --git a/README.md b/README.md
index aeb77a2e7..76dd1a22e 100644
--- a/README.md
+++ b/README.md
@@ -1,68 +1,150 @@
+
+
# Lighthouse AI
+**Local AI infrastructure. Your hardware. Your data. Your rules.**
+
[](LICENSE)
-[](https://github.com/Light-Heart-Labs/Lighthouse-AI)
+[](https://github.com/Light-Heart-Labs/Lighthouse-AI/stargazers)
+[](https://github.com/Light-Heart-Labs/Lighthouse-AI/releases)
+[](https://github.com/Light-Heart-Labs/Lighthouse-AI/actions)
-**Your hardware. Your data. Your rules.**
+
+
+---
-One installer. Bare metal to fully running local AI stack in 10 minutes — LLM inference, chat UI, voice agents, workflow automation, RAG, and privacy tools. No subscriptions. No cloud. Runs entirely offline.
+## Dream Server — Local AI in 10 Minutes
+
+One installer. Bare metal to a fully running AI stack — LLM, chat UI, voice agents, workflow automation, RAG, and privacy tools. No cloud. No subscriptions. Runs entirely offline after setup.
```bash
curl -fsSL https://raw.githubusercontent.com/Light-Heart-Labs/Lighthouse-AI/main/dream-server/get-dream-server.sh | bash
```
-
+
+
-## What You Get
+---
-| | |
-|---|---|
-| **LLM Inference** (vLLM) | GPU-accelerated, auto-selects 7B to 72B models for your hardware |
-| **Chat UI** (Open WebUI) | Full-featured chat interface with model management |
-| **Voice** (Whisper + Kokoro + LiveKit) | Speech-to-text, text-to-speech, real-time WebRTC conversations |
-| **Workflows** (n8n) | Visual workflow editor with 400+ integrations |
-| **RAG** (Qdrant) | Vector database for document Q&A |
-| **Privacy Shield** | PII redaction before anything leaves your network |
-| **Dashboard** | Real-time GPU metrics, service health, system status |
-| **API Gateway** (LiteLLM) | Multi-model routing, OpenAI-compatible API |
-| **OpenClaw Agents** | Multi-agent AI coordination on local hardware |
-
-### vs. The Alternatives
-
-| Feature | Dream Server | Ollama + WebUI | LocalAI |
-|---------|:---:|:---:|:---:|
-| Full-stack one-command install | **LLM + voice + workflows + RAG + privacy** | LLM + chat only | LLM only |
-| Hardware auto-detect + model selection | **Yes** | No | No |
-| Voice agents (STT + TTS + WebRTC) | **Built in** | No | Limited |
-| Inference engine | **vLLM** (continuous batching) | llama.cpp | llama.cpp |
-| Workflow automation | **n8n (400+ integrations)** | No | No |
-| PII redaction / privacy tools | **Built in** | No | No |
-| Multi-GPU | **Yes** | Partial | Partial |
+## Architecture
+
+```mermaid
+graph TB
+    subgraph User[" You "]
+        Browser(["Browser"])
+        Mic(["Microphone"])
+        API(["API Client"])
+    end
+
+    subgraph DreamServer["Dream Server (Docker Compose)"]
+        subgraph Core["Core"]
+            VLLM["vLLM · :8000<br/>LLM Inference"]
+            WebUI["Open WebUI · :3000<br/>Chat Interface"]
+            Dashboard["Dashboard · :3001<br/>GPU Metrics"]
+        end
+
+        subgraph Voice["Voice"]
+            Whisper["Whisper · :9000<br/>Speech → Text"]
+            Kokoro["Kokoro · :8880<br/>Text → Speech"]
+            LiveKit["LiveKit · :7880<br/>WebRTC"]
+            VoiceAgent["Voice Agent"]
+        end
+
+        subgraph RAGp["RAG"]
+            Qdrant["Qdrant · :6333<br/>Vector DB"]
+            Embeddings["Embeddings · :8090"]
+        end
+
+        subgraph Workflows["Workflows"]
+            N8N["n8n · :5678<br/>400+ Integrations"]
+        end
+
+        subgraph Agents["Agents"]
+            OpenClaw["OpenClaw · :7860<br/>Multi-Agent"]
+            ToolProxy["Tool Proxy<br/>vLLM Bridge"]
+        end
+
+        subgraph Privacy["Privacy"]
+            Shield["Privacy Shield · :8085<br/>PII Redaction"]
+        end
+    end
+
+    Browser --> WebUI
+    Browser --> Dashboard
+    Browser --> N8N
+    Mic --> LiveKit
+    API --> VLLM
+
+    WebUI --> VLLM
+    VoiceAgent --> Whisper
+    VoiceAgent --> Kokoro
+    VoiceAgent --> VLLM
+    LiveKit --> VoiceAgent
+    OpenClaw --> ToolProxy
+    ToolProxy --> VLLM
+    Shield -.->|PII scrubbed| VLLM
+
+    style Core fill:#e8f0fe,stroke:#1a73e8,color:#1a1a1a
+    style Voice fill:#fce8e6,stroke:#d93025,color:#1a1a1a
+    style RAGp fill:#e6f4ea,stroke:#1e8e3e,color:#1a1a1a
+    style Workflows fill:#fef7e0,stroke:#f9ab00,color:#1a1a1a
+    style Agents fill:#f3e8fd,stroke:#9334e6,color:#1a1a1a
+    style Privacy fill:#e8eaed,stroke:#5f6368,color:#1a1a1a
+```
+
+The installer auto-detects your GPU and activates the right profiles. Core services start immediately; voice, RAG, workflows, and agents activate based on your hardware and preferences.
---
-## Hardware Tiers (Auto-Detected)
+## Who Is This For?
-The installer detects your GPU and picks the optimal model automatically:
+**Hobbyists** — Want local ChatGPT without subscriptions? Install Dream Server, open `localhost:3000`, start chatting. Voice mode, document Q&A, and workflow automation are one toggle away.
+
+**Developers** — Building AI agents? Dream Server gives you a local OpenAI-compatible API (vLLM), multi-agent coordination (OpenClaw), and a workflow engine (n8n) — all on your GPU. No API keys, no rate limits, no cost per token.
+
+**Teams** — Need private AI infrastructure? Everything runs on your hardware. The Privacy Shield scrubs PII before anything leaves your network. Deploy once, use from any device on your LAN.
+
+---
+
+## What You Get
+
+| Component | What It Does |
+|-----------|-------------|
+| **vLLM** | GPU-accelerated LLM inference with continuous batching — auto-selects 7B to 72B models for your hardware |
+| **Open WebUI** | Full-featured chat interface with conversation history, model switching, web search |
+| **Dashboard** | Real-time GPU metrics (VRAM, temp, utilization), service health, model management |
+| **Whisper** | Speech-to-text — local, fast, private |
+| **Kokoro** | Text-to-speech — natural-sounding voices, no cloud |
+| **LiveKit** | Real-time WebRTC voice conversations — talk to your AI like a phone call |
+| **n8n** | Visual workflow automation with 400+ integrations (GitHub, Slack, email, webhooks) |
+| **Qdrant** | Vector database for document Q&A (RAG) |
+| **OpenClaw** | Multi-agent AI framework — agents coordinating autonomously on your GPU |
+| **Privacy Shield** | PII redaction proxy — scrubs personal data before any external API call |
+
+### Hardware Tiers (Auto-Detected)
| Tier | VRAM | Model | Example GPUs |
|------|------|-------|--------------|
| Entry | <12GB | Qwen2.5-7B | RTX 3080, RTX 4070 |
-| Prosumer | 12-20GB | Qwen2.5-14B-AWQ | RTX 3090, RTX 4080 |
-| Pro | 20-40GB | Qwen2.5-32B-AWQ | RTX 4090, A6000 |
+| Prosumer | 12–20GB | Qwen2.5-14B-AWQ | RTX 3090, RTX 4080 |
+| Pro | 20–40GB | Qwen2.5-32B-AWQ | RTX 4090, A6000 |
| Enterprise | 40GB+ | Qwen2.5-72B-AWQ | A100, H100, multi-GPU |
-Override: `./install.sh --tier 3` | Windows: [`install.ps1`](dream-server/README.md#windows) handles WSL2 + Docker automatically
+**Bootstrap mode:** Chat in 2 minutes. A tiny model starts instantly while the full model downloads in the background. Hot-swap with zero downtime when ready.
-**Bootstrap mode:** Starts a tiny model instantly, lets you chat in 2 minutes while the full model downloads in the background. Hot-swap with zero downtime when ready.
+### How It Compares
----
-
-## OpenClaw — Multi-Agent AI on Your GPU
-
-Dream Server ships with local [OpenClaw](https://openclaw.io) support out of the box — the multi-agent framework for AI agents coordinating autonomously on your hardware. Includes vLLM Tool Call Proxy, battle-tested configs, and workspace templates for agent identity.
+| | Dream Server | Ollama + Open WebUI | LocalAI |
+|---|:---:|:---:|:---:|
+| Full-stack install (LLM + voice + workflows + RAG + privacy) | **One command** | Manual assembly | Manual assembly |
+| Hardware auto-detection + model selection | **Yes** | No | No |
+| Voice agents (STT + TTS + WebRTC) | **Built in** | No | Partial |
+| Inference engine | **vLLM** (continuous batching) | llama.cpp | llama.cpp |
+| Workflow automation | **n8n (400+ integrations)** | No | No |
+| PII redaction | **Built in** | No | No |
+| Multi-agent framework | **OpenClaw** | No | No |
-This repo was born from the [OpenClaw Collective](COLLECTIVE.md) — 3 AI agents, 3,464 commits, 8 days, three shipping products built autonomously on local GPUs. Dream Server packages that into something anyone can set up in 10 minutes.
+Ollama is great for running models locally. Dream Server is a complete AI platform — inference, voice, workflows, RAG, agents, privacy, and monitoring in one installer.
---
@@ -72,13 +154,13 @@ Standalone tools for running persistent AI agents in production. Each works inde
| Tool | Purpose |
|------|---------|
-| [**Guardian**](guardian/) | Self-healing process watchdog — monitors services, restores from backup, runs as root so agents can't kill it |
+| [**Guardian**](guardian/) | Self-healing process watchdog — monitors services, auto-restores from backup, runs as root so agents can't kill it |
| [**Memory Shepherd**](memory-shepherd/) | Periodic memory reset to prevent identity drift in long-running agents |
| [**Token Spy**](token-spy/) | API cost monitoring with real-time dashboard and auto-kill for runaway sessions |
-| [**vLLM Tool Proxy**](scripts/vllm-tool-proxy.py) | Makes local model tool calling work with OpenClaw — SSE re-wrapping, extraction, loop protection |
-| [**LLM Cold Storage**](scripts/llm-cold-storage.sh) | Archives idle HuggingFace models to free disk, models stay resolvable via symlink |
+| [**vLLM Tool Proxy**](dream-server/vllm-tool-proxy/) | Makes local vLLM tool calling work with OpenClaw — SSE re-wrapping, extraction, loop protection |
+| [**LLM Cold Storage**](scripts/llm-cold-storage.sh) | Archives idle HuggingFace models to free disk, keeps them resolvable via symlink |
-[Toolkit install guide →](docs/SETUP.md) | [Philosophy & patterns →](docs/PHILOSOPHY.md)
+These tools were born from the [OpenClaw Collective](COLLECTIVE.md) — 3 AI agents running autonomously on local GPUs, producing 3,464 commits in 8 days. Dream Server packages the infrastructure they built into something anyone can use.
---
@@ -86,12 +168,14 @@ Standalone tools for running persistent AI agents in production. Each works inde
| | |
|---|---|
-| [**Dream Server QUICKSTART**](dream-server/QUICKSTART.md) | Step-by-step install guide |
-| [**FAQ**](dream-server/FAQ.md) | Troubleshooting, usage, advanced config |
-| [**Hardware Guide**](dream-server/docs/HARDWARE-GUIDE.md) | What to buy — GPU recommendations with real prices |
-| [**Cookbook**](docs/cookbook/) | Recipes: voice agents, RAG, code assistant, privacy proxy, multi-GPU, swarms |
-| [**Architecture**](docs/ARCHITECTURE.md) | How it all works under the hood |
-| [**COLLECTIVE.md**](COLLECTIVE.md) | Origin story — the AI agents that built these tools |
+| [**Quickstart**](dream-server/QUICKSTART.md) | Step-by-step install guide with troubleshooting |
+| [**FAQ**](dream-server/FAQ.md) | Common questions, hardware advice, configuration |
+| [**Hardware Guide**](dream-server/docs/HARDWARE-GUIDE.md) | GPU recommendations with real prices |
+| [**Cookbook**](docs/cookbook/) | Recipes: voice agents, RAG pipelines, code assistant, privacy proxy |
+| [**Architecture**](docs/ARCHITECTURE.md) | Deep dive into the system design |
+| [**Contributing**](CONTRIBUTING.md) | How to contribute to Lighthouse AI |
+
+Windows: [`install.ps1`](dream-server/README.md#windows) handles WSL2 + Docker + NVIDIA drivers automatically.
---
@@ -99,4 +183,4 @@ Standalone tools for running persistent AI agents in production. Each works inde
Apache 2.0 — see [LICENSE](LICENSE). Use it, modify it, ship it.
-Built by [Lightheart Labs](https://github.com/Light-Heart-Labs) and the [Android Collective](COLLECTIVE.md).
+Built by [Lightheart Labs](https://github.com/Light-Heart-Labs) and the [OpenClaw Collective](COLLECTIVE.md).
diff --git a/RELEASE-v1.0.0.md b/RELEASE-v1.0.0.md
new file mode 100644
index 000000000..06ddebe8c
--- /dev/null
+++ b/RELEASE-v1.0.0.md
@@ -0,0 +1,113 @@
+# Dream Server v1.0.0
+
+First public release of Dream Server -- your turnkey local AI stack.
+
+**Your hardware. Your data. Your rules.**
+
+One installer. Bare metal to fully running local AI in 10 minutes -- LLM inference, chat UI, voice agents, workflow automation, RAG, and privacy tools. No subscriptions. No cloud. Runs entirely offline.
+
+## Highlights
+
+- **Full-stack local AI in one command** -- vLLM inference, chat UI, voice agents, workflow automation, RAG, privacy shield, and a real-time dashboard, all wired together and running on your GPU.
+- **Automatic hardware detection** -- the installer probes your GPU, selects the optimal model (7B to 72B parameters), and configures VRAM allocation, context windows, and resource limits without manual tuning.
+- **Bootstrap mode for instant start** -- a lightweight 1.5B model boots in under a minute so you can start chatting immediately while the full model downloads in the background. Hot-swap with zero downtime when ready.
+- **End-to-end voice pipeline** -- Whisper speech-to-text, Kokoro text-to-speech, and LiveKit WebRTC voice agents let you have real-time spoken conversations with your local LLM entirely on-premises.
+- **OpenClaw multi-agent support** -- built-in integration with the OpenClaw agent framework, including a vLLM Tool Call Proxy, pre-configured workspace templates, and battle-tested configs for autonomous AI coordination on local hardware.
+
+## What's Included
+
+| Component | Image | Version | Profile |
+|-----------|-------|---------|---------|
+| **vLLM** (LLM Inference) | `vllm/vllm-openai` | v0.15.1 | core |
+| **Open WebUI** (Chat Interface) | `ghcr.io/open-webui/open-webui` | v0.7.2 | core |
+| **Dashboard UI** (Control Center) | `dream-dashboard` | local build | core |
+| **Dashboard API** (Status Backend) | `dream-dashboard-api` | local build | core |
+| **Whisper** (Speech-to-Text) | `onerahmet/openai-whisper-asr-webservice` | v1.4.1 | voice |
+| **Kokoro** (Text-to-Speech) | `ghcr.io/remsky/kokoro-fastapi-cpu` | v0.2.4 | voice |
+| **LiveKit** (WebRTC Voice) | `dream-livekit` | local build | voice |
+| **LiveKit Voice Agent** | `dream-voice-agent` | local build | voice |
+| **n8n** (Workflow Automation) | `n8nio/n8n` | 2.6.4 | workflows |
+| **Qdrant** (Vector Database) | `qdrant/qdrant` | v1.16.3 | rag |
+| **Text Embeddings** | `ghcr.io/huggingface/text-embeddings-inference` | cpu-1.9.1 | rag |
+| **LiteLLM** (API Gateway) | `ghcr.io/berriai/litellm` | v1.81.3-stable | monitoring |
+| **Token Spy** (Usage Monitoring) | `lightheartlabs/token-spy` | latest | monitoring |
+| **TimescaleDB** (Token Spy DB) | `timescale/timescaledb` | latest-pg15 | monitoring |
+| **Redis** (Rate Limiting) | `redis` | 7-alpine | monitoring |
+| **Privacy Shield** (PII Redaction) | `dream-privacy-shield` | local build | privacy |
+| **OpenClaw** (Agent Framework) | `ghcr.io/openclaw/openclaw` | latest | openclaw |
+| **vLLM Tool Proxy** | `dream-vllm-tool-proxy` | local build | openclaw |
+
+## Hardware Support
+
+The installer automatically detects your GPU and selects the optimal model:
+
+| Tier | VRAM | Model | Context | Example GPUs |
+|------|------|-------|---------|--------------|
+| **Entry** | < 12 GB | Qwen2.5-7B | 8K | RTX 3080, RTX 4070 |
+| **Prosumer** | 12 -- 20 GB | Qwen2.5-14B-AWQ | 16K | RTX 3090, RTX 4080 |
+| **Pro** | 20 -- 40 GB | Qwen2.5-32B-AWQ | 32K | RTX 4090, A6000 |
+| **Enterprise** | 40 GB+ | Qwen2.5-72B-AWQ | 32K | A100, H100, multi-GPU |
+
+Override with `./install.sh --tier 3` if you know what you want.
+
+## Install
+
+**One-liner (Linux / WSL):**
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/Light-Heart-Labs/Lighthouse-AI/main/dream-server/get-dream-server.sh | bash
+```
+
+**Manual clone:**
+
+```bash
+git clone https://github.com/Light-Heart-Labs/Lighthouse-AI.git
+cd Lighthouse-AI/dream-server
+./install.sh
+```
+
+**Windows (PowerShell):**
+
+```powershell
+Invoke-WebRequest -Uri "https://raw.githubusercontent.com/Light-Heart-Labs/Lighthouse-AI/main/dream-server/install.ps1" -OutFile install.ps1
+.\install.ps1
+```
+
+The Windows installer handles WSL2 setup, Docker Desktop, and NVIDIA driver configuration automatically.
+
+**Requirements:** Docker with Compose v2+, NVIDIA GPU with 8 GB+ VRAM (16 GB+ recommended), NVIDIA Container Toolkit, 40 GB+ disk space.
+
+## Operations Toolkit
+
+Standalone tools for running persistent AI agents in production, included in the repo:
+
+- **Guardian** -- Self-healing process watchdog that monitors services, restores from backup, and runs as root so agents cannot kill it.
+- **Memory Shepherd** -- Periodic memory reset to prevent identity drift in long-running agents.
+- **Token Spy** -- API cost monitoring with real-time dashboard and auto-kill for runaway sessions.
+- **vLLM Tool Proxy** -- Makes local model tool calling work with OpenClaw via SSE re-wrapping and loop protection.
+- **LLM Cold Storage** -- Archives idle HuggingFace models to free disk while keeping them resolvable via symlink.
+
+## Known Limitations
+
+- First release -- expect rough edges.
+- LiveKit voice requires manual profile activation (`--profile voice`).
+- OpenClaw integration is experimental.
+- No ARM / Apple Silicon support yet (planned).
+- Models download on first run (20 GB+ for full-size models).
+- Token Spy and TimescaleDB images are pinned to `latest` -- consider pinning exact versions in production.
+
+## What's Next
+
+- ARM / Apple Silicon support
+- One-click model switching in dashboard
+- Automated backup and restore
+- Community workflow templates
+- Pinned versions for all remaining `latest` tags
+
+## Contributors
+
+Built by [Lightheart Labs](https://github.com/Light-Heart-Labs) and the [OpenClaw Collective](https://github.com/Light-Heart-Labs/Lighthouse-AI/blob/main/COLLECTIVE.md).
+
+---
+
+**License:** Apache 2.0 -- use it, modify it, ship it.