Skip to content

Latest commit

 

History

History
676 lines (491 loc) · 14 KB

File metadata and controls

676 lines (491 loc) · 14 KB

🚀 LORD PROTOCOL - COMPLETE SETUP GUIDE

From Zero to Full Stack: Exhaustive Installation & Run Commands

Target System: macOS on Apple Silicon (M1/M2/M3/M4/M5)
Estimated Time: 30-60 minutes (depends on download speeds)
Skill Level: Beginner → Expert


📋 TABLE OF CONTENTS

  1. Prerequisites (Everyone Starts Here)
  2. Package Managers & Core Tools
  3. Python Environment
  4. Rust Environment
  5. AI Models Download
  6. Redis Database
  7. VSCode Setup
  8. Xcode Setup
  9. Start Everything
  10. Verification Checklist
  11. Troubleshooting

1. PREREQUISITES

1.1 Check Your System

# Check macOS version (need 14.0+ for latest features)
sw_vers

# Check chip (must be Apple Silicon)
uname -m
# Should output: arm64

# Check available RAM (need 16GB minimum, 32GB+ recommended)
sysctl -n hw.memsize | awk '{print $0/1073741824 " GB"}'

# Check available disk space (need ~50GB free)
df -h /

1.2 Open Terminal

  1. Press Cmd + Space
  2. Type "Terminal"
  3. Press Enter

2. PACKAGE MANAGERS & CORE TOOLS

2.1 Install Homebrew (macOS Package Manager)

# Install Homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"

# Follow the instructions it prints!
# Usually you need to run these after installation:
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"

# Verify installation
brew --version

2.2 Install Core Tools

# Essential development tools
brew install git
brew install curl
brew install wget
brew install jq
brew install tree

# Verify
git --version
curl --version
jq --version

2.3 Install Docker Desktop

# Download Docker Desktop for Mac (Apple Silicon)
# Option 1: Via Homebrew
brew install --cask docker

# Option 2: Manual download
# Go to: https://www.docker.com/products/docker-desktop/
# Download "Docker Desktop for Mac with Apple Silicon"
# Open the .dmg and drag Docker to Applications

# After installation, open Docker Desktop from Applications
# Wait for it to start (whale icon in menu bar)

# Verify
docker --version
docker-compose --version

3. PYTHON ENVIRONMENT

3.1 Install Miniforge (ARM64-optimized Python)

# Install Miniforge (better than standard Python for M-series)
brew install miniforge

# Initialize conda for your shell
conda init zsh

# IMPORTANT: Close and reopen Terminal, then continue

3.2 Create AI Environment

# Create dedicated environment
conda create -n ai-stack python=3.11 -y

# Activate it
conda activate ai-stack

# Verify Python
python --version
# Should show: Python 3.11.x

which python
# Should show: /opt/homebrew/Caskroom/miniforge/base/envs/ai-stack/bin/python

3.3 Install Python Packages

# Make sure you're in the ai-stack environment
conda activate ai-stack

# Install MLX (Apple's ML framework)
pip install mlx
pip install mlx-lm

# Install HuggingFace tools
pip install huggingface_hub
pip install transformers

# Install other dependencies
pip install numpy
pip install scipy
pip install torch --index-url https://download.pytorch.org/whl/cpu
pip install flask
pip install redis
pip install requests

# Verify MLX
python -c "import mlx.core as mx; print('MLX version:', mx.__version__)"

4. RUST ENVIRONMENT

4.1 Install Rust

# Install rustup (Rust toolchain manager)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Choose option 1 (default installation)
# When complete, run:
source "$HOME/.cargo/env"

# Verify
rustc --version
cargo --version

4.2 Install Rust Tools

# Add useful Rust tools
cargo install cargo-watch    # Auto-rebuild on file changes
cargo install cargo-edit     # Easy dependency management

# Verify
cargo watch --version

5. AI MODELS DOWNLOAD

5.1 Create Models Directory

mkdir -p ~/models/codebrain
mkdir -p ~/models/guardian
mkdir -p ~/models/vjepa

5.2 Login to HuggingFace (Optional but recommended)

# Create free account at: https://huggingface.co/join
# Then login:
huggingface-cli login
# Paste your token when prompted

5.3 Download CodeBrain (Qwen2.5-Coder-14B)

# This is ~8.5GB - will take 10-30 minutes depending on internet
echo "⏳ Downloading CodeBrain (8.5GB)... This will take a while."

huggingface-cli download \
    mlx-community/Qwen2.5-Coder-14B-Instruct-4bit \
    --local-dir ~/models/codebrain

# Verify
ls -la ~/models/codebrain/
# Should see: config.json, model.safetensors, etc.

5.4 Download Guardian (Llama Guard 3-1B)

# This is ~440MB - quick download
echo "⏳ Downloading Guardian (440MB)..."

huggingface-cli download \
    mlx-community/Llama-Guard-3-1B-INT4 \
    --local-dir ~/models/guardian

# Verify
ls -la ~/models/guardian/

5.5 Download V-JEPA Weights (Optional - for advanced users)

# V-JEPA is ~2.5GB
echo "⏳ Downloading V-JEPA..."

huggingface-cli download \
    facebook/vjepa2 \
    vjepa2-vitl-fpc64-256.pth \
    --local-dir ~/models/vjepa

# Verify
ls -la ~/models/vjepa/

6. REDIS DATABASE

6.1 Start Redis with Docker

# Pull and run Redis Stack (includes vector search)
docker run -d \
    --name latent-redis \
    -p 6379:6379 \
    -p 8001:8001 \
    redis/redis-stack:latest

# Verify it's running
docker ps | grep redis

# Test connection
docker exec -it latent-redis redis-cli ping
# Should output: PONG

6.2 Alternative: Use Docker Compose

cd ~/STEM_SCAFFOLDING/VJEPA_CODE2CODE/redis

# Start Redis
docker-compose up -d

# Check logs
docker-compose logs -f

7. VSCODE SETUP

7.1 Install VSCode

# Install via Homebrew
brew install --cask visual-studio-code

# Or download from: https://code.visualstudio.com/

7.2 Install Extensions

# Open VSCode
code

# Install extensions from command line
code --install-extension continue.continue
code --install-extension rust-lang.rust-analyzer
code --install-extension ms-python.python
code --install-extension ms-python.vscode-pylance
code --install-extension vadimcn.vscode-lldb
code --install-extension tamasfe.even-better-toml
code --install-extension redhat.vscode-yaml

7.3 Configure Continue Extension

# Copy the Continue config
cp ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY/config/continue-config.json ~/.continue/config.json

# If directory doesn't exist:
mkdir -p ~/.continue
cp ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY/config/continue-config.json ~/.continue/config.json

8. XCODE SETUP

8.1 Install Xcode

# Install Xcode Command Line Tools (required)
xcode-select --install

# For full Xcode (optional - for iOS/macOS app development):
# Download from App Store or:
# https://developer.apple.com/xcode/

8.2 Install XcodeGen

# XcodeGen generates Xcode projects from YAML
brew install xcodegen

# Verify
xcodegen --version

8.3 Generate Sovereign Sentinel Project

cd ~/STEM_SCAFFOLDING/SOVEREIGN_SENTINEL

# Generate Xcode project
xcodegen generate

# Open in Xcode
open SovereignSentinel.xcodeproj

9. START EVERYTHING

9.1 Quick Start Script

# Create a master startup script
cat > ~/start-lord-protocol.sh << 'EOF'
#!/bin/bash
# Starts Redis, CodeBrain (port 8080) and Guardian (port 8081) in the
# background. PIDs are recorded in /tmp so ~/stop-lord-protocol.sh can
# shut down exactly the processes this script started.
set -u

echo "🚀 Starting Lord Protocol Stack..."

# Activate Python environment
source /opt/homebrew/Caskroom/miniforge/base/etc/profile.d/conda.sh
conda activate ai-stack

# Start Redis (if not running).
# Match the exact container name: a bare 'grep latent-redis' over the full
# 'docker ps' output could also match an image name or similar container.
if ! docker ps --format '{{.Names}}' | grep -qx latent-redis; then
    echo "Starting Redis..."
    docker start latent-redis 2>/dev/null || \
    docker run -d --name latent-redis -p 6379:6379 -p 8001:8001 redis/redis-stack:latest
fi

# Start CodeBrain (Port 8080)
echo "Starting CodeBrain on port 8080..."
# Abort if the project directory is missing; otherwise both servers would be
# launched from whatever directory the script happened to run in.
cd ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY || { echo "❌ ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY not found" >&2; exit 1; }
python servers/serve_codebrain.py --port 8080 &
CODEBRAIN_PID=$!
echo "CodeBrain PID: $CODEBRAIN_PID"

# Give the first model time to start loading before launching the second server
sleep 5

# Start Guardian (Port 8081)
echo "Starting Guardian on port 8081..."
python servers/serve_guardian.py --port 8081 &
GUARDIAN_PID=$!
echo "Guardian PID: $GUARDIAN_PID"

# Save PIDs for the stop script
echo "$CODEBRAIN_PID" > /tmp/codebrain.pid
echo "$GUARDIAN_PID" > /tmp/guardian.pid

echo ""
echo "✅ Stack Started!"
echo "   CodeBrain: http://127.0.0.1:8080"
echo "   Guardian:  http://127.0.0.1:8081"
echo "   Redis:     redis://127.0.0.1:6379"
echo ""
echo "To stop: ~/stop-lord-protocol.sh"
EOF

chmod +x ~/start-lord-protocol.sh

9.2 Create Stop Script

# Create the matching shutdown script
cat > ~/stop-lord-protocol.sh << 'EOF'
#!/bin/bash
# Stops the servers started by ~/start-lord-protocol.sh.
echo "🛑 Stopping Lord Protocol Stack..."

# Kill servers via their recorded PIDs (graceful SIGTERM first).
# Guard on the pidfile existing: 'kill "$(cat missing-file)"' would run
# kill with no arguments and only mask the error.
for pidfile in /tmp/codebrain.pid /tmp/guardian.pid; do
    if [ -f "$pidfile" ]; then
        kill "$(cat "$pidfile")" 2>/dev/null
    fi
done

# Backup: force-kill anything still listening on the server ports.
for port in 8080 8081; do
    pids=$(lsof -ti:"$port" 2>/dev/null)
    if [ -n "$pids" ]; then
        # $pids intentionally unquoted: lsof may report several PIDs,
        # one per line, and each must become a separate argument.
        kill -9 $pids 2>/dev/null
    fi
done

# Stop Redis (optional - keep data)
# docker stop latent-redis

rm -f /tmp/codebrain.pid /tmp/guardian.pid

echo "✅ Stopped!"
EOF

chmod +x ~/stop-lord-protocol.sh

9.3 Run the Stack

# Start everything
~/start-lord-protocol.sh

# Wait 30-60 seconds for models to load
# Watch the terminal for "Model loaded successfully"

10. VERIFICATION CHECKLIST

10.1 Test Each Component

echo "🧪 Testing Lord Protocol Stack..."

# Test Redis
echo -n "Redis: "
redis-cli -h 127.0.0.1 ping 2>/dev/null || docker exec latent-redis redis-cli ping

# Test CodeBrain
echo -n "CodeBrain: "
curl -s http://127.0.0.1:8080/health | jq -r '.status'

# Test Guardian
echo -n "Guardian: "
curl -s http://127.0.0.1:8081/health | jq -r '.status'

echo ""
echo "If all show 'healthy' or 'PONG', you're ready! 🎉"

10.2 Test AI Inference

# Test CodeBrain with a coding question
curl -X POST http://127.0.0.1:8080/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "Qwen2.5-Coder-14B-Instruct",
        "messages": [{"role": "user", "content": "Write hello world in Python"}]
    }' | jq -r '.choices[0].message.content'

# Test Guardian security scan
curl -X POST http://127.0.0.1:8081/v1/scan \
    -H "Content-Type: application/json" \
    -d '{"content": "api_key = \"sk-secret123\""}' | jq

10.3 Verify File Structure

# Check all projects exist
echo "📁 Checking project structure..."

ls -la ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY/
ls -la ~/STEM_SCAFFOLDING/SOVEREIGN_SENTINEL/
ls -la ~/STEM_SCAFFOLDING/VJEPA_CODE2CODE/
ls -la ~/STEM_SCAFFOLDING/guardian-vscode/
ls -la ~/STEM_SCAFFOLDING/GUARDIAN_PROTOCOL/

11. TROUBLESHOOTING

11.1 "MLX not found"

# Make sure you're in the right environment
conda activate ai-stack

# Reinstall MLX
pip uninstall mlx mlx-lm -y
pip install mlx mlx-lm

11.2 "Model not found"

# Check if models downloaded
ls -la ~/models/codebrain/
ls -la ~/models/guardian/

# Re-download if needed
huggingface-cli download mlx-community/Qwen2.5-Coder-14B-Instruct-4bit --local-dir ~/models/codebrain

11.3 "Port already in use"

# Find and kill process on port
lsof -ti:8080 | xargs kill -9
lsof -ti:8081 | xargs kill -9

11.4 "Out of Memory"

# Check memory usage
top -l 1 | head -n 10

# Close other applications
# Reduce context window in config
# Use smaller model (7B instead of 14B)

11.5 "Docker not running"

# Start Docker Desktop
open -a Docker

# Wait for whale icon to appear in menu bar
# Then try again

11.6 "Redis connection refused"

# Check if container is running
docker ps | grep redis

# Start if stopped
docker start latent-redis

# Or create new container
docker run -d --name latent-redis -p 6379:6379 -p 8001:8001 redis/redis-stack:latest

11.7 "Rust build fails"

# Update Rust
rustup update

# Clean and rebuild
cd ~/STEM_SCAFFOLDING/VJEPA_CODE2CODE/rust
cargo clean
cargo build --release

📊 SUMMARY: ALL COMMANDS IN ORDER

# ========================================
# COPY-PASTE EVERYTHING BELOW
# ========================================

# 1. Homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"

# 2. Core tools
brew install git curl wget jq tree
brew install --cask docker
brew install --cask visual-studio-code
brew install miniforge
brew install xcodegen

# 3. Rust
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
source "$HOME/.cargo/env"

# 4. Python (CLOSE AND REOPEN TERMINAL FIRST)
conda init zsh
# (reopen terminal)
conda create -n ai-stack python=3.11 -y
conda activate ai-stack
pip install mlx mlx-lm huggingface_hub transformers numpy scipy flask redis requests
pip install torch --index-url https://download.pytorch.org/whl/cpu

# 5. Models (downloads ~9GB)
mkdir -p ~/models/{codebrain,guardian,vjepa}
huggingface-cli download mlx-community/Qwen2.5-Coder-14B-Instruct-4bit --local-dir ~/models/codebrain
huggingface-cli download mlx-community/Llama-Guard-3-1B-INT4 --local-dir ~/models/guardian

# 6. Redis
docker run -d --name latent-redis -p 6379:6379 -p 8001:8001 redis/redis-stack:latest

# 7. VSCode extensions
code --install-extension continue.continue
code --install-extension rust-lang.rust-analyzer
code --install-extension ms-python.python

# 8. Configure Continue
mkdir -p ~/.continue
cp ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY/config/continue-config.json ~/.continue/config.json

# 9. Start the stack
cd ~/STEM_SCAFFOLDING/LOCAL_SOVEREIGNTY
~/start-lord-protocol.sh   # master script created in step 9.1

# 10. Verify
curl http://127.0.0.1:8080/health
curl http://127.0.0.1:8081/health

🎉 YOU'RE DONE!

What you now have:

  • 🧠 CodeBrain (Qwen2.5-Coder-14B) on port 8080
  • 🛡️ Guardian (Llama Guard 3-1B) on port 8081
  • 💾 Redis vector database on port 6379
  • 🧩 VSCode with Continue extension
  • 📱 Sovereign Sentinel Xcode project
  • 🔬 V-JEPA Code-to-Code system

Open VSCode and start coding with your local AI!

code ~/STEM_SCAFFOLDING

∞ - 1 = ∞

Your code. Your machine. Your sovereignty. 🛡️