Skip to content

Commit 7a2714f

Browse files
author
The No Hands Company
committed
feat: LOW_RESOURCE mode + fedhost-proxy Rust crate skeleton
LOW_RESOURCE=true (volunteer node / Raspberry Pi profile) - resourceConfig.ts: single source of truth for all constrained values DB pool: 20→5 connections, min 2→1 Domain LRU cache: 10K→500 entries File LRU cache: 50K→2000 entries Analytics flush: 1min→5min Health check: 2min→10min Log level: info→warn Global rate limit: 300→60 req/min Upload rate limit: 60→10/min Compression: level 6→1 (fastest) Gossip interval: 5min→10min - index.ts: imports resourceConfig FIRST, applies env overrides before any module initialises (pool, logger, caches, rate limiters all pick up values) - app.ts: compression({ level: COMPRESSION_LEVEL }) - rateLimiter.ts: GLOBAL_RATE_LIMIT + UPLOAD_RATE_LIMIT from resourceConfig - .env.example: full LOW_RESOURCE documentation with before/after table - docker-compose.yml: LOW_RESOURCE: 'true' as commented option - docs/SELF_HOSTING.md: full section — what changes, what doesn't, expected capacity on Pi 4 (4GB), tips for constrained nodes crates/fedhost-proxy — Rust extraction skeleton - Cargo.toml: axum 0.7, tokio, aws-sdk-s3, deadpool-postgres, redis, ed25519-dalek, prom-client; release profile strips + LTO + panic=abort - main.rs: entry point, module layout, full architecture doc comment - config.rs: all env vars via clap derive + dotenvy, LOW_RESOURCE overrides - cache.rs: DomainCache + FileCache with TTL, in-process LRU, invalidation - db.rs: read-only pool — lookup_site (primary + custom domain), lookup_file, record_hit; SQL matches TypeScript schema exactly - handler.rs: full request flow skeleton — domain resolve, ACL check, file path resolve, S3 stream, analytics fire-and-forget, geo routing stub; HMAC cookie verification is a complete port of TypeScript verifyUnlockCookie - storage.rs: ObjectStorage skeleton with todo!() for SDK wiring - geo.rs: infer_region (Fly/Cloudflare/CloudFront headers), fly_to_aws, country_to_region (Indonesia → ap-southeast-3 priority) - metrics.rs: stub Prometheus endpoint on separate port - README.md: 
architecture diagram, module map, 9-item implementation roadmap, Caddy routing config, protocol compatibility requirements
1 parent c7ff639 commit 7a2714f

File tree

18 files changed

+1413
-13
lines changed

18 files changed

+1413
-13
lines changed

.env.example

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,3 +220,19 @@ DYNAMIC_PORT_END=9999
220220

221221
# Maximum restarts before a crashed process is abandoned (default: 5)
222222
DYNAMIC_MAX_RESTARTS=5
223+
224+
# ── Low-Resource Mode (Raspberry Pi / volunteer nodes) ────────────────────────
225+
# Set to "true" on constrained hardware to reduce memory and CPU usage.
226+
# All features remain available — throughput is reduced but federation works.
227+
# Typical hardware: Raspberry Pi 3/4, old laptops, 512MB-1GB RAM VMs.
228+
#
229+
# What changes with LOW_RESOURCE=true:
230+
# DB pool connections: 20 → 5
231+
# Domain LRU cache: 10 000 → 500 entries
232+
# File LRU cache: 50 000 → 2 000 entries
233+
# Analytics flush: 1 min → 5 min
234+
# Health check: 2 min → 10 min
235+
# Log level: info → warn
236+
# Global rate limit: 300 → 60 req/min
237+
# Compression: level 6 → level 1 (fastest)
238+
LOW_RESOURCE=false

artifacts/api-server/src/app.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import express, { type Express, type Request, type Response, type NextFunction }
22
import cors from "cors";
33
import helmet from "helmet";
44
import compression from "compression";
5+
import { COMPRESSION_LEVEL } from "./lib/resourceConfig";
56
import cookieParser from "cookie-parser";
67
import pinoHttp from "pino-http";
78
import { randomUUID } from "crypto";
@@ -56,7 +57,7 @@ app.use(
5657
app.use(cors({ credentials: true, origin: allowedOrigins }));
5758

5859
// ── Response compression ──────────────────────────────────────────────────────
59-
app.use(compression());
60+
app.use(compression({ level: COMPRESSION_LEVEL }));
6061

6162
// ── Request IDs ───────────────────────────────────────────────────────────────
6263
app.use((req: Request, res: Response, next: NextFunction) => {

artifacts/api-server/src/index.ts

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,26 @@ import app from "./app";
22
import { db, nodesTable } from "@workspace/db";
33
import { eq } from "drizzle-orm";
44
import { generateKeyPair } from "./lib/federation";
5-
import { startHealthMonitor } from "./lib/healthMonitor";
5+
// ── Resource configuration — MUST be imported before anything that reads env vars ──
import {
  LOW_RESOURCE, DB_POOL, LOG_LEVEL,
  ANALYTICS_FLUSH_INTERVAL_MS, HEALTH_CHECK_INTERVAL_MS, GOSSIP_INTERVAL_MS,
} from "./lib/resourceConfig";

// Apply LOW_RESOURCE overrides to process.env NOW so all downstream modules
// (db pool, logger, caches, rate limiters) pick up constrained values at init time.
//
// NOTE(review): ES module `import` declarations are hoisted — `import app from
// "./app"` at the top of this file evaluates app.ts (and everything it
// transitively imports) BEFORE this statement block runs. Any module that reads
// these env vars at import time may therefore see the pre-override values.
// Confirm, or move this override logic into a dedicated side-effect module that
// is the very first import of the entry point.
if (LOW_RESOURCE) {
  // Constrained pool limits chosen by resourceConfig (max 5 / min 1).
  process.env.DB_POOL_MAX = String(DB_POOL.max);
  process.env.DB_POOL_MIN = String(DB_POOL.min);
  process.env.DB_IDLE_TIMEOUT_MS = String(DB_POOL.idleTimeoutMillis);
  process.env.DB_CONNECT_TIMEOUT_MS = String(DB_POOL.connectionTimeoutMillis);
  // Quieter logging on low-RAM nodes.
  process.env.LOG_LEVEL = LOG_LEVEL;
  // Slower background cadences: analytics flush, node health, gossip push.
  process.env.ANALYTICS_FLUSH_INTERVAL_MS = String(ANALYTICS_FLUSH_INTERVAL_MS);
  process.env.HEALTH_CHECK_INTERVAL_MS = String(HEALTH_CHECK_INTERVAL_MS);
  process.env.GOSSIP_INTERVAL_MS = String(GOSSIP_INTERVAL_MS);
  // Cache sizes: respect an operator's explicit value, otherwise apply the
  // constrained defaults (500 domain entries / 2000 file entries).
  process.env.DOMAIN_CACHE_MAX = process.env.DOMAIN_CACHE_MAX ?? "500";
  process.env.FILE_CACHE_MAX = process.env.FILE_CACHE_MAX ?? "2000";
}
625
import { startAnalyticsFlusher, stopAnalyticsFlusher } from "./lib/analyticsFlush";
726
import { startGossipPusher, stopGossipPusher } from "./routes/gossip";
827
import { getRedisClient, closeRedis } from "./lib/redis";
artifacts/api-server/src/lib/resourceConfig.ts

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
/**
2+
* Low-resource mode configuration.
3+
*
4+
* Set LOW_RESOURCE=true to run FedHost on constrained hardware:
5+
* Raspberry Pi, old laptops, small VMs (1 vCPU / 512 MB RAM).
6+
*
7+
* This is the Indonesian volunteer-node profile. A node running in
8+
* low-resource mode still federates correctly — it just serves fewer
9+
* concurrent requests and uses less memory.
10+
*
11+
* What changes:
12+
* DB pool: max 5 connections (default: 20)
13+
* DB pool min: 1 (default: 2)
14+
* Log level: warn (default: info — cuts ~30% log I/O)
15+
* Domain cache: 500 entries (default: 10 000)
16+
* File cache: 2 000 entries (default: 50 000)
17+
* Analytics flush: 5 minutes (default: 1 minute)
18+
* Health check: 10 minutes (default: 2 minutes)
19+
* Site health: 30 minutes (default: 10 minutes)
20+
* Global rate limit: 60/min (default: 300/min)
21+
* Concurrent uploads: 2 (default: 60/min)
22+
* Compression level: 1 (fastest) (default: zlib default ~6)
23+
* Pino pretty-print: disabled (always disabled in prod anyway)
24+
*
25+
* What does NOT change:
26+
* Federation protocol — identical wire format, signatures, gossip
27+
* ACME / TLS — still works
28+
* Redis integration — still works if REDIS_URL is set
29+
* All API routes — fully available
30+
* Auth — unchanged
31+
*/
32+
33+
export const LOW_RESOURCE = process.env.LOW_RESOURCE === "true";
34+
35+
/** Database connection pool limits */
36+
export const DB_POOL = LOW_RESOURCE
37+
? { max: 5, min: 1, idleTimeoutMillis: 60_000, connectionTimeoutMillis: 8_000 }
38+
: {
39+
max: parseInt(process.env.DB_POOL_MAX ?? "20"),
40+
min: parseInt(process.env.DB_POOL_MIN ?? "2"),
41+
idleTimeoutMillis: parseInt(process.env.DB_IDLE_TIMEOUT_MS ?? "30000"),
42+
connectionTimeoutMillis: parseInt(process.env.DB_CONNECT_TIMEOUT_MS ?? "5000"),
43+
};
44+
45+
/** Pino log level */
46+
export const LOG_LEVEL: string = LOW_RESOURCE
47+
? (process.env.LOG_LEVEL ?? "warn") // quieter on low-RAM nodes
48+
: (process.env.LOG_LEVEL ?? (process.env.NODE_ENV === "development" ? "debug" : "info"));
49+
50+
/** LRU domain → site cache max entries */
51+
export const DOMAIN_CACHE_MAX = LOW_RESOURCE
52+
? parseInt(process.env.DOMAIN_CACHE_MAX ?? "500")
53+
: parseInt(process.env.DOMAIN_CACHE_MAX ?? "10000");
54+
55+
/** LRU file path → objectPath cache max entries */
56+
export const FILE_CACHE_MAX = LOW_RESOURCE
57+
? parseInt(process.env.FILE_CACHE_MAX ?? "2000")
58+
: parseInt(process.env.FILE_CACHE_MAX ?? "50000");
59+
60+
/** Analytics buffer flush interval (ms) */
61+
export const ANALYTICS_FLUSH_INTERVAL_MS = LOW_RESOURCE
62+
? parseInt(process.env.ANALYTICS_FLUSH_INTERVAL_MS ?? "300000") // 5 minutes
63+
: parseInt(process.env.ANALYTICS_FLUSH_INTERVAL_MS ?? "60000"); // 1 minute
64+
65+
/** Federation node health check interval (ms) */
66+
export const HEALTH_CHECK_INTERVAL_MS = LOW_RESOURCE
67+
? parseInt(process.env.HEALTH_CHECK_INTERVAL_MS ?? "600000") // 10 minutes
68+
: parseInt(process.env.HEALTH_CHECK_INTERVAL_MS ?? "120000"); // 2 minutes
69+
70+
/** Site health monitor interval (ms) */
71+
export const SITE_HEALTH_INTERVAL_MS = LOW_RESOURCE
72+
? parseInt(process.env.SITE_HEALTH_CHECK_INTERVAL_MS ?? "1800000") // 30 minutes
73+
: parseInt(process.env.SITE_HEALTH_CHECK_INTERVAL_MS ?? "600000"); // 10 minutes
74+
75+
/** Global rate limit max requests per minute per IP */
76+
export const GLOBAL_RATE_LIMIT = LOW_RESOURCE ? 60 : 300;
77+
78+
/** Upload rate limit max per minute per IP */
79+
export const UPLOAD_RATE_LIMIT = LOW_RESOURCE ? 10 : 60;
80+
81+
/** zlib compression level (1 = fastest/least CPU, 9 = best/most CPU) */
82+
export const COMPRESSION_LEVEL = LOW_RESOURCE ? 1 : 6;
83+
84+
/** Gossip push interval (ms) */
85+
export const GOSSIP_INTERVAL_MS = LOW_RESOURCE
86+
? parseInt(process.env.GOSSIP_INTERVAL_MS ?? "600000") // 10 minutes
87+
: parseInt(process.env.GOSSIP_INTERVAL_MS ?? "300000"); // 5 minutes
88+
89+
if (LOW_RESOURCE) {
90+
// Log once at startup so operators know the mode is active
91+
// Using console directly since the logger may not be initialised yet
92+
console.warn(
93+
"[config] LOW_RESOURCE=true — running in low-resource mode. " +
94+
"DB pool: 5, caches: 500/2K, flush: 5min, health: 10min. " +
95+
"All API routes and federation remain fully functional."
96+
);
97+
}

artifacts/api-server/src/middleware/rateLimiter.ts

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import rateLimit from "express-rate-limit";
22
import slowDown from "express-slow-down";
33
import { getRedisClient } from "../lib/redis";
44
import logger from "../lib/logger";
5+
import { GLOBAL_RATE_LIMIT, UPLOAD_RATE_LIMIT } from "../lib/resourceConfig";
56

67
const isProd = process.env.NODE_ENV === "production";
78

@@ -34,10 +35,10 @@ function makeHandler(message: string, code: string) {
3435
};
3536
}
3637

37-
// Global limiter — 300 requests / minute per IP
38+
// Global limiter — 300 requests / minute per IP (60 in LOW_RESOURCE mode)
3839
export const globalLimiter = rateLimit({
3940
windowMs: 60_000,
40-
max: isProd ? 300 : 10_000,
41+
max: isProd ? GLOBAL_RATE_LIMIT : 10_000,
4142
standardHeaders: "draft-7",
4243
legacyHeaders: false,
4344
store,
@@ -58,10 +59,10 @@ export const authLimiter = rateLimit({
5859
),
5960
});
6061

61-
// Upload endpoints — 60 uploads / minute per IP
62+
// Upload endpoints — 60 uploads / minute per IP (10 in LOW_RESOURCE mode)
6263
export const uploadLimiter = rateLimit({
6364
windowMs: 60_000,
64-
max: isProd ? 60 : 1_000,
65+
max: isProd ? UPLOAD_RATE_LIMIT : 1_000,
6566
standardHeaders: "draft-7",
6667
legacyHeaders: false,
6768
store,

crates/fedhost-proxy/Cargo.toml

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# Cargo manifest for the fedhost-proxy Rust extraction skeleton.
[package]
name = "fedhost-proxy"
version = "0.1.0"
edition = "2021"
description = "High-performance reverse proxy for Federated Hosting static site serving"
license = "MIT"
repository = "https://github.com/The-No-Hands-company/Federated-Hosting"
authors = ["The No Hands Company <dev@nohands.company>"]

# This crate is not published to crates.io — it is deployed as a binary
# alongside the TypeScript API server.

[[bin]]
name = "fedhost-proxy"
path = "src/main.rs"

[dependencies]
# HTTP server
axum = { version = "0.7", features = ["tokio"] }
tokio = { version = "1", features = ["full"] }
tower = "0.4"
tower-http = { version = "0.5", features = ["compression-gzip", "trace", "timeout"] }
hyper = { version = "1", features = ["full"] }
hyper-util = { version = "0.1", features = ["full"] }

# S3 / object storage
aws-config = { version = "1", features = ["behavior-version-latest"] }
aws-sdk-s3 = "1"

# PostgreSQL (read-only — domain → siteId lookups)
tokio-postgres = { version = "0.7", features = ["with-serde_json-1"] }
deadpool-postgres = "0.12"

# Redis (LRU cache sharing with TypeScript nodes)
redis = { version = "0.25", features = ["tokio-comp", "connection-manager"] }

# Ed25519 (verify federation signatures — same keypairs as TS node)
ed25519-dalek = { version = "2", features = ["pkcs8", "pem"] }
base64 = "0.22"

# Serialization
serde = { version = "1", features = ["derive"] }
serde_json = "1"

# Configuration (env vars via clap derive + dotenvy)
dotenvy = "0.15"
clap = { version = "4", features = ["derive", "env"] }

# Observability
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
metrics = "0.22"
metrics-exporter-prometheus = "0.13"

# Utilities
anyhow = "1"
thiserror = "1"
bytes = "1"
mime_guess = "2"
sha2 = "0.10"
hex = "0.4"

[dev-dependencies]
tokio-test = "0.4"
axum-test = "0.15"

[profile.release]
opt-level = 3
lto = true
codegen-units = 1
strip = true      # strip debug symbols — smaller binary for Pi deployment
panic = "abort"   # smaller binary, no unwinding
# NOTE(review): panic = "abort" terminates the process on any panic with no
# unwinding/cleanup — confirm this is acceptable for a long-running proxy.

[profile.dev]
opt-level = 0
debug = true

0 commit comments

Comments
 (0)