Skip to content

Commit 484164f

Browse files
committed
feat: add feathers tasks orchestration example
1 parent 7145eb5 commit 484164f

6 files changed

Lines changed: 290 additions & 0 deletions

File tree

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# Worker image for the feathers-tasks orchestration example.
# Must be built from the repository ROOT so the pnpm workspace layout is visible:
#   docker build -t feathers-tasks-worker -f examples/feathers-tasks-orchestration/Dockerfile .
FROM node:20-alpine

WORKDIR /app

# Install pnpm
# corepack ships with Node 20; the pnpm version is pinned to match the
# "packageManager" field of the example's package.json
RUN corepack enable && corepack prepare pnpm@10.1.0 --activate

# Copy workspace manifests needed to resolve the catalog + workspace:* deps
COPY pnpm-workspace.yaml ./
COPY pnpm-lock.yaml ./
COPY package.json ./

# Copy the package sources that are referenced as workspace:*
# (only package.json + src — no build output is needed, see CMD below)
COPY packages/feathers-tasks/package.json ./packages/feathers-tasks/
COPY packages/feathers-tasks/src ./packages/feathers-tasks/src/

# Copy the example manifest
COPY examples/feathers-tasks-orchestration/package.json ./examples/feathers-tasks-orchestration/

# Install only the example's dependencies (no dev, no other workspaces)
RUN pnpm install --filter feathers-tasks-orchestration-example --prod --ignore-scripts

# Copy worker source
COPY examples/feathers-tasks-orchestration/worker ./examples/feathers-tasks-orchestration/worker

WORKDIR /app/examples/feathers-tasks-orchestration

ENV NODE_ENV=production

# "--conditions development" makes Node resolve the workspace package's
# "development" export condition, so the raw src/ files copied above are
# importable without a build step (NODE_ENV stays "production" for runtime).
CMD ["node", "--conditions", "development", "worker/index.js"]
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
# Docker Swarm stack — deploy with:
#   docker build -t feathers-tasks-worker -f examples/feathers-tasks-orchestration/Dockerfile .
#   docker stack deploy -c examples/feathers-tasks-orchestration/docker-compose.yml tasks-stack
#
# Scale the swarm worker:
#   docker service scale tasks-stack_swarm-worker=4
#
# Watch logs:
#   docker service logs -f tasks-stack_swarm-worker
#
# NOTE: the "feathers-tasks-worker" image is never pushed to a registry, so it
# must be present locally on every Swarm node that may schedule the service.

# "version" is obsolete for docker compose v2 but "docker stack deploy" still
# reads it to pick the compose-file schema.
version: "3.8"

networks:
  tasks-net:
    driver: overlay
    # attachable lets standalone (non-Swarm) containers join this network too
    attachable: true

services:
  # Shared Redis broker for the BullMQ queue. The port is published on the
  # host so workers running outside the Swarm (e.g. the k8s deployment in this
  # example) can reach it as well.
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    networks:
      - tasks-net
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure

  # BullMQ worker consuming "swarm-job" jobs from the shared queue.
  swarm-worker:
    image: feathers-tasks-worker
    environment:
      REDIS_HOST: redis
      REDIS_PORT: "6379"
      QUEUE_NAME: orchestration-tasks
      JOB_TYPE: swarm-job
      ORCHESTRATOR: swarm
      CONCURRENCY: "2"
    networks:
      - tasks-net
    # NOTE: depends_on is ignored by "docker stack deploy" (Swarm mode) —
    # startup ordering is best-effort; restart_policy below covers the case
    # where the worker comes up before Redis.
    depends_on:
      - redis
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
      update_config:
        parallelism: 1
        delay: 5s
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
# Kubernetes Deployment — k8s-job workers
#
# Deploy:
#   kubectl apply -f examples/feathers-tasks-orchestration/k8s/worker.yaml
#
# Scale:
#   kubectl scale deployment k8s-worker --replicas=4
#
# Watch logs (all replicas):
#   kubectl logs -l app=k8s-worker -f
#
# Delete:
#   kubectl delete -f examples/feathers-tasks-orchestration/k8s/worker.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: k8s-worker
  labels:
    app: k8s-worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app: k8s-worker
  template:
    metadata:
      labels:
        app: k8s-worker
    spec:
      containers:
        - name: worker
          image: feathers-tasks-worker
          # Use the local image built with docker build — never pull from registry.
          # NOTE(review): with imagePullPolicy Never the image must already be
          # loaded into the cluster's container runtime (e.g. "kind load
          # docker-image" or "minikube image load") — confirm for your setup.
          imagePullPolicy: Never
          env:
            - name: REDIS_HOST
              # docker_gwbridge IP — Redis Swarm service reachable from host network.
              # NOTE(review): this address is environment-specific; adjust it if
              # your docker_gwbridge subnet differs.
              value: "172.18.0.1"
            - name: REDIS_PORT
              value: "6379"
            - name: QUEUE_NAME
              value: "orchestration-tasks"
            - name: JOB_TYPE
              value: "k8s-job"
            - name: ORCHESTRATOR
              value: "kubernetes"
            - name: CONCURRENCY
              value: "2"
            - name: WORKER_ID
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name # Pod name as worker id
          resources:
            requests:
              cpu: "100m"
              memory: "128Mi"
            limits:
              cpu: "500m"
              memory: "256Mi"
      # Use host network so the worker can reach the Redis port the Swarm stack
      # publishes on the Docker host (via the gwbridge REDIS_HOST above).
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      # Tolerate disk-pressure taint (acceptable in single-node dev environment)
      tolerations:
        - key: node.kubernetes.io/disk-pressure
          operator: Exists
          effect: NoSchedule
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
{
  "name": "feathers-tasks-orchestration-example",
  "description": "feathers-tasks example with Docker Swarm and Kubernetes workers",
  "packageManager": "pnpm@10.1.0",
  "type": "module",
  "private": true,
  "scripts": {
    "dev:server": "node --watch --conditions development server/index.js",
    "lint": "standard --fix"
  },
  "author": {
    "name": "KALISIO",
    "email": "contact@kalisio.com",
    "url": "https://github.com/kalisio"
  },
  "license": "MIT",
  "dependencies": {
    "@feathersjs/errors": "catalog:",
    "@feathersjs/express": "catalog:",
    "@feathersjs/feathers": "catalog:",
    "@feathersjs/memory": "catalog:",
    "@feathersjs/socketio": "catalog:",
    "@kalisio/feathers-tasks": "workspace:*",
    "bullmq": "catalog:",
    "debug": "catalog:"
  }
}
Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
import { feathers } from '@feathersjs/feathers'
import express from '@feathersjs/express'
import socketio from '@feathersjs/socketio'
import { MemoryService } from '@feathersjs/memory'
import { TaskService, createQueue, setupQueueEvents, setupDashboard } from '@kalisio/feathers-tasks'

// Runtime configuration — environment overrides with local-dev defaults.
const serverPort = Number(process.env.SERVER_PORT) || 3030
const redisConfig = {
  host: process.env.REDIS_HOST || 'localhost',
  port: Number(process.env.REDIS_PORT) || 6379
}
const tasksQueueName = process.env.QUEUE_NAME || 'orchestration-tasks'

// Express-flavoured Feathers app exposing REST + Socket.IO transports.
const app = express(feathers())
app.use(express.json())
app.use(express.urlencoded({ extended: true }))
app.configure(express.rest())
app.configure(socketio({ cors: { origin: '*' } }))

// In-memory persistence backend for task records.
app.use('task-store', new MemoryService())

// BullMQ queue, queue-event wiring into the store, and the Bull Board UI.
const tasksQueue = createQueue(tasksQueueName, redisConfig)
setupQueueEvents(tasksQueueName, redisConfig, app, 'task-store')
setupDashboard(app, tasksQueue, '/admin/tasks')

// Task submission/management service backed by the queue.
app.use('tasks', new TaskService({ queue: tasksQueue, persistenceService: 'task-store' }))

// Publish every service event to all connected (anonymous) clients.
app.on('connection', connection => app.channel('anonymous').join(connection))
app.publish(() => app.channel('anonymous'))

await app.setup()

app.listen(serverPort).then(() => {
  const banner = [
    `Server listening on http://localhost:${serverPort}`,
    `Bull Board: http://localhost:${serverPort}/admin/tasks`,
    `Redis: ${redisConfig.host}:${redisConfig.port}`,
    `Queue: ${tasksQueueName}`,
    '',
    'Submit a swarm job:',
    ` curl -X POST http://localhost:${serverPort}/tasks -H 'Content-Type: application/json' \\`,
    ' -d \'{"type":"swarm-job","payload":{"label":"hello from swarm","steps":4}}\'',
    '',
    'Submit a k8s job:',
    ` curl -X POST http://localhost:${serverPort}/tasks -H 'Content-Type: application/json' \\`,
    ' -d \'{"type":"k8s-job","payload":{"label":"hello from k8s","steps":3}}\''
  ]
  for (const line of banner) console.log(line)
})
Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
import { Worker, DelayedError } from 'bullmq'

// Redis connection settings (overridable via environment).
const redis = {
  host: process.env.REDIS_HOST || 'localhost',
  port: Number(process.env.REDIS_PORT) || 6379
}

// Queue to consume, job name this worker handles, identity and parallelism.
const queueName = process.env.QUEUE_NAME || 'orchestration-tasks'
const jobType = process.env.JOB_TYPE || 'generic-job'
const workerId = process.env.WORKER_ID || `worker-${process.pid}`
const concurrency = Number(process.env.CONCURRENCY) || 2

console.log(`[${workerId}] Starting — queue: ${queueName}, job type: "${jobType}", concurrency: ${concurrency}`)
console.log(`[${workerId}] Redis: ${redis.host}:${redis.port}`)

// Processes jobs named `jobType`: simulates `steps` half-second work steps,
// reporting progress after each, then returns a result record.
const worker = new Worker(
  queueName,
  async (job, token) => {
    if (job.name !== jobType) {
      // FIX: a BullMQ worker pulls every job in the queue regardless of name.
      // The previous bare "return" silently marked the *other* worker type's
      // jobs as completed with no result (e.g. k8s-jobs swallowed by swarm
      // workers). Instead, move the job back into the delayed set so a worker
      // of the matching type can pick it up, and signal the move to BullMQ by
      // throwing DelayedError (not counted as a failure).
      await job.moveToDelayed(Date.now() + 1000, token)
      throw new DelayedError()
    }

    const { steps = 3, label = 'task' } = job.data

    console.log(`[${workerId}] Processing job ${job.id} "${label}" (${steps} steps)`)

    for (let i = 1; i <= steps; i++) {
      // Simulate work
      await new Promise(resolve => setTimeout(resolve, 500))
      const progress = Math.round((i / steps) * 100)
      await job.updateProgress(progress)
      console.log(`[${workerId}] Job ${job.id} — step ${i}/${steps} (${progress}%)`)
    }

    const result = {
      processedBy: workerId,
      orchestrator: process.env.ORCHESTRATOR || 'unknown',
      jobId: job.id,
      label,
      steps,
      completedAt: new Date().toISOString()
    }

    console.log(`[${workerId}] Job ${job.id} completed`)
    return result
  },
  { connection: redis, concurrency }
)

worker.on('completed', (job, result) => {
  console.log(`[${workerId}] ✓ Job ${job.id} done — processed by ${result?.processedBy}`)
})

worker.on('failed', (job, err) => {
  console.error(`[${workerId}] ✗ Job ${job?.id} failed: ${err.message}`)
})

worker.on('error', (err) => {
  console.error(`[${workerId}] Worker error: ${err.message}`)
})

// Graceful shutdown — let in-flight jobs finish, then exit cleanly.
async function shutdown () {
  console.log(`[${workerId}] Shutting down...`)
  await worker.close()
  process.exit(0)
}

process.on('SIGTERM', shutdown)
process.on('SIGINT', shutdown)

0 commit comments

Comments
 (0)