Description
I have a cluster created on DigitalOcean droplets. All droplets are on the same private network. Redis is run with Docker Compose and the instances are reachable only on the private network.
docker-compose.yml
services:
  redis-master:
    image: redis:7.0-alpine
    container_name: redis-master-1
    hostname: redis-master
    command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
    restart: always
    volumes:
      - ./redis.conf:/usr/local/etc/redis/redis.conf
    network_mode: host
redis.conf
port 6379
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes
appendfilename "appendonly.aof"
dir "/data"
bind 10.108.0.6
protected-mode no
repl-ping-slave-period 5
repl-backlog-size 1024mb
repl-backlog-ttl 0
maxclients 10000
script.js
"use strict"
import {createCluster} from 'redis';
const redis_config = {
rootNodes : [
{
url: 'redis://10.108.0.8:6379'
},
{
url: 'redis://10.108.0.6:6379'
}
],
useReplicas: true,
};
const redis = await createCluster(redis_config)
redis.on('error', err => console.log('Redis Cluster Error', err))
redis.on('connect', () => console.log('indicator template client read is connecting'))
redis.on('reconnecting', () => console.log('indicator template client read is reconnecting'))
redis.on('ready', () => console.log('indicator template client read is ready'))
await redis.connect();
await redis.set('test','123')
It throws this error:
node:internal/modules/run_main:128
triggerUncaughtException(
^
[ErrorReply: MOVED 6918 10.108.0.7:6379]
cluster nodes
10.108.0.7:6379> cluster nodes
3398e280da69ec00b4680a0f188a9f189ccff92a 10.108.0.7:6379@16379 myself,master - 0 1737466076000 8 connected 5461-10922
604893bb0af63941851f4b79274be84d9ecd785e 10.108.0.9:6379@16379 master - 0 1737466076564 3 connected 10923-16383
f378d304cfebf54dbfb1ce435394220bbefefbb4 10.108.0.2:6379@16379 slave 604893bb0af63941851f4b79274be84d9ecd785e 0 1737466075000 3 connected
606fad7fe6d7802dde029d73547133b277ceecd0 10.108.0.6:6379@16379 slave 0bafd5eaa69a8490fe09fb422dc93d31873ff3ac 0 1737466076263 1 connected
07b25bd37237587b9c28a20a37bb555cc66b4021 10.108.0.8:6379@16379 slave 3398e280da69ec00b4680a0f188a9f189ccff92a 0 1737466075260 8 connected
0bafd5eaa69a8490fe09fb422dc93d31873ff3ac 10.108.0.10:6379@16379 master - 0 1737466076000 1 connected 0-5460
cluster info
10.108.0.7:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:8
cluster_my_epoch:8
cluster_stats_messages_ping_sent:1881944
cluster_stats_messages_pong_sent:1895080
cluster_stats_messages_meet_sent:1
cluster_stats_messages_publish_sent:1447424452
cluster_stats_messages_auth-req_sent:5
cluster_stats_messages_update_sent:1
cluster_stats_messages_sent:1451201483
cluster_stats_messages_ping_received:1895075
cluster_stats_messages_pong_received:1881921
cluster_stats_messages_fail_received:3
cluster_stats_messages_publish_received:1438460389
cluster_stats_messages_auth-req_received:1
cluster_stats_messages_auth-ack_received:2
cluster_stats_messages_received:1442237391
total_cluster_links_buffer_limit_exceeded:0
It is supposed to transparently follow the redirect to the other node instead of surfacing the error.
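For reference, a minimal sketch of the failing call in isolation, assuming the same node addresses as in script.js above. maxCommandRedirections is set explicitly only to rule out the redirect budget as the cause; according to the node-redis cluster docs it defaults to 16.

"use strict";
import { createCluster } from 'redis';

const cluster = createCluster({
  rootNodes: [
    { url: 'redis://10.108.0.8:6379' },
    { url: 'redis://10.108.0.6:6379' }
  ],
  useReplicas: true,
  // Maximum number of MOVED/ASK redirects the client follows per command.
  maxCommandRedirections: 16
});

cluster.on('error', err => console.log('Redis Cluster Error', err));

await cluster.connect();

try {
  // With the behaviour reported above, the MOVED reply for slot 6918
  // surfaces here instead of being followed transparently.
  await cluster.set('test', '123');
  console.log('SET succeeded, redirect was followed');
} catch (err) {
  console.log('SET failed with', err.message);
} finally {
  await cluster.quit();
}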
Node.js Version
Node.js v20.18.1
Redis Server Version
7.0.15
Node Redis Version
Platform
Linux