-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path export-dr-metrics.sh
More file actions
executable file
·291 lines (232 loc) · 9.96 KB
/
export-dr-metrics.sh
File metadata and controls
executable file
·291 lines (232 loc) · 9.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
#!/bin/bash
################################################################################
# Disaster Recovery Metrics Exporter for Prometheus
#
# Purpose: Export disaster recovery metrics in Prometheus format
# - Backup status and size
# - Restore status
# - Cloudflare tunnel status
# - System health metrics
#
# Output: Prometheus text format metrics
# Usage: Run manually or via cron to update metrics file
#
# Environment:
#   METRICS_FILE   - destination file for the node_exporter textfile collector
#   OUTPUT_STDOUT  - set to "1" to also print the finished metrics to stdout
#
# Author: InsightLearn DevOps Team
# Version: 1.0.0
################################################################################
set -euo pipefail

# Metrics output (stdout for direct scraping or file for node_exporter textfile collector)
METRICS_FILE="${METRICS_FILE:-/var/lib/node_exporter/textfile_collector/disaster_recovery.prom}"

# Use mktemp instead of the predictable "$$"-suffixed name: a guessable path in
# the shared /tmp directory is open to symlink attacks and collisions.
TEMP_METRICS="$(mktemp /tmp/disaster_recovery_metrics.XXXXXX)"

# Clean the temp file up on every exit path (set -e aborts included). After the
# final rename the file no longer exists, so rm -f is a no-op on success.
trap 'rm -f -- "$TEMP_METRICS"' EXIT

# Ensure the destination directory exists (best effort: may lack privileges)
mkdir -p "$(dirname "$METRICS_FILE")" 2>/dev/null || true
# Start writing metrics
cat > "$TEMP_METRICS" <<EOF
# HELP insightlearn_dr_backup_last_success_timestamp_seconds Unix timestamp of last successful backup
# TYPE insightlearn_dr_backup_last_success_timestamp_seconds gauge
EOF
# Get last backup timestamp (use latest-backup.tar.gz symlink to get most recent)
# Use -L flag to dereference symlinks and get actual file stats (not symlink size)
if [[ -f /var/backups/k3s-cluster/latest-backup.tar.gz ]]; then
    BACKUP_TIMESTAMP=$(stat -L -c %Y /var/backups/k3s-cluster/latest-backup.tar.gz 2>/dev/null || echo "0")
    BACKUP_SIZE=$(stat -L -c %s /var/backups/k3s-cluster/latest-backup.tar.gz 2>/dev/null || echo "0")
    echo "insightlearn_dr_backup_last_success_timestamp_seconds $BACKUP_TIMESTAMP" >> "$TEMP_METRICS"
else
    echo "insightlearn_dr_backup_last_success_timestamp_seconds 0" >> "$TEMP_METRICS"
    # BUG FIX: BACKUP_TIMESTAMP was never assigned on this branch, so with
    # `set -u` the later backup-age arithmetic aborted the whole script
    # whenever no backup archive exists yet.
    BACKUP_TIMESTAMP=0
    BACKUP_SIZE=0
fi
# Emit the size (bytes) of the most recent backup archive.
printf '%s\n' \
    "# HELP insightlearn_dr_backup_size_bytes Size of latest backup in bytes" \
    "# TYPE insightlearn_dr_backup_size_bytes gauge" \
    "insightlearn_dr_backup_size_bytes $BACKUP_SIZE" >> "$TEMP_METRICS"

# Backup success metric (1 = success, 0 = failure). Defaults to success; it is
# only flipped to 0 when a backup log exists and its recent tail lacks the
# completion marker.
BACKUP_SUCCESS=1
if [[ -f /var/log/k3s-backup.log ]] \
    && ! tail -100 /var/log/k3s-backup.log | grep -q "Backup completed successfully"; then
    BACKUP_SUCCESS=0
fi

printf '%s\n' \
    "# HELP insightlearn_dr_backup_last_status Last backup status (1=success, 0=failure)" \
    "# TYPE insightlearn_dr_backup_last_status gauge" \
    "insightlearn_dr_backup_last_status $BACKUP_SUCCESS" >> "$TEMP_METRICS"
# Auto-restore systemd unit state: enabled (will start at boot) and active
# (currently running). Either query failing — including systemctl being
# absent — leaves the corresponding flag at 0.
RESTORE_SERVICE_ENABLED=0
RESTORE_SERVICE_ACTIVE=0
if systemctl is-enabled k3s-auto-restore.service &>/dev/null; then RESTORE_SERVICE_ENABLED=1; fi
if systemctl is-active k3s-auto-restore.service &>/dev/null; then RESTORE_SERVICE_ACTIVE=1; fi

printf '%s\n' \
    "# HELP insightlearn_dr_restore_service_enabled Auto-restore service enabled (1=yes, 0=no)" \
    "# TYPE insightlearn_dr_restore_service_enabled gauge" \
    "insightlearn_dr_restore_service_enabled $RESTORE_SERVICE_ENABLED" \
    "# HELP insightlearn_dr_restore_service_active Auto-restore service active (1=yes, 0=no)" \
    "# TYPE insightlearn_dr_restore_service_active gauge" \
    "insightlearn_dr_restore_service_active $RESTORE_SERVICE_ACTIVE" >> "$TEMP_METRICS"
# Last restore timestamp. The state file holds a YYYYMMDD stamp; convert it to
# a Unix timestamp (approximate, midnight). Missing file or unparsable date
# both report 0.
RESTORE_TIMESTAMP=0
if [[ -f /var/lib/k3s-restore-state ]]; then
    RESTORE_DATE=$(cat /var/lib/k3s-restore-state 2>/dev/null || echo "19700101")
    # Slice YYYYMMDD into an ISO date for date(1).
    RESTORE_TIMESTAMP=$(date -d "${RESTORE_DATE:0:4}-${RESTORE_DATE:4:2}-${RESTORE_DATE:6:2}" +%s 2>/dev/null || echo "0")
fi

printf '%s\n' \
    "# HELP insightlearn_dr_last_restore_timestamp_seconds Unix timestamp of last restore" \
    "# TYPE insightlearn_dr_last_restore_timestamp_seconds gauge" \
    "insightlearn_dr_last_restore_timestamp_seconds $RESTORE_TIMESTAMP" >> "$TEMP_METRICS"
# Cloudflare tunnel status: systemd unit state plus a raw process check, so a
# tunnel started outside systemd is still visible.
CLOUDFLARE_SERVICE_ENABLED=0
CLOUDFLARE_SERVICE_ACTIVE=0
CLOUDFLARE_PROCESS_RUNNING=0
if systemctl is-enabled cloudflared-tunnel.service >/dev/null 2>&1; then
    CLOUDFLARE_SERVICE_ENABLED=1
fi
if systemctl is-active cloudflared-tunnel.service >/dev/null 2>&1; then
    CLOUDFLARE_SERVICE_ACTIVE=1
fi
if pgrep -f "cloudflared tunnel" >/dev/null 2>&1; then
    CLOUDFLARE_PROCESS_RUNNING=1
fi

printf '%s\n' \
    "# HELP insightlearn_dr_cloudflare_service_enabled Cloudflare tunnel service enabled (1=yes, 0=no)" \
    "# TYPE insightlearn_dr_cloudflare_service_enabled gauge" \
    "insightlearn_dr_cloudflare_service_enabled $CLOUDFLARE_SERVICE_ENABLED" \
    "# HELP insightlearn_dr_cloudflare_service_active Cloudflare tunnel service active (1=yes, 0=no)" \
    "# TYPE insightlearn_dr_cloudflare_service_active gauge" \
    "insightlearn_dr_cloudflare_service_active $CLOUDFLARE_SERVICE_ACTIVE" \
    "# HELP insightlearn_dr_cloudflare_process_running Cloudflare process running (1=yes, 0=no)" \
    "# TYPE insightlearn_dr_cloudflare_process_running gauge" \
    "insightlearn_dr_cloudflare_process_running $CLOUDFLARE_PROCESS_RUNNING" >> "$TEMP_METRICS"
# External reachability probe (optional, can be slow — bounded by a 5-second
# timeout). Skipped entirely when curl is not installed.
EXTERNAL_ACCESS=0
if command -v curl &>/dev/null \
    && curl -s -m 5 https://www.insightlearn.cloud/health >/dev/null 2>&1; then
    EXTERNAL_ACCESS=1
fi

printf '%s\n' \
    "# HELP insightlearn_dr_external_access External access check (1=OK, 0=unreachable)" \
    "# TYPE insightlearn_dr_external_access gauge" \
    "insightlearn_dr_external_access $EXTERNAL_ACCESS" >> "$TEMP_METRICS"
# Cron job status: the hourly backup schedule is installed as a cron.d file.
CRON_JOB_CONFIGURED=0
if [[ -f /etc/cron.d/k3s-cluster-backup ]]; then
    CRON_JOB_CONFIGURED=1
fi
cat >> "$TEMP_METRICS" <<EOF
# HELP insightlearn_dr_cron_job_configured Backup cron job configured (1=yes, 0=no)
# TYPE insightlearn_dr_cron_job_configured gauge
insightlearn_dr_cron_job_configured $CRON_JOB_CONFIGURED
EOF
# K3s cluster health (pod count) - REMOVED
# This section was causing duplicate metrics with the insightlearn namespace metrics below
# The all-namespaces count (21/23) conflicted with namespace-specific count (14/14)
# Keeping only the insightlearn namespace metrics at the end of the file

# Backup age (seconds since last backup)
CURRENT_TIME=$(date +%s)
# Default BACKUP_TIMESTAMP to 0 so a missing/unset value reports the full
# epoch age instead of aborting the script under `set -u`.
BACKUP_AGE=$((CURRENT_TIME - ${BACKUP_TIMESTAMP:-0}))
cat >> "$TEMP_METRICS" <<EOF
# HELP insightlearn_dr_backup_age_seconds Age of latest backup in seconds
# TYPE insightlearn_dr_backup_age_seconds gauge
insightlearn_dr_backup_age_seconds $BACKUP_AGE
EOF
# Next backup time, derived from the cron schedule "5 * * * *" (minute 5 of
# every hour). The 10# prefix forces base-10 so hours/minutes such as 08 or 09
# are not misread as invalid octal literals.
CURRENT_HOUR=$((10#$(date +%H)))
CURRENT_MINUTE=$((10#$(date +%M)))

# Before minute 5 the next run is still within this hour; otherwise it is at
# :05 of the following hour (wrapping at midnight).
if (( CURRENT_MINUTE < 5 )); then
    NEXT_BACKUP_HOUR=$CURRENT_HOUR
else
    NEXT_BACKUP_HOUR=$(( (CURRENT_HOUR + 1) % 24 ))
fi

NEXT_BACKUP_TIME=$(date -d "today $NEXT_BACKUP_HOUR:05:00" +%s 2>/dev/null || echo "0")
# A wrap past midnight resolves to a time earlier today; push it into tomorrow.
if (( NEXT_BACKUP_TIME < CURRENT_TIME )); then
    NEXT_BACKUP_TIME=$((NEXT_BACKUP_TIME + 86400))
fi
SECONDS_TO_NEXT_BACKUP=$((NEXT_BACKUP_TIME - CURRENT_TIME))

printf '%s\n' \
    "# HELP insightlearn_dr_next_backup_seconds Seconds until next scheduled backup" \
    "# TYPE insightlearn_dr_next_backup_seconds gauge" \
    "insightlearn_dr_next_backup_seconds $SECONDS_TO_NEXT_BACKUP" >> "$TEMP_METRICS"
# Disk space metrics for the backup location.
BACKUP_DIR="/var/backups/k3s-cluster"
if [[ -d "$BACKUP_DIR" ]]; then
    # Run df ONCE and split its data row, instead of the previous four
    # separate df invocations for one line of output.
    # Row fields: filesystem, total, used, available, use%, mountpoint.
    read -r _ DISK_TOTAL DISK_USED DISK_AVAILABLE DISK_USAGE_PERCENT _ < <(df -B1 "$BACKUP_DIR" | tail -1)
    DISK_USAGE_PERCENT=${DISK_USAGE_PERCENT%\%}  # strip the trailing '%'
    # Count backup archives present in the directory.
    BACKUP_COUNT=$(find "$BACKUP_DIR" -name "*.tar.gz" -type f 2>/dev/null | wc -l)
else
    DISK_TOTAL=0
    DISK_USED=0
    DISK_AVAILABLE=0
    DISK_USAGE_PERCENT=0
    BACKUP_COUNT=0
fi
cat >> "$TEMP_METRICS" <<EOF
# HELP insightlearn_dr_disk_total_bytes Total disk space for backup location
# TYPE insightlearn_dr_disk_total_bytes gauge
insightlearn_dr_disk_total_bytes $DISK_TOTAL
# HELP insightlearn_dr_disk_used_bytes Used disk space for backup location
# TYPE insightlearn_dr_disk_used_bytes gauge
insightlearn_dr_disk_used_bytes $DISK_USED
# HELP insightlearn_dr_disk_available_bytes Available disk space for backup location
# TYPE insightlearn_dr_disk_available_bytes gauge
insightlearn_dr_disk_available_bytes $DISK_AVAILABLE
# HELP insightlearn_dr_disk_usage_percent Disk usage percentage for backup location
# TYPE insightlearn_dr_disk_usage_percent gauge
insightlearn_dr_disk_usage_percent $DISK_USAGE_PERCENT
# HELP insightlearn_dr_backup_count Number of backup files
# TYPE insightlearn_dr_backup_count gauge
insightlearn_dr_backup_count $BACKUP_COUNT
EOF
# K3s Cluster Pod Metrics
# Pod counts for the insightlearn namespace, via `k3s kubectl` (Rocky Linux).
K3S_PODS_RUNNING=0
K3S_PODS_TOTAL=0
if [[ -x /usr/local/bin/k3s ]]; then
    # Capture the listing once; an unreachable API server yields "".
    POD_LIST=$(/usr/local/bin/k3s kubectl get pods -n insightlearn --no-headers 2>/dev/null || true)
    if [[ -n "$POD_LIST" ]]; then
        # BUG FIX: the old `grep -c ... || echo "0"` emitted TWO lines on no
        # match — grep -c prints its count ("0") even when it exits non-zero,
        # and the fallback echo then appended a second "0", producing an
        # invalid multi-line metric value. grep -c already prints 0, so only
        # the non-zero exit status needs suppressing.
        K3S_PODS_RUNNING=$(grep -c "Running" <<<"$POD_LIST" || true)
        K3S_PODS_TOTAL=$(wc -l <<<"$POD_LIST")
    fi
fi
cat >> "$TEMP_METRICS" <<EOF
# HELP insightlearn_dr_k3s_pods_running Number of running pods in insightlearn namespace
# TYPE insightlearn_dr_k3s_pods_running gauge
insightlearn_dr_k3s_pods_running $K3S_PODS_RUNNING
# HELP insightlearn_dr_k3s_pods_total Total number of pods in insightlearn namespace
# TYPE insightlearn_dr_k3s_pods_total gauge
insightlearn_dr_k3s_pods_total $K3S_PODS_TOTAL
EOF
# Publish the metrics atomically. rename() is only atomic WITHIN a single
# filesystem, and /tmp is frequently a separate mount from the metrics
# directory (in which case mv degrades to copy+unlink and a scrape could see a
# partial file). Stage the data next to the destination first, then rename.
# The staged name does not end in ".prom", so the textfile collector ignores it.
STAGED_METRICS="$(mktemp "${METRICS_FILE}.XXXXXX")"
mv "$TEMP_METRICS" "$STAGED_METRICS"
mv "$STAGED_METRICS" "$METRICS_FILE"

# Optionally echo the finished metrics to stdout (OUTPUT_STDOUT=1).
if [[ "${OUTPUT_STDOUT:-0}" == "1" ]]; then
    cat "$METRICS_FILE"
fi
exit 0