-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathbackup.js
More file actions
288 lines (238 loc) · 7.27 KB
/
backup.js
File metadata and controls
288 lines (238 loc) · 7.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
const fs = require("fs");
const path = require("path");
const archiver = require("archiver");
const db = require("./db");
const users = require("./users");
// Configuration
const DATA_DIR = process.env.DATA_DIR || "./data"; // Root of per-user data dirs; overridable via DATA_DIR env var
const BACKUP_DIR = path.join(DATA_DIR, "backups"); // Backups live under <DATA_DIR>/backups/<userId>/
const DEFAULT_RETENTION = 7; // Number of most-recent backup zips kept per user (see cleanOldBackups)
const BACKUP_INTERVAL_MS = 24 * 60 * 60 * 1000; // 24 hours
/**
 * Resolve the directory that holds a single user's backup archives.
 * @param {string} userId - ID of the user owning the backups.
 * @returns {string} Path equivalent to `<BACKUP_DIR>/<userId>`.
 */
function getUserBackupDir(userId) {
  const userDir = path.join(BACKUP_DIR, userId);
  return userDir;
}
/**
 * Read the persisted timestamp (ms since epoch) of the user's last backup.
 * @param {string} userId
 * @returns {?number} The stored timestamp, or null when none has been recorded.
 */
function getLastBackupTime(userId) {
  const stored = db.getSetting(userId, "lastBackupTime");
  if (!stored) {
    return null;
  }
  return parseInt(stored, 10);
}
/**
 * Persist the timestamp (ms since epoch) of the user's last backup.
 * @param {string} userId
 * @param {number} timestamp - Milliseconds since the Unix epoch.
 */
function setLastBackupTime(userId, timestamp) {
  const serialized = timestamp.toString();
  db.setSetting(userId, "lastBackupTime", serialized);
}
/**
 * Build a unique, filesystem-safe backup filename for a user.
 * ISO timestamps contain ":" and "." which are awkward in filenames,
 * so both are replaced with "-".
 * @param {string} userId
 * @returns {string} e.g. "peek-backup-<userId>-2024-01-02T03-04-05-678Z.zip"
 */
function generateBackupFilename(userId) {
  const iso = new Date().toISOString();
  const stamp = iso.replace(/[:.]/g, "-");
  return ["peek-backup-", userId, "-", stamp, ".zip"].join("");
}
/**
 * Gather record counts for the backup manifest: one pluralized key per
 * non-deleted item type (e.g. "notes": 3) plus a "tags" total.
 * @param {{all: Function, get: Function}} conn - SQL adapter for the user's DB.
 * @returns {Object<string, number>} Map of pluralized type name -> count.
 */
function getTableCounts(conn) {
  const typeRows = conn.all(`
    SELECT type, COUNT(*) as count
    FROM items
    WHERE CAST(deletedAt AS INTEGER) = 0
    GROUP BY type
  `);
  const counts = Object.fromEntries(
    typeRows.map((row) => [`${row.type}s`, row.count])
  );
  const tagRow = conn.get("SELECT COUNT(*) as count FROM tags");
  counts.tags = tagRow.count;
  return counts;
}
/**
 * Create a zipped backup (consistent DB snapshot + manifest.json) for a single user.
 *
 * TODO: DO SQLite backup - VACUUM INTO is not available in Cloudflare Durable Objects SQLite.
 * When deploying to DO, implement an alternative backup strategy:
 * - Export data as JSON to R2
 * - Or use DO's built-in point-in-time recovery features
 *
 * @param {string} userId
 * @returns {Promise<object>} On success: { success: true, filename, path, size,
 *   timestamp, tableCounts }. On failure (missing DB or any error while backing
 *   up): { success: false, error }.
 */
async function createBackup(userId) {
  console.log(`Creating backup for user: ${userId}`);
  // Check if database exists BEFORE calling getConnection (which would create it)
  const userDir = path.join(DATA_DIR, userId);
  const dbPath = path.join(userDir, "peek.db");
  if (!fs.existsSync(dbPath)) {
    console.log(`No database found for user ${userId}, skipping backup`);
    return { success: false, error: "No database found" };
  }
  // Ensure backup directory exists
  const userBackupDir = getUserBackupDir(userId);
  if (!fs.existsSync(userBackupDir)) {
    fs.mkdirSync(userBackupDir, { recursive: true });
  }
  // Get database connection (SqlAdapter)
  const conn = db.getConnection(userId);
  const backupFilename = generateBackupFilename(userId);
  const backupPath = path.join(userBackupDir, backupFilename);
  const tempDbPath = path.join(userBackupDir, `temp-${userId}.db`);
  try {
    // Use VACUUM INTO for consistent snapshot (non-blocking)
    // Note: This only works with better-sqlite3 adapter.
    // TODO: For DO SQLite, implement JSON export to R2 instead.
    // FIX: double up single quotes so a quote in the path cannot break the SQL literal.
    conn.exec(`VACUUM INTO '${tempDbPath.replace(/'/g, "''")}'`);
    // Get table counts for manifest
    const tableCounts = getTableCounts(conn);
    // Create manifest
    const manifest = {
      version: "1.0",
      timestamp: new Date().toISOString(),
      userId: userId,
      tableCounts: tableCounts,
      backupType: "daily-snapshot"
    };
    // Create ZIP archive
    await new Promise((resolve, reject) => {
      const output = fs.createWriteStream(backupPath);
      const archive = archiver("zip", { zlib: { level: 9 } });
      output.on("close", resolve);
      // FIX: without this listener a write-stream error (disk full, permissions)
      // was never surfaced and this Promise could hang forever.
      output.on("error", reject);
      archive.on("error", reject);
      archive.pipe(output);
      archive.file(tempDbPath, { name: "peek.db" });
      archive.append(JSON.stringify(manifest, null, 2), { name: "manifest.json" });
      archive.finalize();
    });
    // Update last backup time
    const now = Date.now();
    setLastBackupTime(userId, now);
    // Clean old backups
    await cleanOldBackups(userId);
    const stats = fs.statSync(backupPath);
    console.log(`Backup created: ${backupFilename} (${(stats.size / 1024).toFixed(1)} KB)`);
    return {
      success: true,
      filename: backupFilename,
      path: backupPath,
      size: stats.size,
      timestamp: new Date().toISOString(),
      tableCounts: tableCounts
    };
  } catch (error) {
    console.error(`Backup failed for user ${userId}:`, error.message);
    // FIX: remove a partially-written archive so a corrupt zip is never retained.
    if (fs.existsSync(backupPath)) {
      fs.unlinkSync(backupPath);
    }
    return { success: false, error: error.message };
  } finally {
    // Clean up the VACUUM snapshot on both success and failure (was duplicated before).
    if (fs.existsSync(tempDbPath)) {
      fs.unlinkSync(tempDbPath);
    }
  }
}
/**
 * Run a backup for every registered user, sequentially.
 * @returns {Promise<Array<object>>} One result per user, each tagged with userId.
 */
async function createAllBackups() {
  const results = [];
  for (const user of users.listUsers()) {
    const outcome = await createBackup(user.id);
    results.push({ userId: user.id, ...outcome });
  }
  return results;
}
/**
 * Delete a user's oldest backup zips, keeping only the `retention` newest
 * (ranked by file modification time).
 * @param {string} userId
 * @param {number} [retention=DEFAULT_RETENTION] - How many backups to keep.
 * @returns {Promise<{deleted: number}>} Count of files removed.
 */
async function cleanOldBackups(userId, retention = DEFAULT_RETENTION) {
  const userBackupDir = getUserBackupDir(userId);
  if (!fs.existsSync(userBackupDir)) {
    return { deleted: 0 };
  }
  // Collect backup zips with their mtimes, then rank newest first.
  const backups = [];
  for (const name of fs.readdirSync(userBackupDir)) {
    if (!name.startsWith("peek-backup-") || !name.endsWith(".zip")) {
      continue;
    }
    const fullPath = path.join(userBackupDir, name);
    backups.push({
      name,
      path: fullPath,
      mtime: fs.statSync(fullPath).mtime.getTime()
    });
  }
  backups.sort((a, b) => b.mtime - a.mtime);
  // Everything past the retention window gets removed.
  const stale = backups.slice(retention);
  for (const file of stale) {
    fs.unlinkSync(file.path);
    console.log(`Deleted old backup: ${file.name}`);
  }
  return { deleted: stale.length };
}
/**
 * List a user's backup archives, newest first.
 * @param {string} userId
 * @returns {Array<{filename: string, size: number, createdAt: string}>}
 *   Empty array when the user has no backup directory.
 */
function listBackups(userId) {
  const userBackupDir = getUserBackupDir(userId);
  if (!fs.existsSync(userBackupDir)) {
    return [];
  }
  const isBackupZip = (name) =>
    name.startsWith("peek-backup-") && name.endsWith(".zip");
  const entries = fs.readdirSync(userBackupDir).filter(isBackupZip).map((filename) => {
    const stats = fs.statSync(path.join(userBackupDir, filename));
    return {
      filename,
      size: stats.size,
      createdAt: stats.mtime.toISOString()
    };
  });
  // ISO-8601 strings sort lexicographically in chronological order,
  // so a plain string comparison gives newest-first.
  entries.sort((a, b) => {
    if (a.createdAt > b.createdAt) return -1;
    if (a.createdAt < b.createdAt) return 1;
    return 0;
  });
  return entries;
}
/**
 * Decide whether a user is due for a backup: true when no backup has ever
 * been recorded, or when the last one is at least BACKUP_INTERVAL_MS old.
 * @param {string} userId
 * @returns {boolean}
 */
function needsBackup(userId) {
  const last = getLastBackupTime(userId);
  if (!last) {
    return true;
  }
  return Date.now() - last >= BACKUP_INTERVAL_MS;
}
/**
 * Scan all users and back up each one whose last backup is stale (>24h),
 * running sequentially.
 * @returns {Promise<{backupCount: number}>} How many backups were performed.
 */
async function checkAndRunDailyBackups() {
  console.log("Checking for users needing backup...");
  let backupCount = 0;
  for (const user of users.listUsers()) {
    if (!needsBackup(user.id)) {
      continue;
    }
    console.log(`User ${user.id} needs backup (>24h since last backup)`);
    await createBackup(user.id);
    backupCount += 1;
  }
  const summary = backupCount === 0
    ? "No users need backup at this time"
    : `Completed ${backupCount} backup(s)`;
  console.log(summary);
  return { backupCount };
}
// Public API of the backup module.
module.exports = {
  createBackup,
  createAllBackups,
  cleanOldBackups,
  listBackups,
  needsBackup,
  checkAndRunDailyBackups,
  getLastBackupTime,
  setLastBackupTime,
  // Exposed for testing
  BACKUP_DIR,
  DEFAULT_RETENTION,
  BACKUP_INTERVAL_MS
};