1+ const fs = require ( 'fs' ) ;
2+ const archiver = require ( 'archiver' ) ;
3+ const crypto = require ( 'crypto' ) ;
4+ const path = require ( 'path' ) ;
5+ const decompress = require ( "decompress" ) ;
// In-memory tracking of in-flight backup jobs, keyed by backup UID.
const backups_processing = {};        // archiver instance per job
const backups_processing_fs = {};     // output write stream per job
const backups_processing_status = {}; // latest status object per job
9+
10+ exports . getBackupsList = ( ) => {
11+ if ( fs . existsSync ( "./backups" ) ) {
12+ read = fs . readdirSync ( "./backups" ) ;
13+ arr = [ ] ;
14+ read . forEach ( function ( fn ) {
15+ if ( path . extname ( fn ) == ".json" ) {
16+ rd = JSON . parse ( fs . readFileSync ( "./backups/" + fn ) . toString ( ) ) ;
17+ uid = rd [ 'uid' ] ;
18+ if ( typeof backups_processing_status [ uid ] !== "undefined" ) {
19+ rd [ 'processing_status' ] = backups_processing_status [ uid ] ;
20+ }
21+ rd [ 'size' ] = fs . lstatSync ( "./backups/" + fn . replace ( ".json" , ".zip" ) ) . size ;
22+ arr . push ( rd ) ;
23+ }
24+ } ) ;
25+ str = JSON . stringify ( arr ) ;
26+ if ( str == "[]" ) {
27+ ret = "none" ;
28+ } else {
29+ ret = JSON . stringify ( arr ) ;
30+ }
31+ } else {
32+ ret = "none" ;
33+ fs . mkdirSync ( "./backups" ) ;
34+ }
35+ return ret ;
36+ } ;
37+
38+ exports . restoreBackup = function ( fn , sn , cb ) {
39+ if ( fs . existsSync ( "./backups/" + fn ) ) {
40+ cfg = JSON . parse ( fs . readFileSync ( "./backups/" + fn . replace ( ".zip" , ".json" ) ) ) ;
41+ if ( cfg . type == "full" ) {
42+ fs . rmSync ( "./servers/" + sn , {
43+ recursive : true ,
44+ force : true
45+ } ) ;
46+ fs . mkdirSync ( "./servers/" + sn ) ;
47+ decompress ( "./backups/" + fn , "./servers/" + sn )
48+ . then ( ( files ) => {
49+ cb ( true ) ;
50+ } )
51+ . catch ( ( error ) => {
52+ cb ( error ) ;
53+ } ) ;
54+ } else {
55+ decompress ( "./backups/" + fn , "./servers/" + sn )
56+ . then ( ( files ) => {
57+ cb ( true ) ;
58+ } )
59+ . catch ( ( error ) => {
60+ cb ( error ) ;
61+ } ) ;
62+ }
63+ } else {
64+ cb ( false ) ;
65+ }
66+ } ;
67+
68+ exports . createNewBackup = ( bname , desc , type , sn , files = null ) => {
69+ date = new Date ( ) ;
70+ randuid = crypto . randomUUID ( ) . toString ( ) ;
71+ date_str = date . getDate ( ) . toString ( ) . padStart ( 2 , "0" ) + "_" + ( date . getMonth ( ) + 1 ) . toString ( ) . padStart ( 2 , "0" ) + "_" + date . getFullYear ( ) . toString ( ) . padStart ( 2 , "0" ) + "-" + date . getHours ( ) . toString ( ) . padStart ( 2 , "0" ) + "_" + date . getMinutes ( ) . toString ( ) . padStart ( 2 , "0" ) + "_" + date . getSeconds ( ) . toString ( ) . padStart ( 2 , "0" ) ;
72+ archname = "backup-" + type + "-" + date_str + ".zip" ;
73+ jsonf = {
74+ name : bname ,
75+ description : desc ,
76+ type : type ,
77+ archive_name : archname ,
78+ selected_files : files ,
79+ uid : randuid ,
80+ server : sn
81+ }
82+ cfgname = "backup-" + type + "-" + date_str + ".json" ;
83+ fs . writeFileSync ( "./backups/" + cfgname , JSON . stringify ( jsonf , null , "\t" ) ) ;
84+ backups_processing_fs [ randuid ] = fs . createWriteStream ( './backups/' + archname ) ;
85+ backups_processing [ randuid ] = archiver ( 'zip' , {
86+ zlib : {
87+ level : 5
88+ }
89+ } ) ;
90+ backups_processing_status [ randuid ] = {
91+ status : "started"
92+ }
93+ fsock = io . sockets . sockets ;
94+ for ( const socket of fsock ) {
95+ socket [ 1 ] . emit ( "handleUpdate" , {
96+ type : "backups_list" ,
97+ data : "started"
98+ } ) ;
99+ }
100+ console . log ( 'started creating of' + archname ) ;
101+ backups_processing_fs [ randuid ] . on ( 'close' , function ( ) {
102+ backups_processing_status [ randuid ] = {
103+ status : "completed"
104+ }
105+ console . log ( 'completed creating of' + archname ) ;
106+ fsock = io . sockets . sockets ;
107+ for ( const socket of fsock ) {
108+ socket [ 1 ] . emit ( "handleUpdate" , {
109+ type : "backups_list" ,
110+ data : "completed"
111+ } ) ;
112+ }
113+ } ) ;
114+ backups_processing_fs [ randuid ] . on ( 'end' , function ( ) {
115+ backups_processing_status [ randuid ] = {
116+ status : "completed"
117+ }
118+ fsock = io . sockets . sockets ;
119+ for ( const socket of fsock ) {
120+ socket [ 1 ] . emit ( "handleUpdate" , {
121+ type : "backups_list" ,
122+ data : "completed"
123+ } ) ;
124+ }
125+ console . log ( 'completed in creating of' + archname ) ;
126+ } ) ;
127+ backups_processing [ randuid ] . on ( 'warning' , function ( err ) {
128+ backups_processing_status [ randuid ] = {
129+ status : "failed" ,
130+ error : err
131+ }
132+ fsock = io . sockets . sockets ;
133+ for ( const socket of fsock ) {
134+ socket [ 1 ] . emit ( "handleUpdate" , {
135+ type : "backups_list" ,
136+ data : "failed"
137+ } ) ;
138+ }
139+ console . log ( 'failed creating of' + archname ) ;
140+ console . log ( err ) ;
141+ } ) ;
142+
143+ backups_processing [ randuid ] . on ( 'error' , function ( err ) {
144+ backups_processing_status [ randuid ] = {
145+ status : "failed" ,
146+ error : err
147+ }
148+ fsock = io . sockets . sockets ;
149+ for ( const socket of fsock ) {
150+ socket [ 1 ] . emit ( "handleUpdate" , {
151+ type : "backups_list" ,
152+ data : "failed"
153+ } ) ;
154+ }
155+ console . log ( 'failed creating of' + archname ) ;
156+ console . log ( err ) ;
157+ } ) ;
158+
159+ backups_processing [ randuid ] . on ( 'progress' , function ( pr ) {
160+ backups_processing_status [ randuid ] = {
161+ status : "processing" ,
162+ total_files : pr . entries . total ,
163+ proc_files : pr . entries . processed ,
164+ percent : pr . entries . total > 0 ? Math . round ( pr . entries . processed * 100.0 / pr . entries . total ) : - 1
165+ }
166+ fsock = io . sockets . sockets ;
167+ for ( const socket of fsock ) {
168+ socket [ 1 ] . emit ( "handleUpdate" , {
169+ type : "backups_list" ,
170+ data : "progress"
171+ } ) ;
172+ }
173+ } ) ;
174+
175+ backups_processing [ randuid ] . pipe ( backups_processing_fs [ randuid ] ) ;
176+ if ( type == "full" ) {
177+ rdd = fs . readdirSync ( './servers/' + sn ) ;
178+ backups_processing [ randuid ] . directory ( './servers/' + sn , false ) ;
179+ backups_processing [ randuid ] . finalize ( ) ;
180+ } else {
181+ files . forEach ( function ( file ) {
182+ if ( file . type == "directory" ) {
183+ backups_processing [ randuid ] . directory ( './servers/' + sn + "/" + file . name , false ) ;
184+ } else {
185+ backups_processing [ randuid ] . file ( './servers/' + sn + "/" + file . name , {
186+ name : file . name
187+ } ) ;
188+ }
189+ } ) ;
190+ backups_processing [ randuid ] . finalize ( ) ;
191+ }
192+ return "success" ;
193+ } ;