'use strict';

// Public interface of the backup task. Underscore-prefixed entries expose
// internal helpers (presumably for tests — they are not part of the public API).
exports = module.exports = {
    fullBackup,
    restore,
    backupApp,
    downloadApp,
    backupMail,
    downloadMail,
    upload,
    _restoreFsMetadata: restoreFsMetadata,
    _saveFsMetadata: saveFsMetadata,
};
const apps = require ( './apps.js' ) ,
assert = require ( 'assert' ) ,
async = require ( 'async' ) ,
backups = require ( './backups.js' ) ,
BoxError = require ( './boxerror.js' ) ,
constants = require ( './constants.js' ) ,
crypto = require ( 'crypto' ) ,
DataLayout = require ( './datalayout.js' ) ,
database = require ( './database.js' ) ,
debug = require ( 'debug' ) ( 'box:backuptask' ) ,
fs = require ( 'fs' ) ,
once = require ( 'once' ) ,
path = require ( 'path' ) ,
paths = require ( './paths.js' ) ,
progressStream = require ( 'progress-stream' ) ,
safe = require ( 'safetydance' ) ,
services = require ( './services.js' ) ,
settings = require ( './settings.js' ) ,
shell = require ( './shell.js' ) ,
storage = require ( './storage.js' ) ,
syncer = require ( './syncer.js' ) ,
tar = require ( 'tar-fs' ) ,
TransformStream = require ( 'stream' ) . Transform ,
zlib = require ( 'zlib' ) ,
util = require ( 'util' ) ;
// helper script run via shell.sudo() (see runBackupUpload) so the upload can traverse files owned by other users
const BACKUP_UPLOAD_CMD = path.join(__dirname, 'scripts/backupupload.js');

// settings.getBackupConfig is promise based; wrap it for the callback-style code in this file
const getBackupConfig = util.callbackify(settings.getBackupConfig);
// Returns true when 'app' is in a state where a backup can be attempted.
function canBackupApp(app) {
    // stopped apps cannot be backed up because their addons might be down (redis)
    if (app.runState === apps.RSTATE_STOPPED) return false;

    // We used to check app health here, but that doesn't work for stopped apps. It's
    // better to just fail and inform the user if the backup fails because the app's
    // addons have not been set up yet.
    const backupableStates = [
        apps.ISTATE_INSTALLED,
        apps.ISTATE_PENDING_CONFIGURE,
        apps.ISTATE_PENDING_BACKUP, // called from apptask
        apps.ISTATE_PENDING_UPDATE  // called from apptask
    ];

    return backupableStates.includes(app.installationState);
}
// Encrypts each '/'-separated component of filePath independently with aes-256-cbc.
// The IV is derived from an HMAC of the plaintext component, so the output is
// deterministic — required for the sync (copy) logic to compare remote paths.
function encryptFilePath(filePath, encryption) {
    assert.strictEqual(typeof filePath, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const encryptPart = (part) => {
        // deterministic iv: hmac of the plaintext component
        const iv = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'))
            .update(part)
            .digest()
            .slice(0, 16);
        const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
        const cipherText = Buffer.concat([iv, cipher.update(part), cipher.final()]);

        return cipherText.toString('base64') // ensures path is valid
            .replace(/\//g, '-') // base64 '/' conflicts with the path separator
            .replace(/=/g, '');  // strip trailing '=' padding. only needed when concatenating base64 strings, which we don't
    };

    return filePath.split('/').map(encryptPart).join('/');
}
// Inverse of encryptFilePath. Each '/'-separated component is base64(iv || ciphertext)
// with '/' replaced by '-' and '=' padding stripped. The iv doubles as a truncated hmac
// of the plaintext and is verified to detect tampering.
// Returns { result } on success or { error: BoxError(CRYPTO_ERROR) } on failure.
function decryptFilePath(filePath, encryption) {
    assert.strictEqual(typeof filePath, 'string');
    assert.strictEqual(typeof encryption, 'object');

    let decryptedParts = [];
    for (let part of filePath.split('/')) {
        part = part.replace(/-/g, '/'); // undo the '-' substitution done when encrypting
        // restore the stripped base64 '=' padding. the previous code computed the count
        // incorrectly (Array(n).join yields n-1 chars); node's base64 decoder tolerates
        // missing padding, so this is only for hygiene
        part += '='.repeat((4 - part.length % 4) % 4);
        try {
            const buffer = Buffer.from(part, 'base64');
            const iv = buffer.slice(0, 16);
            const decipher = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
            const plainText = decipher.update(buffer.slice(16));
            const plainTextString = Buffer.concat([plainText, decipher.final()]).toString('utf8');
            // verify that the iv matches the hmac of the decrypted plaintext (see encryptFilePath)
            const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
            if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };
            decryptedParts.push(plainTextString);
        } catch (error) {
            debug(`Error decrypting part ${part} of path ${filePath}:`, error);
            return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
        }
    }
    return { result: decryptedParts.join('/') };
}
// Transform stream producing: 'CBV2' magic (4) || iv (16) || aes-256-cbc ciphertext || hmac-sha256 trailer (32).
// The hmac covers everything that precedes it (magic, iv and ciphertext).
class EncryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._headerPushed = false;
        this._iv = crypto.randomBytes(16);
        this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
    }

    _emit(buffer) { // push output and fold it into the mac
        this.push(buffer);
        this._hmac.update(buffer);
    }

    pushHeaderIfNeeded() {
        if (this._headerPushed) return;
        this._headerPushed = true;
        this._emit(Buffer.from('CBV2'));
        this._emit(this._iv);
    }

    _transform(chunk, ignoredEncoding, callback) {
        this.pushHeaderIfNeeded();
        try {
            this._emit(this._cipher.update(chunk));
            callback();
        } catch (error) {
            callback(error);
        }
    }

    _flush(callback) {
        try {
            this.pushHeaderIfNeeded(); // a 0-length input still gets a header
            this._emit(this._cipher.final());
            callback(null, this._hmac.digest()); // trailing 32 bytes
        } catch (error) {
            callback(error);
        }
    }
}
// Inverse of EncryptStream. Expects: 'CBV2' magic (4) || iv (16) || ciphertext || hmac-sha256 trailer (32).
// Because the trailer position is only known at end-of-stream, the last 32 bytes seen so far
// are always held back in _buffer until _flush, where they are verified against the hmac.
class DecryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._key = Buffer.from(encryption.dataKey, 'hex');
        this._header = Buffer.alloc(0); // accumulates the 20 byte header (magic + iv)
        this._decipher = null; // created once the iv is known
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
        this._buffer = Buffer.alloc(0); // trailing bytes that may turn out to be the hmac
    }

    _transform(chunk, ignoredEncoding, callback) {
        const needed = 20 - this._header.length; // 4 for magic, 16 for iv
        if (this._header.length !== 20) { // not gotten header yet
            this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
            if (this._header.length !== 20) return callback(); // still not enough data
            // note: this previously used 'new Buffer.from(...)' which misuses Buffer.from as a constructor
            if (!this._header.slice(0, 4).equals(Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));
            const iv = this._header.slice(4);
            this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
            this._hmac.update(this._header);
        }
        this._buffer = Buffer.concat([this._buffer, chunk.slice(needed)]);
        if (this._buffer.length < 32) return callback(); // hmac trailer length is 32; might be all we have
        try {
            // everything but the (possible) trailing hmac is ciphertext
            const cipherText = this._buffer.slice(0, -32);
            this._hmac.update(cipherText);
            const plainText = this._decipher.update(cipherText);
            this._buffer = this._buffer.slice(-32); // keep the candidate trailer for the next round
            callback(null, plainText);
        } catch (error) {
            callback(error);
        }
    }

    _flush(callback) {
        if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));
        try {
            if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));
            const plainText = this._decipher.final();
            callback(null, plainText);
        } catch (error) {
            callback(error);
        }
    }
}
// Creates a read stream for sourceFile, optionally encrypting the contents.
// Returns the progress stream so callers get 'progress', 'open' and translated
// 'error' (BoxError) events from a single object.
function createReadStream(sourceFile, encryption) {
    assert.strictEqual(typeof sourceFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const fileStream = fs.createReadStream(sourceFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    fileStream.on('error', function (error) {
        debug(`createReadStream: read stream error at ${sourceFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
    });
    fileStream.on('open', () => ps.emit('open'));

    if (!encryption) return fileStream.pipe(ps);

    const encryptStream = new EncryptStream(encryption);
    encryptStream.on('error', function (error) {
        debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
        ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
    });
    return fileStream.pipe(encryptStream).pipe(ps);
}
// Creates a write stream to destFile, optionally decrypting the incoming data.
// Returns the progress stream that callers write into; it emits 'progress',
// 'done' (when the file write has completed) and translated 'error' (BoxError).
function createWriteStream(destFile, encryption) {
    assert.strictEqual(typeof destFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const fileStream = fs.createWriteStream(destFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    fileStream.on('error', function (error) {
        debug(`createWriteStream: write stream error ${destFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
    });
    fileStream.on('finish', function () {
        debug('createWriteStream: done.');
        // a separate event is used because ps is a through2 stream whose 'finish'
        // indicates end of the input and not completion of the file write
        ps.emit('done');
    });

    if (encryption) {
        const decryptStream = new DecryptStream(encryption);
        decryptStream.on('error', function (error) {
            debug(`createWriteStream: decrypt stream error ${destFile}`, error);
            ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
        });
        ps.pipe(decryptStream).pipe(fileStream);
    } else {
        ps.pipe(fileStream);
    }

    return ps;
}
// Creates a gzipped (and optionally encrypted) tar stream of the dataLayout.
// Calls back with a progress stream that also forwards 'error' events (as BoxError).
function tarPack(dataLayout, encryption, callback) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof encryption, 'object');
    assert.strictEqual(typeof callback, 'function');

    var pack = tar.pack('/', {
        dereference: false, // pack the symlink and not what it points to
        entries: dataLayout.localPaths(),
        ignoreStatError: (path, err) => {
            debug(`tarPack: error stat'ing ${path} - ${err.code}`);
            return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
        },
        map: function (header) {
            header.name = dataLayout.toRemotePath(header.name);
            // the tar pax format allows us to encode filenames > 100 chars and size > 8GB (see #640)
            // https://www.systutorials.com/docs/linux/man/5-star/
            // (fixed: this used to compare header.name - a string - against 99, which is always false)
            if (header.size > 8589934590 || header.name.length > 99) header.pax = { size: header.size };
            return header;
        },
        strict: false // do not error for unknown types (skip fifo, char/block devices)
    });

    var gzip = zlib.createGzip({});
    var ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds

    pack.on('error', function (error) {
        debug('tarPack: tar stream error.', error);
        ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });
    gzip.on('error', function (error) {
        debug('tarPack: gzip stream error.', error);
        ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });

    if (encryption) {
        const encryptStream = new EncryptStream(encryption);
        encryptStream.on('error', function (error) {
            debug('tarPack: encrypt stream error.', error);
            ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
        });
        pack.pipe(gzip).pipe(encryptStream).pipe(ps);
    } else {
        pack.pipe(gzip).pipe(ps);
    }

    return callback(null, ps);
}
// Mirrors the local dataLayout to the remote backup 'backupId' by processing
// add/remove/removedir tasks produced by syncer.sync(). File paths are encrypted
// when backupConfig.encryption is set; contents are encrypted by createReadStream().
// progressCallback receives { message } updates; callback gets a BoxError on failure.
function sync(backupConfig, backupId, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    // the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
    const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);

    syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
        debug('sync: processing task: %j', task);
        // the empty task.path is special to signify the directory
        const destPath = task.path && backupConfig.encryption ? encryptFilePath(task.path, backupConfig.encryption) : task.path;
        const backupFilePath = path.join(storage.getBackupFilePath(backupConfig, backupId, backupConfig.format), destPath);

        if (task.operation === 'removedir') {
            debug(`Removing directory ${backupFilePath}`);
            return storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath)
                .on('progress', (message) => progressCallback({ message }))
                .on('done', iteratorCallback);
        } else if (task.operation === 'remove') {
            debug(`Removing ${backupFilePath}`);
            return storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, iteratorCallback);
        }

        var retryCount = 0;
        async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
            retryCallback = once(retryCallback); // protect again upload() erroring much later after read stream error
            ++retryCount;
            if (task.operation === 'add') {
                progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
                debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
                var stream = createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
                stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
                stream.on('progress', function (progress) {
                    const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                    if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
                    progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` });
                });
                // only create the destination path when we have confirmation that the source is available. otherwise, we end up with
                // files owned as 'root' and the cp later will fail
                stream.on('open', function () {
                    storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
                        debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
                        retryCallback(error);
                    });
                });
            }
        }, iteratorCallback);
    }, concurrency, function (error) {
        if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
        callback();
    });
}
// this is not part of 'snapshotting' because we need root access to traverse
// Records empty directories, executable files and symlinks of the dataLayout in
// metadataFile so that restoreFsMetadata() can recreate them after a per-file
// download (which presumably does not preserve these attributes — see downloadDir).
// Throws a BoxError(FS_ERROR) on failure.
async function saveFsMetadata(dataLayout, metadataFile) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof metadataFile, 'string');

    // contains paths prefixed with './'
    let metadata = {
        emptyDirs: [],
        execFiles: [],
        symlinks: []
    };

    // we assume small number of files. execSync will raise a ENOBUFS error after maxBuffer
    for (let lp of dataLayout.localPaths()) {
        // the path is single-quoted so that 'find' does not word-split paths containing whitespace
        const emptyDirs = safe.child_process.execSync(`find '${lp}' -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
        if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));

        const execFiles = safe.child_process.execSync(`find '${lp}' -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
        if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));

        const symlinks = safe.child_process.execSync(`find '${lp}' -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
        if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
            const target = safe.fs.readlinkSync(sl);
            return { path: dataLayout.toRemotePath(sl), target };
        }));
    }

    if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
}
// this function is called via backupupload (since it needs root to traverse app's directory)
// Uploads the files described by dataLayoutString as backup 'backupId'.
// format 'tgz' streams one gzipped tarball; any other format syncs file-by-file
// after capturing fs metadata (empty dirs, exec bits, symlinks) separately.
function upload(backupId, format, dataLayoutString, progressCallback, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof format, 'string');
    assert.strictEqual(typeof dataLayoutString, 'string');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`upload: id ${backupId} format ${format} dataLayout ${dataLayoutString}`);

    const dataLayout = DataLayout.fromString(dataLayoutString);

    getBackupConfig(function (error, backupConfig) {
        if (error) return callback(error);

        storage.api(backupConfig.provider).checkPreconditions(backupConfig, dataLayout, function (error) {
            if (error) return callback(error);

            if (format === 'tgz') {
                async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
                    retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error

                    tarPack(dataLayout, backupConfig.encryption, function (error, tarStream) {
                        if (error) return retryCallback(error);

                        tarStream.on('progress', function (progress) {
                            const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                            if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
                            progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
                        });
                        tarStream.on('error', retryCallback); // already returns BoxError

                        storage.api(backupConfig.provider).upload(backupConfig, storage.getBackupFilePath(backupConfig, backupId, format), tarStream, retryCallback);
                    });
                }, callback);
            } else {
                async.series([
                    saveFsMetadata.bind(null, dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`),
                    sync.bind(null, backupConfig, backupId, dataLayout, progressCallback)
                ], callback);
            }
        });
    });
}
// Extracts inStream (a gzipped, optionally encrypted tarball) into the dataLayout.
// Calls back with a progress stream that emits 'progress', 'done' (extraction
// complete) and a single translated 'error' (BoxError) event.
function tarExtract(inStream, dataLayout, encryption, callback) {
    assert.strictEqual(typeof inStream, 'object');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof encryption, 'object');
    assert.strictEqual(typeof callback, 'function');

    const gunzip = zlib.createGunzip({});
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
    const extract = tar.extract('/', {
        map: function (header) {
            header.name = dataLayout.toLocalPath(header.name);
            return header;
        },
        dmode: 500 // ensure directory is writable (NOTE(review): 500 here is decimal, i.e. 0o764 — confirm intended mode)
    });

    // destroy the input and surface exactly one error on ps, which is what callers listen on
    const emitError = once((error) => {
        inStream.destroy();
        ps.emit('error', error);
    });

    // forwards a stream's 'error' as a single BoxError on ps
    const forwardError = (name, streamObj, messagePrefix) => {
        streamObj.on('error', function (error) {
            debug(`tarExtract: ${name} stream error.`, error);
            emitError(new BoxError(BoxError.EXTERNAL_ERROR, `${messagePrefix}${error.message}`));
        });
    };

    forwardError('input', inStream, '');
    forwardError('gunzip', gunzip, '');
    forwardError('extract', extract, '');

    extract.on('finish', function () {
        debug('tarExtract: done.');
        // a separate event is used because ps is a through2 stream whose 'finish'
        // indicates end of inStream and not completion of the extraction
        ps.emit('done');
    });

    if (encryption) {
        const decryptStream = new DecryptStream(encryption);
        forwardError('decrypt', decryptStream, 'Failed to decrypt: ');
        inStream.pipe(ps).pipe(decryptStream).pipe(gunzip).pipe(extract);
    } else {
        inStream.pipe(ps).pipe(gunzip).pipe(extract);
    }

    callback(null, ps);
}
// Recreates filesystem state recorded by saveFsMetadata(): empty directories,
// executable bits (0755) and symlinks. Called after a per-file download.
// Throws a BoxError if the metadata file is missing/corrupt or an fs op fails.
async function restoreFsMetadata(dataLayout, metadataFile) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof metadataFile, 'string');

    debug(`Recreating empty directories in ${dataLayout.toString()}`);

    var metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
    if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message);
    var metadata = safe.JSON.parse(metadataJson);
    if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message);

    for (const emptyDir of metadata.emptyDirs) {
        const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true }));
        if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`);
    }

    for (const execFile of metadata.execFiles) {
        const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8)));
        if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`);
    }

    // the fallback to [] handles metadata without a 'symlinks' field (presumably older backups)
    for (const symlink of (metadata.symlinks || [])) {
        if (!symlink.target) continue;
        // the path may not exist if we had a directory full of symlinks
        const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
        if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
        const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
        if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
    }
}
// Downloads every remote file under backupFilePath into the local dataLayout.
// Counterpart of sync() for the per-file backup format: file names are decrypted
// here (decryptFilePath), contents are decrypted by createWriteStream().
// progressCallback receives { message } updates.
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);

    // downloads a single listing entry into its (decrypted) local path, with retries
    function downloadFile(entry, done) {
        let relativePath = path.relative(backupFilePath, entry.fullPath);
        if (backupConfig.encryption) {
            const { error, result } = decryptFilePath(relativePath, backupConfig.encryption);
            if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
            relativePath = result;
        }
        const destFilePath = dataLayout.toLocalPath('./' + relativePath);

        fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
            if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));

            async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
                storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
                    if (error) {
                        progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
                        return retryCallback(error);
                    }

                    let destStream = createWriteStream(destFilePath, backupConfig.encryption);

                    // protect against multiple errors. must destroy the write stream so that a previous retry does not write
                    let closeAndRetry = once((error) => {
                        if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
                        else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });
                        sourceStream.destroy();
                        destStream.destroy();
                        retryCallback(error);
                    });

                    destStream.on('progress', function (progress) {
                        const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                        if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
                        progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
                    });
                    destStream.on('error', closeAndRetry);
                    sourceStream.on('error', closeAndRetry);

                    progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });

                    // 'done' (not 'finish') signals that the underlying file write completed - see createWriteStream
                    sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
                });
            }, done);
        });
    }

    storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
        // https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
        const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
        async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
    }, callback);
}
// Downloads backup 'backupId' into the local dataLayout. 'tgz' backups are
// streamed through tarExtract (with retries); any other format is fetched
// file-by-file and its recorded fs metadata restored afterwards.
function download(backupConfig, backupId, format, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof format, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`download: Downloading ${backupId} of format ${format} to ${dataLayout.toString()}`);

    const backupFilePath = storage.getBackupFilePath(backupConfig, backupId, format);

    if (format === 'tgz') {
        async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
            progressCallback({ message: `Downloading backup ${backupId}` });

            storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
                if (error) return retryCallback(error);

                tarExtract(sourceStream, dataLayout, backupConfig.encryption, function (error, ps) {
                    if (error) return retryCallback(error);

                    ps.on('progress', function (progress) {
                        const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                        if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
                        progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
                    });
                    ps.on('error', retryCallback);
                    ps.on('done', retryCallback);
                });
            });
        }, callback);
    } else {
        downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, async function (error) {
            if (error) return callback(error);
            // recreate empty dirs, exec bits and symlinks recorded at backup time (see saveFsMetadata)
            [error] = await safe(restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`));
            callback(error);
        });
    }
}
// Restores the box (platform) data from backup 'backupId': downloads into the
// box data dir, imports the database dump and re-initializes the settings cache.
// Throws on failure.
async function restore(backupConfig, backupId, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    // resolve the real path; the safe variant returns a falsy value on error (details in safe.error)
    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    const dataLayout = new DataLayout(boxDataDir, []);

    await util.promisify(download)(backupConfig, backupId, backupConfig.format, dataLayout, progressCallback);

    debug('restore: download completed, importing database');

    await database.importFromFile(`${dataLayout.localRoot()}/box.mysqldump`);

    debug('restore: database imported');

    await settings.initCache();
}
// Downloads the backup named by restoreConfig.backupId into the app's data directory.
// restoreConfig may carry its own backupConfig; otherwise the box's settings are used.
async function downloadApp(app, restoreConfig, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);

    const startTime = new Date();

    let backupConfig = restoreConfig.backupConfig;
    if (!backupConfig) backupConfig = await settings.getBackupConfig();

    await util.promisify(download)(backupConfig, restoreConfig.backupId, restoreConfig.backupFormat, dataLayout, progressCallback);

    debug('downloadApp: time: %s', (new Date() - startTime)/1000);
}
// Uploads a snapshot by running scripts/backupupload.js in a separate sudo'd
// child process with an IPC channel. The child reports { message } progress
// events and, on a handled failure, a final { result } error string together
// with exit code 50. Any other non-zero exit (or death by signal, where
// error.code is null) is treated as a crash.
// uploadConfig - { backupId, backupConfig, dataLayout, progressTag }
// progressCallback - receives { message } (tagged with progressTag)
// callback - (error) standard node callback
function runBackupUpload(uploadConfig, progressCallback, callback) {
    assert.strictEqual(typeof uploadConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    const { backupId, backupConfig, dataLayout, progressTag } = uploadConfig;
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressTag, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    let result = ''; // the script communicates error result as a string

    // give the child a larger V8 heap when a big memory limit is configured:
    // heap = memoryLimit (in MB) minus 256, capped at 8192
    // https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
    const envCopy = Object.assign({}, process.env);
    if (backupConfig.memoryLimit && backupConfig.memoryLimit >= 2 * 1024 * 1024 * 1024) {
        const heapSize = Math.min((backupConfig.memoryLimit / 1024 / 1024) - 256, 8192);
        debug(`runBackupUpload: adjusting heap size to ${heapSize}M`);
        envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
    }

    shell.sudo(`backup-${backupId}`, [BACKUP_UPLOAD_CMD, backupId, backupConfig.format, dataLayout.toString()], { env: envCopy, preserveEnv: true, ipc: true }, function (error) {
        if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
            return callback(new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed'));
        } else if (error && error.code === 50) { // exited with error
            // 'result' was filled in by the last { result } IPC message below
            return callback(new BoxError(BoxError.EXTERNAL_ERROR, result));
        }

        callback();
    }).on('message', function (progress) { // this is { message } or { result }
        if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });

        debug(`runBackupUpload: result - ${JSON.stringify(progress)}`);
        result = progress.result;
    });
}
2021-09-16 13:59:03 -07:00
// Snapshots the box state by dumping the database into BOX_DATA_DIR, where the
// box snapshot upload will pick it up.
async function snapshotBox(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: 'Snapshotting box' });

    const start = Date.now();
    await database.exportToFile(`${paths.BOX_DATA_DIR}/box.mysqldump`);
    debug(`snapshotBox: took ${(Date.now() - start) / 1000} seconds`);
}
2021-09-16 13:59:03 -07:00
// Takes a fresh box snapshot (database dump) and uploads it under the fixed
// id 'snapshot/box', then records the snapshot info for later rotation.
async function uploadBoxSnapshot(backupConfig, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotBox(progressCallback);

    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading box snapshot' });

    const start = Date.now();
    await util.promisify(runBackupUpload)({
        backupId: 'snapshot/box',
        backupConfig,
        dataLayout: new DataLayout(boxDataDir, []),
        progressTag: 'box'
    }, progressCallback);
    debug(`uploadBoxSnapshot: took ${(Date.now() - start) / 1000} seconds`);

    // remember what the current snapshot contains so rotation can reference it
    await backups.setSnapshotInfo('box', { timestamp: new Date().toISOString(), format: backupConfig.format });
}
2021-09-26 18:37:04 -07:00
// Server-side copies a backup from sourceBackupId to destBackupId on the
// storage provider, then marks the destination backup record as NORMAL (or
// ERROR when the copy failed). Adapts the provider's event emitter
// ('progress' / 'done') into a Promise.
// options - { preserveSecs? } retention override stored on the dest record
// Rejection precedence: a copy failure wins over a db update failure.
async function copy(backupConfig, sourceBackupId, destBackupId, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof sourceBackupId, 'string');
    assert.strictEqual(typeof destBackupId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const format = backupConfig.format;

    return new Promise((resolve, reject) => {
        const startTime = new Date();
        const copyEvents = storage.api(backupConfig.provider).copy(backupConfig, storage.getBackupFilePath(backupConfig, sourceBackupId, format), storage.getBackupFilePath(backupConfig, destBackupId, format));
        copyEvents.on('progress', (message) => progressCallback({ message }));
        copyEvents.on('done', async function (copyBackupError) {
            // update the dest record's state regardless of the copy outcome
            const state = copyBackupError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
            const [error] = await safe(backups.update(destBackupId, { preserveSecs: options.preserveSecs || 0, state }));
            if (copyBackupError) return reject(copyBackupError);
            if (error) return reject(error);

            debug(`copy: copied successfully to id ${destBackupId}. Took ${(new Date() - startTime) / 1000} seconds`);

            resolve();
        });
    });
}
// Rotates the current box snapshot into a permanent, tagged backup: records it
// in the database (state CREATING) and server-side copies 'snapshot/box' to
// the new id. dependsOn lists the app/mail backup ids this backup references.
// Returns the new backup id.
async function rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert(Array.isArray(dependsOn));
    assert.strictEqual(typeof progressCallback, 'function');

    const backupId = `${tag}/box_v${constants.VERSION}`;

    debug(`rotateBoxBackup: rotating to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_BOX,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_BOX,
        dependsOn,
        manifest: null,
        format: backupConfig.format
    });

    await copy(backupConfig, 'snapshot/box', backupId, options, progressCallback);

    return backupId;
}
2021-09-26 18:37:04 -07:00
// Uploads a fresh box snapshot and rotates it into a permanent tagged backup.
// dependsOn - backup ids (apps/mail) this box backup references.
// Returns the new box backup id.
async function backupBox(dependsOn, tag, options, progressCallback) {
    assert(Array.isArray(dependsOn));
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupConfig = await settings.getBackupConfig();

    await uploadBoxSnapshot(backupConfig, progressCallback);

    return await rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback);
}
// Rotates an app's snapshot into a permanent, tagged backup: records it in
// the database and server-side copies 'snapshot/app_<id>' to the new id.
// Returns the new backup id.
async function rotateAppBackup(backupConfig, app, tag, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const snapshotInfo = backups.getSnapshotInfo(app.id);
    // older snapshots kept the manifest inside restoreConfig (compat)
    const manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest;
    const backupId = `${tag}/app_${app.fqdn}_v${manifest.version}`;

    debug(`rotateAppBackup: rotating ${app.fqdn} to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null,
        packageVersion: manifest.version,
        type: backups.BACKUP_TYPE_APP,
        state: backups.BACKUP_STATE_CREATING,
        identifier: app.id,
        dependsOn: [],
        manifest,
        format: backupConfig.format
    });

    await copy(backupConfig, `snapshot/app_${app.id}`, backupId, options, progressCallback);

    return backupId;
}
2021-09-16 13:59:03 -07:00
// Backs up a single app. When options.snapshotOnly is set, only the snapshot
// is taken/uploaded; otherwise the snapshot is also rotated into a tagged
// backup. Returns the new backup id (or null when a non-backupable app had no
// prior backup to reuse). Previously the id from backupAppWithTag was
// discarded; returning it matches backupMail() and is backward compatible.
async function backupApp(app, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (options.snapshotOnly) return await snapshotApp(app, progressCallback);

    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g, '');

    debug(`backupApp: backing up ${app.fqdn} with tag ${tag}`);

    return await backupAppWithTag(app, tag, options, progressCallback);
}
2021-09-16 13:59:03 -07:00
// Snapshots an app: saves its config and asks the addon services to dump
// their state into the app's data directory.
async function snapshotApp(app, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: `Snapshotting app ${app.fqdn}` });

    const start = Date.now();
    await apps.backupConfig(app);
    await services.backupAddons(app, app.manifest.addons);
    debug(`snapshotApp: ${app.fqdn} took ${(Date.now() - start) / 1000} seconds`);
}
2021-09-16 13:59:03 -07:00
// Snapshots an app and uploads it under the fixed id 'snapshot/app_<id>',
// then records the snapshot info (timestamp, manifest, format) for rotation.
async function uploadAppSnapshot(backupConfig, app, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotApp(app, progressCallback);

    const backupId = util.format('snapshot/app_%s', app.id);
    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`);

    // an external dataDir (if any) maps onto the 'data' subtree of the backup
    const mounts = app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : [];

    progressCallback({ message: `Uploading app snapshot ${app.fqdn}` });

    const start = Date.now();
    await util.promisify(runBackupUpload)({
        backupId,
        backupConfig,
        dataLayout: new DataLayout(appDataDir, mounts),
        progressTag: app.fqdn
    }, progressCallback);
    debug(`uploadAppSnapshot: ${app.fqdn} upload with id ${backupId}. ${(Date.now() - start) / 1000} seconds`);

    await backups.setSnapshotInfo(app.id, { timestamp: new Date().toISOString(), manifest: app.manifest, format: backupConfig.format });
}
2021-09-16 13:59:03 -07:00
// Backs up an app under the given tag. Apps that cannot be backed up (stopped,
// or not fully installed) reuse their most recent good backup instead; returns
// that id, the freshly rotated backup id, or null when nothing can be reused.
async function backupAppWithTag(app, tag, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (!canBackupApp(app)) {
        const existing = await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1);
        return existing.length === 0 ? null : existing[0].id; // null means no backup to re-use
    }

    const backupConfig = await settings.getBackupConfig();

    await uploadAppSnapshot(backupConfig, app, progressCallback);

    return await rotateAppBackup(backupConfig, app, tag, options, progressCallback);
}
2021-09-26 18:37:04 -07:00
// Uploads the mail data directory under the fixed id 'snapshot/mail' and
// records the snapshot info for later rotation.
async function uploadMailSnapshot(backupConfig, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading mail snapshot' });

    const start = Date.now();
    await util.promisify(runBackupUpload)({
        backupId: 'snapshot/mail',
        backupConfig,
        dataLayout: new DataLayout(mailDataDir, []),
        progressTag: 'mail'
    }, progressCallback);
    debug(`uploadMailSnapshot: took ${(Date.now() - start) / 1000} seconds`);

    await backups.setSnapshotInfo('mail', { timestamp: new Date().toISOString(), format: backupConfig.format });
}
// Rotates the mail snapshot into a permanent, tagged backup: records it in
// the database and server-side copies 'snapshot/mail' to the new id.
// Returns the new backup id.
async function rotateMailBackup(backupConfig, tag, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupId = `${tag}/mail_v${constants.VERSION}`;

    debug(`rotateMailBackup: rotating to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_MAIL,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_MAIL,
        dependsOn: [],
        manifest: null,
        format: backupConfig.format
    });

    await copy(backupConfig, 'snapshot/mail', backupId, options, progressCallback);

    return backupId;
}
// Uploads a fresh mail snapshot and rotates it into a permanent backup under
// the given tag. Returns the new backup id.
async function backupMailWithTag(tag, options, progressCallback) {
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    debug(`backupMailWithTag: backing up mail with tag ${tag}`);

    const backupConfig = await settings.getBackupConfig();

    await uploadMailSnapshot(backupConfig, progressCallback);

    return await rotateMailBackup(backupConfig, tag, options, progressCallback);
}
// Backs up mail under a freshly generated timestamp tag. Returns the backup id.
async function backupMail(options, progressCallback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    // e.g. 2021-09-26T18:37:04.123Z -> 2021-09-26-183704-123
    const now = new Date();
    const tag = now.toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g, '');

    debug(`backupMail: backing up mail with tag ${tag}`);

    return await backupMailWithTag(tag, options, progressCallback);
}
// Downloads a mail backup into the mail data directory.
// restoreConfig - { backupId, backupFormat, backupConfig? }; an explicit
// backupConfig takes precedence over the box-wide backup settings.
async function downloadMail(restoreConfig, progressCallback) {
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    const start = Date.now();
    const backupConfig = restoreConfig.backupConfig || await settings.getBackupConfig();

    await util.promisify(download)(backupConfig, restoreConfig.backupId, restoreConfig.backupFormat, new DataLayout(mailDataDir, []), progressCallback);
    debug('downloadMail: time: %s', (Date.now() - start) / 1000);
}
// this function is called from external process. calling process is expected to have a lock
// Backs up every app (that has backups enabled), then mail, then the box
// itself; the box backup depends on the app and mail backup ids. Returns the
// box backup id.
async function fullBackup(options, progressCallback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g, '');

    const allApps = await apps.list();

    let percent = 1;
    const step = 100 / (allApps.length + 3); // apps + mail + box (extra headroom keeps percent under 100)

    const appBackupIds = [];
    for (const app of allApps) {
        progressCallback({ percent: percent, message: `Backing up ${app.fqdn}` });
        percent += step;

        if (!app.enableBackup) {
            debug(`fullBackup: skipped backup ${app.fqdn}`);
            continue; // BUGFIX: was 'return', which aborted the entire full backup (skipping mail and box) when one app had backups disabled
        }

        const startTime = new Date();
        const appBackupId = await backupAppWithTag(app, tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));
        debug(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime) / 1000} seconds`);
        if (appBackupId) appBackupIds.push(appBackupId); // backupId can be null if in BAD_STATE and never backed up
    }

    progressCallback({ percent: percent, message: 'Backing up mail' });
    percent += step;
    const mailBackupId = await backupMailWithTag(tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));

    progressCallback({ percent: percent, message: 'Backing up system data' });
    percent += step;

    // the box backup records the app/mail backups it belongs with
    const dependsOn = appBackupIds.concat(mailBackupId);
    const backupId = await backupBox(dependsOn, tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));

    return backupId;
}