Files
cloudron-box/src/backuptask.js
Girish Ramakrishnan c7ddbea8ed restore: download mail backup in restore phase
if we download it in the platform start phase, there is no way to
give feedback to the user. so it's best to show the restore UI and
not redirect to the dashboard.
2021-11-03 12:10:40 -07:00

1066 lines
46 KiB
JavaScript

'use strict';

// Backup/restore task implementation. The exported functions are the task entry
// points; the underscore-prefixed names appear to be exposed for tests only.
exports = module.exports = {
    fullBackup,
    restore,
    backupApp,
    downloadApp,
    backupMail,
    downloadMail,
    upload,

    _restoreFsMetadata: restoreFsMetadata,
    _saveFsMetadata: saveFsMetadata,
};
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
backups = require('./backups.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
crypto = require('crypto'),
DataLayout = require('./datalayout.js'),
database = require('./database.js'),
debug = require('debug')('box:backuptask'),
fs = require('fs'),
once = require('once'),
path = require('path'),
paths = require('./paths.js'),
progressStream = require('progress-stream'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
storage = require('./storage.js'),
syncer = require('./syncer.js'),
tar = require('tar-fs'),
TransformStream = require('stream').Transform,
zlib = require('zlib'),
util = require('util');
// helper script that re-runs upload() as root (it needs root to traverse app directories; see runBackupUpload)
const BACKUP_UPLOAD_CMD = path.join(__dirname, 'scripts/backupupload.js');

// callback-style wrapper since most of this file is callback based
const getBackupConfig = util.callbackify(settings.getBackupConfig);
// Returns true when the app is in a state where taking a backup is possible.
function canBackupApp(app) {
    // stopped apps cannot be backed up because addons might be down (redis)
    if (app.runState === apps.RSTATE_STOPPED) return false;

    // we used to check the health here but that doesn't work for stopped apps. it's better to just fail
    // and inform the user if the backup fails and the app addons have not been setup yet.
    const backupableStates = [
        apps.ISTATE_INSTALLED,
        apps.ISTATE_PENDING_CONFIGURE,
        apps.ISTATE_PENDING_BACKUP, // called from apptask
        apps.ISTATE_PENDING_UPDATE  // called from apptask
    ];

    return backupableStates.includes(app.installationState);
}
function encryptFilePath(filePath, encryption) {
assert.strictEqual(typeof filePath, 'string');
assert.strictEqual(typeof encryption, 'object');
var encryptedParts = filePath.split('/').map(function (part) {
let hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
const iv = hmac.update(part).digest().slice(0, 16); // iv has to be deterministic, for our sync (copy) logic to work
const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
let crypt = cipher.update(part);
crypt = Buffer.concat([ iv, crypt, cipher.final() ]);
return crypt.toString('base64') // ensures path is valid
.replace(/\//g, '-') // replace '/' of base64 since it conflicts with path separator
.replace(/=/g,''); // strip trailing = padding. this is only needed if we concat base64 strings, which we don't
});
return encryptedParts.join('/');
}
// Decrypts a path produced by encryptFilePath(). Returns { result } on success or
// { error } (BoxError.CRYPTO_ERROR) on failure; never throws.
function decryptFilePath(filePath, encryption) {
    assert.strictEqual(typeof filePath, 'string');
    assert.strictEqual(typeof encryption, 'object');

    let decryptedParts = [];
    for (let part of filePath.split('/')) {
        // BUGFIX: re-add the '=' padding encryptFilePath strips. the original used
        // Array(part.length % 4).join('=') which yields the wrong count (e.g. 1 '='
        // when 2 are needed); node's lenient base64 decoder masked this.
        part = part + '='.repeat((4 - (part.length % 4)) % 4);
        part = part.replace(/-/g, '/'); // undo the '/' -> '-' mapping
        try {
            const buffer = Buffer.from(part, 'base64');
            const iv = buffer.slice(0, 16); // deterministic iv prepended by encryptFilePath
            let decrypt = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
            const plainText = decrypt.update(buffer.slice(16));
            const plainTextString = Buffer.concat([ plainText, decrypt.final() ]).toString('utf8');
            // the iv is HMAC(plaintext); recomputing it authenticates the component
            const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
            if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };
            decryptedParts.push(plainTextString);
        } catch (error) {
            debug(`Error decrypting part ${part} of path ${filePath}:`, error);
            return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
        }
    }
    return { result: decryptedParts.join('/') };
}
// Transform stream that AES-256-CBC encrypts its input and appends an HMAC-SHA256 trailer.
// Output layout: 'CBV2' magic (4 bytes) | iv (16 bytes) | ciphertext | hmac (32 bytes).
// The hmac covers magic + iv + ciphertext. Decoded by DecryptStream.
class EncryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._headerPushed = false;
        this._iv = crypto.randomBytes(16); // fresh random iv per stream
        this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
    }

    // emits the magic + iv header exactly once, folding it into the hmac
    pushHeaderIfNeeded() {
        if (this._headerPushed) return;

        const magic = Buffer.from('CBV2');
        for (const piece of [ magic, this._iv ]) {
            this.push(piece);
            this._hmac.update(piece);
        }
        this._headerPushed = true;
    }

    _transform(chunk, ignoredEncoding, callback) {
        this.pushHeaderIfNeeded();
        try {
            const cipherText = this._cipher.update(chunk);
            this._hmac.update(cipherText);
            callback(null, cipherText);
        } catch (error) {
            callback(error);
        }
    }

    _flush(callback) {
        try {
            this.pushHeaderIfNeeded(); // for 0-length files
            const finalBlock = this._cipher.final();
            this.push(finalBlock);
            this._hmac.update(finalBlock);
            callback(null, this._hmac.digest()); // +32 bytes trailer
        } catch (error) {
            callback(error);
        }
    }
}
// Transform stream that decrypts the EncryptStream format:
// 'CBV2' magic (4) | iv (16) | AES-256-CBC ciphertext | HMAC-SHA256 trailer (32).
// Plaintext is emitted incrementally; the trailing 32 bytes are held back and
// verified against the hmac (over magic + iv + ciphertext) in _flush.
class DecryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._key = Buffer.from(encryption.dataKey, 'hex');
        this._header = Buffer.alloc(0); // accumulates the 20-byte magic+iv header
        this._decipher = null; // created once the iv is known
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
        this._buffer = Buffer.alloc(0); // holds back the last 32 bytes (potential hmac trailer)
    }

    _transform(chunk, ignoredEncoding, callback) {
        const needed = 20 - this._header.length; // 4 for magic, 16 for iv
        if (this._header.length !== 20) { // not gotten header yet
            this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
            if (this._header.length !== 20) return callback(); // header still incomplete; wait for more data
            // BUGFIX: was 'new Buffer.from(...)'. Buffer.from is a factory function and must not be called with 'new'
            if (!this._header.slice(0, 4).equals(Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));
            const iv = this._header.slice(4);
            this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
            this._hmac.update(this._header);
        }

        this._buffer = Buffer.concat([ this._buffer, chunk.slice(needed) ]);
        if (this._buffer.length < 32) return callback(); // hmac trailer length is 32

        try {
            // everything except the last 32 bytes is definitely ciphertext
            const cipherText = this._buffer.slice(0, -32);
            this._hmac.update(cipherText);
            const plainText = this._decipher.update(cipherText);
            this._buffer = this._buffer.slice(-32);
            callback(null, plainText);
        } catch (error) {
            callback(error);
        }
    }

    _flush (callback) {
        if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));

        try {
            if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));
            const plainText = this._decipher.final();
            callback(null, plainText);
        } catch (error) {
            callback(error);
        }
    }
}
// Opens sourceFile for reading, optionally piping it through an EncryptStream.
// Returns a progress stream that emits 'progress' (every 10 seconds), 'open' and
// 'error' (always a BoxError).
function createReadStream(sourceFile, encryption) {
    assert.strictEqual(typeof sourceFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const fileStream = fs.createReadStream(sourceFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    fileStream.on('error', (error) => {
        debug(`createReadStream: read stream error at ${sourceFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
    });
    fileStream.on('open', () => ps.emit('open'));

    if (!encryption) return fileStream.pipe(ps);

    const encryptStream = new EncryptStream(encryption);
    encryptStream.on('error', (error) => {
        debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
        ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
    });
    return fileStream.pipe(encryptStream).pipe(ps);
}
// Opens destFile for writing, optionally piping input through a DecryptStream first.
// Returns a progress stream to write into; it emits 'progress', 'done' (when the file
// is fully written) and 'error' (always a BoxError).
function createWriteStream(destFile, encryption) {
    assert.strictEqual(typeof destFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const fileStream = fs.createWriteStream(destFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    fileStream.on('error', (error) => {
        debug(`createWriteStream: write stream error ${destFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
    });
    fileStream.on('finish', () => {
        debug('createWriteStream: done.');
        // we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not write
        ps.emit('done');
    });

    if (encryption) {
        const decrypt = new DecryptStream(encryption);
        decrypt.on('error', (error) => {
            debug(`createWriteStream: decrypt stream error ${destFile}`, error);
            ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
        });
        ps.pipe(decrypt).pipe(fileStream);
    } else {
        ps.pipe(fileStream);
    }

    return ps;
}
// Tars the layout's local paths, gzips the result and optionally encrypts it.
// Calls back with a progress stream emitting 'progress' and 'error' (BoxError).
function tarPack(dataLayout, encryption, callback) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof encryption, 'object');
    assert.strictEqual(typeof callback, 'function');

    var pack = tar.pack('/', {
        dereference: false, // pack the symlink and not what it points to
        entries: dataLayout.localPaths(),
        ignoreStatError: (path, err) => {
            debug(`tarPack: error stat'ing ${path} - ${err.code}`);
            return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
        },
        map: function(header) {
            header.name = dataLayout.toRemotePath(header.name);
            // the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
            // https://www.systutorials.com/docs/linux/man/5-star/
            // BUGFIX: original tested 'header.name > 99' - a string/number comparison that is
            // (practically) always false; the comment above shows the intent is the name length
            if (header.size > 8589934590 || header.name.length > 99) header.pax = { size: header.size };
            return header;
        },
        strict: false // do not error for unknown types (skip fifo, char/block devices)
    });
    var gzip = zlib.createGzip({});
    var ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds

    pack.on('error', function (error) {
        debug('tarPack: tar stream error.', error);
        ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });
    gzip.on('error', function (error) {
        debug('tarPack: gzip stream error.', error);
        ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });

    if (encryption) {
        const encryptStream = new EncryptStream(encryption);
        encryptStream.on('error', function (error) {
            debug('tarPack: encrypt stream error.', error);
            ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
        });
        pack.pipe(gzip).pipe(encryptStream).pipe(ps);
    } else {
        pack.pipe(gzip).pipe(ps);
    }

    return callback(null, ps);
}
// Incrementally syncs the data layout to the backup destination (the non-tgz, per-file
// format). syncer.sync() produces add/remove/removedir tasks which are executed against
// the provider's storage API with bounded concurrency; 'add' uploads are retried up to
// 5 times, 20s apart. Calls back with a BoxError (EXTERNAL_ERROR) on failure.
function sync(backupConfig, backupId, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    // the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
    const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);

    syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
        debug('sync: processing task: %j', task);
        // the empty task.path is special to signify the directory
        const destPath = task.path && backupConfig.encryption ? encryptFilePath(task.path, backupConfig.encryption) : task.path;
        const backupFilePath = path.join(storage.getBackupFilePath(backupConfig, backupId, backupConfig.format), destPath);

        if (task.operation === 'removedir') {
            debug(`Removing directory ${backupFilePath}`);
            return storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath)
                .on('progress', (message) => progressCallback({ message }))
                .on('done', iteratorCallback);
        } else if (task.operation === 'remove') {
            debug(`Removing ${backupFilePath}`);
            return storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, iteratorCallback);
        }

        var retryCount = 0;
        async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
            retryCallback = once(retryCallback); // protect again upload() erroring much later after read stream error
            ++retryCount;
            if (task.operation === 'add') {
                progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
                debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
                var stream = createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
                stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
                stream.on('progress', function (progress) {
                    const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                    if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
                    progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` });
                });
                // only create the destination path when we have confirmation that the source is available. otherwise, we end up with
                // files owned as 'root' and the cp later will fail
                stream.on('open', function () {
                    storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
                        debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
                        retryCallback(error);
                    });
                });
            }
        }, iteratorCallback);
    }, concurrency, function (error) {
        if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
        callback();
    });
}
// this is not part of 'snapshotting' because we need root access to traverse
// Records filesystem state that the per-file backup format cannot carry - empty
// directories, executable bits and symlink targets - into metadataFile (fsmetadata.json).
// Re-applied by restoreFsMetadata() after download. Throws BoxError (FS_ERROR) on failure.
async function saveFsMetadata(dataLayout, metadataFile) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof metadataFile, 'string');

    // contains paths prefixed with './'
    let metadata = {
        emptyDirs: [],
        execFiles: [],
        symlinks: []
    };

    // we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
    // NOTE(review): the local path is interpolated unquoted into the shell command below;
    // a path containing spaces or shell metacharacters would break the find invocation.
    // presumably these paths are always cloudron-controlled - confirm
    for (let lp of dataLayout.localPaths()) {
        const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
        if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));

        const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
        if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));

        const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
        if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
        if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
            const target = safe.fs.readlinkSync(sl); // stores the link target verbatim (may be null on readlink failure)
            return { path: dataLayout.toRemotePath(sl), target };
        }));
    }

    if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
}
// this function is called via backupupload (since it needs root to traverse app's directory)
// Uploads the data layout to the backup destination. 'tgz' streams a single (optionally
// encrypted) tarball with up to 5 retries; any other format writes fsmetadata.json and
// then syncs file-by-file.
function upload(backupId, format, dataLayoutString, progressCallback, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof format, 'string');
    assert.strictEqual(typeof dataLayoutString, 'string');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`upload: id ${backupId} format ${format} dataLayout ${dataLayoutString}`);

    const dataLayout = DataLayout.fromString(dataLayoutString);

    getBackupConfig(function (error, backupConfig) {
        if (error) return callback(error);

        storage.api(backupConfig.provider).checkPreconditions(backupConfig, dataLayout, function (error) {
            if (error) return callback(error);

            if (format === 'tgz') {
                async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
                    retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error

                    tarPack(dataLayout, backupConfig.encryption, function (error, tarStream) {
                        if (error) return retryCallback(error);

                        tarStream.on('progress', function (progress) {
                            const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                            if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
                            progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
                        });
                        tarStream.on('error', retryCallback); // already returns BoxError
                        storage.api(backupConfig.provider).upload(backupConfig, storage.getBackupFilePath(backupConfig, backupId, format), tarStream, retryCallback);
                    });
                }, callback);
            } else {
                // per-file format: record empty dirs/exec bits/symlinks first since individual
                // file uploads cannot preserve them, then sync the tree
                async.series([
                    saveFsMetadata.bind(null, dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`),
                    sync.bind(null, backupConfig, backupId, dataLayout, progressCallback)
                ], callback);
            }
        });
    });
}
// Extracts a (possibly encrypted) gzipped tarball from inStream into the data layout.
// Calls back immediately with a progress stream that emits 'progress', 'done' (when
// extraction finished) and at most one 'error' (BoxError).
function tarExtract(inStream, dataLayout, encryption, callback) {
    assert.strictEqual(typeof inStream, 'object');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof encryption, 'object');
    assert.strictEqual(typeof callback, 'function');

    var gunzip = zlib.createGunzip({});
    var ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
    var extract = tar.extract('/', {
        map: function (header) {
            header.name = dataLayout.toLocalPath(header.name); // remote (archived) path -> local path
            return header;
        },
        dmode: 500 // ensure directory is writable. NOTE(review): 500 is decimal (0o764); an octal literal may have been intended - confirm
    });

    // emit at most one error and stop pulling from the source
    const emitError = once((error) => {
        inStream.destroy();
        ps.emit('error', error);
    });

    inStream.on('error', function (error) {
        debug('tarExtract: input stream error.', error);
        emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });
    gunzip.on('error', function (error) {
        debug('tarExtract: gunzip stream error.', error);
        emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });
    extract.on('error', function (error) {
        debug('tarExtract: extract stream error.', error);
        emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
    });
    extract.on('finish', function () {
        debug('tarExtract: done.');
        // we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not extract
        ps.emit('done');
    });

    if (encryption) {
        let decrypt = new DecryptStream(encryption);
        decrypt.on('error', function (error) {
            debug('tarExtract: decrypt stream error.', error);
            emitError(new BoxError(BoxError.EXTERNAL_ERROR, `Failed to decrypt: ${error.message}`));
        });
        inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
    } else {
        inStream.pipe(ps).pipe(gunzip).pipe(extract);
    }

    callback(null, ps);
}
// Re-applies the filesystem state recorded by saveFsMetadata(): recreates empty
// directories, restores executable bits and recreates symlinks. Throws BoxError on failure.
async function restoreFsMetadata(dataLayout, metadataFile) {
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof metadataFile, 'string');

    debug(`Recreating empty directories in ${dataLayout.toString()}`);

    const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
    if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message);
    const metadata = safe.JSON.parse(metadataJson);
    if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message);

    for (const dir of metadata.emptyDirs) {
        const [dirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(dir), { recursive: true }));
        if (dirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${dirError.message}`);
    }

    for (const file of metadata.execFiles) {
        const [modeError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(file), 0o755));
        if (modeError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${modeError.message}`);
    }

    for (const link of (metadata.symlinks || [])) {
        if (!link.target) continue;
        // the path may not exist if we had a directory full of symlinks
        const [parentError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(link.path)), { recursive: true }));
        if (parentError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${parentError.message}`);
        const [linkError] = await safe(fs.promises.symlink(link.target, dataLayout.toLocalPath(link.path), 'file'));
        if (linkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${linkError.message}`);
    }
}
// Downloads a per-file format backup directory (backupFilePath) into the data layout.
// File names and contents are decrypted when encryption is configured. Listing is paged
// (1000 entries) and files are fetched with bounded concurrency, each with 5 retries.
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);

    // downloads a single listing entry to its decrypted local destination
    function downloadFile(entry, done) {
        let relativePath = path.relative(backupFilePath, entry.fullPath);
        if (backupConfig.encryption) {
            const { error, result } = decryptFilePath(relativePath, backupConfig.encryption);
            if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
            relativePath = result;
        }
        const destFilePath = dataLayout.toLocalPath('./' + relativePath);

        fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
            if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));

            async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
                storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
                    if (error) {
                        progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
                        return retryCallback(error);
                    }

                    let destStream = createWriteStream(destFilePath, backupConfig.encryption);

                    // protect against multiple errors. must destroy the write stream so that a previous retry does not write
                    let closeAndRetry = once((error) => {
                        if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
                        else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });

                        sourceStream.destroy();
                        destStream.destroy();
                        retryCallback(error);
                    });

                    destStream.on('progress', function (progress) {
                        const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                        if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
                        progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
                    });

                    destStream.on('error', closeAndRetry);
                    sourceStream.on('error', closeAndRetry);

                    progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });

                    sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
                });
            }, done);
        });
    }

    storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
        // https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
        const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
        async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
    }, callback);
}
// Downloads backup backupId into the data layout. 'tgz' extracts a single tarball
// (with up to 5 retries); other formats download file-by-file and then re-apply
// the recorded fs metadata (empty dirs, exec bits, symlinks).
function download(backupConfig, backupId, format, dataLayout, progressCallback, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof format, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    debug(`download: Downloading ${backupId} of format ${format} to ${dataLayout.toString()}`);

    const backupFilePath = storage.getBackupFilePath(backupConfig, backupId, format);

    if (format === 'tgz') {
        async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
            progressCallback({ message: `Downloading backup ${backupId}` });

            storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
                if (error) return retryCallback(error);

                tarExtract(sourceStream, dataLayout, backupConfig.encryption, function (error, ps) {
                    if (error) return retryCallback(error);

                    ps.on('progress', function (progress) {
                        const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
                        if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
                        progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
                    });
                    ps.on('error', retryCallback);
                    ps.on('done', retryCallback);
                });
            });
        }, callback);
    } else {
        downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, async function (error) {
            if (error) return callback(error);
            // per-file uploads cannot carry empty dirs/exec bits/symlinks; restore them now
            [error] = await safe(restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`));
            callback(error);
        });
    }
}
// Restores the box (platform) data: downloads backup backupId into boxdata, imports
// the database dump and re-initializes the settings cache.
async function restore(backupConfig, backupId, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    const downloadAsync = util.promisify(download);
    await downloadAsync(backupConfig, backupId, backupConfig.format, new DataLayout(boxDataDir, []), progressCallback);
    debug('restore: download completed, importing database');

    await database.importFromFile(`${boxDataDir}/box.mysqldump`);
    debug('restore: database imported');

    await settings.initCache();
}
// Downloads an app's backup into its data directory (plus the external dataDir mount,
// when configured) as part of app restore/install.
async function downloadApp(app, restoreConfig, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    const mounts = app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : [];
    const dataLayout = new DataLayout(appDataDir, mounts);

    const startTime = new Date();
    const backupConfig = restoreConfig.backupConfig || await settings.getBackupConfig();
    await util.promisify(download)(backupConfig, restoreConfig.backupId, restoreConfig.backupFormat, dataLayout, progressCallback);
    debug('downloadApp: time: %s', (new Date() - startTime)/1000);
}
// Runs scripts/backupupload.js via sudo (upload needs root to traverse app directories)
// and relays its IPC progress messages. Child exit protocol: exit code 50 means the
// child reported an error string (relayed as EXTERNAL_ERROR); any other non-zero code
// or a signal means the task crashed (INTERNAL_ERROR).
function runBackupUpload(uploadConfig, progressCallback, callback) {
    assert.strictEqual(typeof uploadConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    const { backupId, backupConfig, dataLayout, progressTag } = uploadConfig;
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressTag, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    let result = ''; // the script communicates error result as a string

    // https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
    const envCopy = Object.assign({}, process.env);
    if (backupConfig.memoryLimit && backupConfig.memoryLimit >= 2*1024*1024*1024) {
        // leave 256M headroom for the rest of the process; cap the heap at 8G
        const heapSize = Math.min((backupConfig.memoryLimit/1024/1024) - 256, 8192);
        debug(`runBackupUpload: adjusting heap size to ${heapSize}M`);
        envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
    }

    shell.sudo(`backup-${backupId}`, [ BACKUP_UPLOAD_CMD, backupId, backupConfig.format, dataLayout.toString() ], { env: envCopy, preserveEnv: true, ipc: true }, function (error) {
        if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
            return callback(new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed'));
        } else if (error && error.code === 50) { // exited with error
            return callback(new BoxError(BoxError.EXTERNAL_ERROR, result));
        }
        callback();
    }).on('message', function (progress) { // this is { message } or { result }
        if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });
        debug(`runBackupUpload: result - ${JSON.stringify(progress)}`);
        result = progress.result;
    });
}
// Dumps the box database into boxdata; the resulting snapshot is uploaded
// by uploadBoxSnapshot().
async function snapshotBox(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: 'Snapshotting box' });

    const start = new Date();
    await database.exportToFile(`${paths.BOX_DATA_DIR}/box.mysqldump`);
    debug(`snapshotBox: took ${(new Date() - start)/1000} seconds`);
}
// Snapshots the box database and uploads it as 'snapshot/box' via the root upload
// helper, then records the snapshot info.
async function uploadBoxSnapshot(backupConfig, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotBox(progressCallback);

    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading box snapshot' });

    const start = new Date();
    await util.promisify(runBackupUpload)({
        backupId: 'snapshot/box',
        backupConfig,
        dataLayout: new DataLayout(boxDataDir, []),
        progressTag: 'box'
    }, progressCallback);
    debug(`uploadBoxSnapshot: took ${(new Date() - start)/1000} seconds`);

    await backups.setSnapshotInfo('box', { timestamp: new Date().toISOString(), format: backupConfig.format });
}
// Server-side copies a backup from sourceBackupId to destBackupId using the provider's
// copy primitive, then marks the destination record NORMAL (or ERROR on failure).
async function copy(backupConfig, sourceBackupId, destBackupId, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof sourceBackupId, 'string');
    assert.strictEqual(typeof destBackupId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const format = backupConfig.format;
    const sourcePath = storage.getBackupFilePath(backupConfig, sourceBackupId, format);
    const destPath = storage.getBackupFilePath(backupConfig, destBackupId, format);

    return new Promise((resolve, reject) => {
        const startTime = new Date();
        const copyEvents = storage.api(backupConfig.provider).copy(backupConfig, sourcePath, destPath);
        copyEvents.on('progress', (message) => progressCallback({ message }));
        copyEvents.on('done', async function (copyBackupError) {
            // always update the backup record state, even when the copy failed
            const state = copyBackupError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
            const [error] = await safe(backups.update(destBackupId, { preserveSecs: options.preserveSecs || 0, state }));
            if (copyBackupError) return reject(copyBackupError);
            if (error) return reject(error);
            debug(`copy: copied successfully to id ${destBackupId}. Took ${(new Date() - startTime)/1000} seconds`);
            resolve();
        });
    });
}
// Promotes the 'snapshot/box' copy to a permanent, tagged box backup record and
// returns the new backup id.
async function rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert(Array.isArray(dependsOn));
    assert.strictEqual(typeof progressCallback, 'function');

    const backupId = `${tag}/box_v${constants.VERSION}`;
    debug(`rotateBoxBackup: rotating to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_BOX,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_BOX,
        dependsOn,
        manifest: null,
        format: backupConfig.format
    });
    await copy(backupConfig, 'snapshot/box', backupId, options, progressCallback);

    return backupId;
}
// Snapshots/uploads the box (platform) data and rotates it into a tagged backup.
// Returns the resulting backup id.
async function backupBox(dependsOn, tag, options, progressCallback) {
    assert(Array.isArray(dependsOn));
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupConfig = await settings.getBackupConfig();

    await uploadBoxSnapshot(backupConfig, progressCallback);

    return await rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback);
}
// Promotes the app's uploaded snapshot ('snapshot/app_<id>') to a tagged backup id.
// The backup record is created up-front so the copy outcome can be tracked on it.
// Returns the new backup id.
async function rotateAppBackup(backupConfig, app, tag, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const snapshotInfo = backups.getSnapshotInfo(app.id);
    const manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest; // compat
    const backupId = `${tag}/app_${app.fqdn}_v${manifest.version}`;

    debug(`rotateAppBackup: rotating ${app.fqdn} to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null, // 2 is the current encryption scheme version
        packageVersion: manifest.version,
        type: backups.BACKUP_TYPE_APP,
        state: backups.BACKUP_STATE_CREATING,
        identifier: app.id,
        dependsOn: [ ],
        manifest,
        format: backupConfig.format
    });
    await copy(backupConfig, `snapshot/app_${app.id}`, backupId, options, progressCallback);

    return backupId;
}
// Backs up a single app. With options.snapshotOnly, only the local snapshot is
// taken/uploaded; otherwise the snapshot is also rotated into a tagged backup.
// Returns the backup id (undefined in snapshot-only mode).
async function backupApp(app, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (options.snapshotOnly) return await snapshotApp(app, progressCallback);

    // tag is the current timestamp, flattened into a filesystem/id friendly form
    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');

    debug(`backupApp: backing up ${app.fqdn} with tag ${tag}`);

    return await backupAppWithTag(app, tag, options, progressCallback);
}
// Takes a local snapshot of an app: persists its config and backs up its addon data
// (databases etc.) into the app's data directory.
async function snapshotApp(app, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const startTime = new Date();

    progressCallback({ message: `Snapshotting app ${app.fqdn}` });

    await apps.backupConfig(app);
    await services.backupAddons(app, app.manifest.addons);

    debug(`snapshotApp: ${app.fqdn} took ${(new Date() - startTime)/1000} seconds`);
}
// Snapshots the app locally and uploads the snapshot to storage under 'snapshot/app_<id>'.
// On success, records the snapshot info (timestamp, manifest, format) for later rotation.
async function uploadAppSnapshot(backupConfig, app, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotApp(app, progressCallback);

    const backupId = `snapshot/app_${app.id}`;

    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`);

    // the app's data dir; an external dataDir, if any, is mapped into the backup as 'data'
    const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);

    progressCallback({ message: `Uploading app snapshot ${app.fqdn}`});

    const startTime = new Date();
    await util.promisify(runBackupUpload)({ backupId, backupConfig, dataLayout, progressTag: app.fqdn }, progressCallback);
    debug(`uploadAppSnapshot: ${app.fqdn} upload with id ${backupId}. ${(new Date() - startTime)/1000} seconds`);

    await backups.setSnapshotInfo(app.id, { timestamp: new Date().toISOString(), manifest: app.manifest, format: backupConfig.format });
}
// Backs up an app under the given tag. If the app cannot be backed up right now
// (e.g. it is stopped), its most recent successful backup id is re-used instead.
// Returns the backup id, or null when the app has never been backed up.
async function backupAppWithTag(app, tag, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (!canBackupApp(app)) { // cannot back up now; fall back to its most recent backup
        const results = await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1);
        return results.length === 0 ? null : results[0].id; // null means there is no backup to re-use
    }

    const backupConfig = await settings.getBackupConfig();

    await uploadAppSnapshot(backupConfig, app, progressCallback);

    return await rotateAppBackup(backupConfig, app, tag, options, progressCallback);
}
// Uploads the mail data directory to storage under 'snapshot/mail' and records
// the snapshot info (timestamp, format) for later rotation.
async function uploadMailSnapshot(backupConfig, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading mail snapshot' });

    const startTime = new Date();
    await util.promisify(runBackupUpload)({
        backupId: 'snapshot/mail',
        backupConfig,
        dataLayout: new DataLayout(mailDataDir, []), // mail has no extra local dirs
        progressTag: 'mail'
    }, progressCallback);
    debug(`uploadMailSnapshot: took ${(new Date() - startTime)/1000} seconds`);

    await backups.setSnapshotInfo('mail', { timestamp: new Date().toISOString(), format: backupConfig.format });
}
// Promotes the uploaded mail snapshot ('snapshot/mail') to a tagged backup id,
// creating the backup record first so the copy outcome can be tracked on it.
// Returns the new backup id.
async function rotateMailBackup(backupConfig, tag, options, progressCallback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupId = `${tag}/mail_v${constants.VERSION}`;

    debug(`rotateMailBackup: rotating to id ${backupId}`);

    await backups.add(backupId, {
        encryptionVersion: backupConfig.encryption ? 2 : null, // 2 is the current encryption scheme version
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_MAIL,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_MAIL,
        dependsOn: [],
        manifest: null,
        format: backupConfig.format
    });
    await copy(backupConfig, 'snapshot/mail', backupId, options, progressCallback);

    return backupId;
}
// Snapshots/uploads the mail data and rotates it into a tagged backup.
// Returns the resulting backup id.
async function backupMailWithTag(tag, options, progressCallback) {
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    debug(`backupMailWithTag: backing up mail with tag ${tag}`);

    const backupConfig = await settings.getBackupConfig();

    await uploadMailSnapshot(backupConfig, progressCallback);

    return await rotateMailBackup(backupConfig, tag, options, progressCallback);
}
// Backs up the mail data under a freshly generated timestamp tag.
// Returns the resulting backup id.
async function backupMail(options, progressCallback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    // tag is the current timestamp, flattened into a filesystem/id friendly form
    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');

    debug(`backupMail: backing up mail with tag ${tag}`);

    return await backupMailWithTag(tag, options, progressCallback);
}
// Downloads the mail backup identified by restoreConfig into the mail data directory.
// restoreConfig carries backupConfig, backupId and backupFormat.
async function downloadMail(restoreConfig, progressCallback) {
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    const { backupConfig, backupId, backupFormat } = restoreConfig;

    const startTime = new Date();
    await util.promisify(download)(backupConfig, backupId, backupFormat, new DataLayout(mailDataDir, []), progressCallback);
    debug('downloadMail: time: %s', (new Date() - startTime)/1000);
}
// this function is called from external process. calling process is expected to have a lock
// Performs a full backup: each enabled app, then mail, then the box itself (which
// records the app/mail backup ids as dependencies). Reports coarse progress percentages.
// Returns the box backup id.
async function fullBackup(options, progressCallback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    // tag is the current timestamp, flattened into a filesystem/id friendly form
    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');

    const allApps = await apps.list();

    // progress budget: one step per app plus mail, box and slack for the final report
    const step = 100/(allApps.length+3);
    let percent = 1;

    const appBackupIds = [];
    for (const [i, app] of allApps.entries()) {
        progressCallback({ percent, message: `Backing up ${app.fqdn} (${i+1}/${allApps.length})` });
        percent += step;

        if (!app.enableBackup) {
            debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length})`);
            continue; // nothing to backup
        }

        const startTime = new Date();
        const appBackupId = await backupAppWithTag(app, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
        debug(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);

        if (appBackupId) appBackupIds.push(appBackupId); // backupId can be null if in BAD_STATE and never backed up
    }

    progressCallback({ percent, message: 'Backing up mail' });
    percent += step;
    const mailBackupId = await backupMailWithTag(tag, options, (progress) => progressCallback({ percent, message: progress.message }));

    progressCallback({ percent, message: 'Backing up system data' });
    percent += step;
    const dependsOn = [ ...appBackupIds, mailBackupId ];

    return await backupBox(dependsOn, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
}