5157789774
Previously, we had a singleton 'main' flag to indicate that a site can be used for updates. With this new approach, the 'primary' concept is no longer needed: each site individually can be used for updates or not.
569 lines
24 KiB
JavaScript
569 lines
24 KiB
JavaScript
'use strict';
|
|
|
|
// public API of the backup task module. fullBackup/appBackup are entry points invoked
// from an external task process; the rest are used for restore/import flows and by
// scripts/backupupload.js (which calls upload() with root privileges).
exports = module.exports = {
    fullBackup,
    appBackup,

    restore,

    downloadApp,
    backupApp,

    downloadMail,

    upload,
};
|
|
|
|
const apps = require('./apps.js'),
|
|
assert = require('node:assert'),
|
|
backupFormats = require('./backupformats.js'),
|
|
backups = require('./backups.js'),
|
|
backupSites = require('./backupsites.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
crypto = require('node:crypto'),
|
|
DataLayout = require('./datalayout.js'),
|
|
database = require('./database.js'),
|
|
debug = require('debug')('box:backuptask'),
|
|
df = require('./df.js'),
|
|
locks = require('./locks.js'),
|
|
path = require('node:path'),
|
|
paths = require('./paths.js'),
|
|
{ Readable } = require('node:stream'),
|
|
safe = require('safetydance'),
|
|
services = require('./services.js'),
|
|
shell = require('./shell.js')('backuptask'),
|
|
stream = require('stream/promises');
|
|
|
|
// helper script spawned via sudo by runBackupUpload(); it needs root to traverse app data dirs and calls back into upload()
const BACKUP_UPLOAD_CMD = path.join(__dirname, 'scripts/backupupload.js');
|
|
|
|
// Appends the format- and encryption-specific file extension to a remote backup path.
// Used for both 'snapshot/...' and tagged rotation paths.
function addFileExtension(backupSite, remotePath) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    // bugfix: the property was misspelled 'encyption' (always undefined), so encrypted
    // sites got the unencrypted extension. the rest of this file reads 'encryption'.
    const ext = backupFormats.api(backupSite.format).getFileExtension(!!backupSite.encryption);
    return remotePath + ext;
}
|
|
|
|
// Verifies the backup site can receive an upload of dataLayout: the remote must be
// mounted and 'active', and must have enough free space for roughly 60% of the local
// data plus a 1GB safety margin. Throws BoxError.MOUNT_ERROR or BoxError.FS_ERROR.
// Requires root (du/df on app data dirs).
async function checkPreconditions(backupSite, dataLayout) {
    assert.strictEqual(typeof backupSite, 'object');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    // check mount status before uploading
    const status = await backupSites.ensureMounted(backupSite);
    debug(`checkPreconditions: mount point status is ${JSON.stringify(status)}`);
    if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not active: ${status.message}`);

    // check availabe size. this requires root for df to work
    const available = await backupSites.storageApi(backupSite).getAvailableSize(backupSite.config);
    let used = 0;
    for (const localPath of dataLayout.localPaths()) {
        debug(`checkPreconditions: getting disk usage of ${localPath}`);
        // du can error when files go missing as it is computing the size. it still prints some size anyway
        // to match df output in getAvailableSize() we must use disk usage size here and not apparent size
        const [duError, result] = await safe(shell.spawn('du', [ '--dereference-args', '--summarize', '--block-size=1', '--exclude=*.lock', '--exclude=dovecot.list.index.log.*', localPath], { encoding: 'utf8' }));
        if (duError) debug(`checkPreconditions: du error for ${localPath}. code: ${duError.code} stderror: ${duError.stderr}`);
        // du prints "<bytes>\t<path>"; parseInt stops at the tab. on error the partial size is taken
        // from duError.stdout. NOTE(review): assumes shell.spawn resolves to du's stdout string — confirm
        used += parseInt(duError ? duError.stdout : result, 10);
    }

    debug(`checkPreconditions: total required=${used} available=${available}`);

    const needed = 0.6 * used + (1024 * 1024 * 1024); // check if there is atleast 1GB left afterwards. aim for 60% because rsync/tgz won't need full 100%
    if (available <= needed) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space for backup. Needed: ${df.prettyBytes(needed)} Available: ${df.prettyBytes(available)}`);
}
|
|
|
|
// Uploads the integrity map (filename -> checksum entries) as a pretty-printed JSON
// '<remotePath>.backupinfo' file next to the backup, then returns a signature of that
// JSON made with the site's integrity private key.
async function uploadBackupInfo(backupSite, remotePath, integrityMap) {
    assert.strictEqual(typeof backupSite, 'object'); // asserts added to match the rest of the file
    assert.strictEqual(typeof remotePath, 'string');
    assert(integrityMap instanceof Map, 'integrityMap must be a Map');

    // for readability, order the entries. bugfix: a bare 'a < b' comparator returns a
    // boolean which coerces to 0/1 and never -1, so the entries were not actually sorted
    const sortedIntegrityMap = [...integrityMap.entries()].sort(([a], [b]) => a < b ? -1 : (a > b ? 1 : 0));
    const integrityDataJsonString = JSON.stringify(Object.fromEntries(sortedIntegrityMap), null, 2);
    const integrityDataStream = Readable.from(integrityDataJsonString);
    const integrityUploader = await backupSites.storageApi(backupSite).upload(backupSite.config, `${remotePath}.backupinfo`);
    await stream.pipeline(integrityDataStream, integrityUploader.stream);
    await integrityUploader.finish();

    // null algorithm lets crypto derive it from the key type (required for Ed25519 keys)
    return await crypto.sign(null /* algorithm */, integrityDataJsonString, backupSite.integrityKeyPair.privateKey);
}
|
|
|
|
// this function is called via backupupload (since it needs root to traverse app's directory)
// uploads the data described by dataLayoutString to remotePath on the given site, then
// uploads the integrity info. returns { stats, integrity: { signature } }.
async function upload(remotePath, siteId, dataLayoutString, progressCallback) {
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof siteId, 'string');
    assert.strictEqual(typeof dataLayoutString, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    debug(`upload: path ${remotePath} site ${siteId} dataLayout ${dataLayoutString}`);

    const site = await backupSites.get(siteId);
    if (!site) throw new BoxError(BoxError.NOT_FOUND, 'Backup site not found');

    const layout = DataLayout.fromString(dataLayoutString);

    // fail early on unmounted/full targets before any data moves
    await checkPreconditions(site, layout);

    const formatApi = backupFormats.api(site.format);
    const { stats, integrityMap } = await formatApi.upload(site, remotePath, layout, progressCallback);

    progressCallback({ message: `Uploading integrity information to ${remotePath}.backupinfo` });
    const signature = await uploadBackupInfo(site, remotePath, integrityMap);
    return { stats, integrity: { signature } };
}
|
|
|
|
// Downloads the backup at remotePath into the local directories described by dataLayout,
// delegating to the site's configured backup format implementation.
async function download(backupSite, remotePath, dataLayout, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');

    debug(`download: Downloading ${remotePath} of format ${backupSite.format} (encrypted: ${!!backupSite.encryption}) to ${dataLayout.toString()}`);

    const formatApi = backupFormats.api(backupSite.format);
    await formatApi.download(backupSite, remotePath, dataLayout, progressCallback);
}
|
|
|
|
// Restores a full box backup: downloads into the box data dir, imports the bundled
// mysql dump and clears any stale locks left over from the previous installation.
async function restore(backupSite, remotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const resolvedBoxData = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!resolvedBoxData) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    const layout = new DataLayout(resolvedBoxData, []);
    await download(backupSite, remotePath, layout, progressCallback);

    debug('restore: download completed, importing database');
    await database.importFromFile(`${layout.localRoot()}/box.mysqldump`);
    debug('restore: database imported');

    await locks.releaseAll(); // clear the locks table in database
}
|
|
|
|
// Downloads an app backup into the app's data dir (and storage volume, if any).
// restoreConfig either carries { backupSite, remotePath } directly (import flow)
// or a { backupId } to be resolved against the backups table.
async function downloadApp(app, restoreConfig, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const resolvedAppData = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!resolvedAppData) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    const extraDirs = app.storageVolumeId ? [{ localDir: await apps.getStorageDir(app), remoteDir: 'data' }] : [];
    const layout = new DataLayout(resolvedAppData, extraDirs);

    const begin = new Date();

    let site = restoreConfig.backupSite; // set when importing
    let remotePath = restoreConfig.remotePath;
    if (!remotePath) { // resolve from the backup record instead
        const backup = await backups.get(restoreConfig.backupId);
        if (!backup) throw new BoxError(BoxError.BAD_FIELD, 'No such backup');
        remotePath = backup.remotePath;
        site = await backupSites.get(backup.siteId);
    }

    await download(site, remotePath, layout, progressCallback);
    debug('downloadApp: time: %s', (new Date() - begin)/1000);
}
|
|
|
|
// Runs scripts/backupupload.js as root in a child process, relaying its progress
// messages through progressCallback. Returns the child's result object.
// The child's exit code 50 means a handled error (details in its last message);
// any other non-zero exit (or death by signal, error.code === null) is a crash.
async function runBackupUpload(uploadConfig, progressCallback) {
    assert.strictEqual(typeof uploadConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const { remotePath, backupSite, dataLayout, progressTag } = uploadConfig;
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressTag, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    // https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
    const envCopy = Object.assign({}, process.env);
    if (backupSite.limits?.memoryLimit >= 2*1024*1024*1024) {
        const heapSize = Math.min((backupSite.limits.memoryLimit/1024/1024) - 256, 8192);
        debug(`runBackupUpload: adjusting heap size to ${heapSize}M`);
        envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
    }

    let lastMessage = null; // the script communicates error result as a string
    function onMessage(progress) { // this is { message } or { result }
        if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });
        debug(`runBackupUpload: result - ${JSON.stringify(progress)}`);
        lastMessage = progress;
    }

    // do not use debug for logging child output because it already has timestamps via it's own debug
    // bugfix: process.stdout.write must be bound; passing the bare method loses 'this' and throws inside Writable.write
    const [error] = await safe(shell.sudo([ BACKUP_UPLOAD_CMD, remotePath, backupSite.id, dataLayout.toString() ], { env: envCopy, preserveEnv: true, onMessage, logger: process.stdout.write.bind(process.stdout) }));
    if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
        debug('runBackupUpload: backuptask crashed', error);
        throw new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed');
    } else if (error && error.code === 50) { // exited with error
        // guard: the child may exit 50 without ever having sent a result message
        throw new BoxError(BoxError.EXTERNAL_ERROR, lastMessage ? lastMessage.errorMessage : 'Backuptask failed without an error message');
    }

    // guard: a clean exit without a result message would otherwise null-deref below
    if (!lastMessage) throw new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask exited without sending a result');

    return lastMessage.result;
}
|
|
|
|
// Snapshots the box state by dumping the database into the box data directory.
async function snapshotBox(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: 'Snapshotting box' });

    const begin = new Date();
    await database.exportToFile(`${paths.BOX_DATA_DIR}/box.mysqldump`);
    debug(`snapshotBox: took ${(new Date() - begin)/1000} seconds`);
}
|
|
|
|
// Creates a fresh box snapshot and uploads it to the site's 'snapshot/box' location.
// Records the snapshot timestamp on the site and returns { stats, integrity }.
async function uploadBoxSnapshot(backupSite, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotBox(progressCallback);

    const remotePath = addFileExtension(backupSite, `snapshot/box`);

    const resolvedBoxData = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!resolvedBoxData) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading box snapshot' });

    const begin = new Date();
    const { stats, integrity } = await runBackupUpload({
        remotePath,
        backupSite,
        dataLayout: new DataLayout(resolvedBoxData, []),
        progressTag: 'box'
    }, progressCallback);
    debug(`uploadBoxSnapshot: took ${(new Date() - begin)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, 'box', { timestamp: new Date().toISOString() });

    return { stats, integrity };
}
|
|
|
|
// Server-side copies a backup from srcRemotePath to destRemotePath, payload first
// and then its '.backupinfo' integrity file. Rethrows the first failure.
async function copy(backupSite, srcRemotePath, destRemotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof srcRemotePath, 'string');
    assert.strictEqual(typeof destRemotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const begin = new Date();

    const formatApi = backupFormats.api(backupSite.format);
    const [copyError] = await safe(formatApi.copy(backupSite, srcRemotePath, destRemotePath, progressCallback));
    if (copyError) {
        debug(`copy: copy to ${destRemotePath} errored. error: ${copyError.message}`);
        throw copyError;
    }
    debug(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - begin)/1000} seconds`);

    const storage = backupSites.storageApi(backupSite);
    const [copyChecksumError] = await safe(storage.copy(backupSite.config, `${srcRemotePath}.backupinfo`, `${destRemotePath}.backupinfo`, progressCallback));
    if (copyChecksumError) {
        debug(`copy: copy to ${destRemotePath} errored. error: ${copyChecksumError.message}`);
        throw copyChecksumError;
    }
    debug(`copy: copied backupinfo successfully to ${destRemotePath}.backupinfo`);
}
|
|
|
|
// Uploads a fresh box snapshot and rotates it into a tagged backup, recording it in
// the backups table. The record starts in CREATING state and is flipped to NORMAL or
// ERROR depending on whether the server-side copy succeeded. Returns the backup id.
async function backupBox(backupSite, dependsOn, tag, options, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert(Array.isArray(dependsOn));
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const { stats, integrity } = await uploadBoxSnapshot(backupSite, progressCallback);

    const remotePath = addFileExtension(backupSite, `${tag}/box_v${constants.VERSION}`);

    debug(`backupBox: rotating box snapshot of ${backupSite.id} to id ${remotePath}`);

    const record = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_BOX,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_BOX,
        dependsOn,
        manifest: null,
        preserveSecs: options.preserveSecs || 0,
        appConfig: null,
        siteId: backupSite.id,
        stats,
        integrity
    };

    const backupId = await backups.add(record);

    const snapshotPath = addFileExtension(backupSite, 'snapshot/box');
    const [rotateError] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    await backups.setState(backupId, rotateError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL);
    if (rotateError) throw rotateError;

    return backupId;
}
|
|
|
|
// Snapshots an app in place: writes its config file and dumps its addon data
// (database dumps etc.) into the app's data directory.
async function snapshotApp(app, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: `Snapshotting app ${app.fqdn}` });
    const begin = new Date();

    await apps.writeConfig(app);
    await services.backupAddons(app, app.manifest.addons);

    debug(`snapshotApp: ${app.fqdn} took ${(new Date() - begin)/1000} seconds`);
}
|
|
|
|
// Snapshots an app and uploads it to the site's 'snapshot/app_<id>' location
// (including the app's storage volume, if any). Records the snapshot metadata
// on the site and returns { stats, integrity }.
async function uploadAppSnapshot(backupSite, app, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotApp(app, progressCallback);

    const remotePath = addFileExtension(backupSite, `snapshot/app_${app.id}`);

    const resolvedAppData = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!resolvedAppData) throw new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`);

    const extraDirs = app.storageVolumeId ? [{ localDir: await apps.getStorageDir(app), remoteDir: 'data' }] : [];
    const layout = new DataLayout(resolvedAppData, extraDirs);

    progressCallback({ message: `Uploading app snapshot ${app.fqdn}`});

    const begin = new Date();
    const { stats, integrity } = await runBackupUpload({
        remotePath,
        backupSite,
        dataLayout: layout,
        progressTag: app.fqdn
    }, progressCallback);
    debug(`uploadAppSnapshot: ${app.fqdn} uploaded to ${remotePath}. ${(new Date() - begin)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, app.id, { timestamp: new Date().toISOString(), manifest: app.manifest });

    return { stats, integrity };
}
|
|
|
|
// Backs up a single app under the given tag. When the app is in a state that cannot be
// backed up, falls back to the id of its most recent good backup on this site (or null
// if there has never been one). Otherwise uploads a fresh snapshot, rotates it into a
// tagged backup and returns the new backup id.
async function backupAppWithTag(app, backupSite, tag, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (!apps.canBackupApp(app)) { // if we cannot backup, reuse it's most recent backup
        const lastKnownGoodAppBackup = await backups.getLatestInTargetByIdentifier(app.id, backupSite.id);
        return lastKnownGoodAppBackup ? lastKnownGoodAppBackup.id : null; // null means no backup to re-use
    }

    const { stats, integrity } = await uploadAppSnapshot(backupSite, app, progressCallback);

    const manifest = app.manifest;
    const remotePath = addFileExtension(backupSite, `${tag}/app_${app.fqdn}_v${manifest.version}`);

    debug(`backupAppWithTag: rotating ${app.fqdn} snapshot of ${backupSite.id} to path ${remotePath}`);

    const record = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: manifest.version,
        type: backups.BACKUP_TYPE_APP,
        state: backups.BACKUP_STATE_CREATING,
        identifier: app.id,
        dependsOn: [],
        manifest,
        preserveSecs: options.preserveSecs || 0,
        appConfig: app,
        siteId: backupSite.id,
        stats,
        integrity
    };

    const backupId = await backups.add(record);

    const snapshotPath = addFileExtension(backupSite, `snapshot/app_${app.id}`);
    const [rotateError] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    await backups.setState(backupId, rotateError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL);
    if (rotateError) throw rotateError;

    return backupId;
}
|
|
|
|
// Backs up a single app while holding the app's backup lock. With options.snapshotOnly
// only the snapshot is refreshed (no tagged rotation). Returns the new backup id, or
// null when only a snapshot was taken.
async function backupApp(app, backupSite, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    let backupId = null;
    await locks.wait(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
    try {
        if (options.snapshotOnly) {
            await snapshotApp(app, progressCallback);
        } else {
            const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
            backupId = await backupAppWithTag(app, backupSite, tag, options, progressCallback);
        }
    } finally {
        // bugfix: release the lock even when the backup throws; previously an error
        // left the lock held, blocking all future backups of this app
        await locks.release(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
    }

    return backupId;
}
|
|
|
|
// Uploads the mail data directory to the site's 'snapshot/mail' location, records
// the snapshot timestamp on the site and returns { stats, integrity }.
async function uploadMailSnapshot(backupSite, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const remotePath = addFileExtension(backupSite, 'snapshot/mail');

    const resolvedMailData = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!resolvedMailData) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    progressCallback({ message: 'Uploading mail snapshot' });

    const begin = new Date();
    const { stats, integrity } = await runBackupUpload({
        remotePath,
        backupSite,
        dataLayout: new DataLayout(resolvedMailData, []),
        progressTag: 'mail'
    }, progressCallback);
    debug(`uploadMailSnapshot: took ${(new Date() - begin)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, 'mail', { timestamp: new Date().toISOString() });

    return { stats, integrity };
}
|
|
|
|
// Uploads a fresh mail snapshot and rotates it into a tagged backup recorded in the
// backups table (CREATING -> NORMAL/ERROR). Returns the new backup id.
async function backupMailWithTag(backupSite, tag, options, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    debug(`backupMailWithTag: backing up mail with tag ${tag}`);

    const { stats, integrity } = await uploadMailSnapshot(backupSite, progressCallback);

    const remotePath = addFileExtension(backupSite, `${tag}/mail_v${constants.VERSION}`);

    debug(`backupMailWithTag: rotating mail snapshot of ${backupSite.id} to ${remotePath}`);

    const record = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_MAIL,
        state: backups.BACKUP_STATE_CREATING,
        identifier: backups.BACKUP_IDENTIFIER_MAIL,
        dependsOn: [],
        manifest: null,
        preserveSecs: options.preserveSecs || 0,
        appConfig: null,
        siteId: backupSite.id,
        stats,
        integrity
    };

    const backupId = await backups.add(record);

    const snapshotPath = addFileExtension(backupSite, 'snapshot/mail');
    const [rotateError] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    await backups.setState(backupId, rotateError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL);
    if (rotateError) throw rotateError;

    return backupId;
}
|
|
|
|
// Downloads a mail backup from remotePath into the mail data directory.
async function downloadMail(backupSite, remotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const resolvedMailData = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!resolvedMailData) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    const begin = new Date();
    await download(backupSite, remotePath, new DataLayout(resolvedMailData, []), progressCallback);
    debug('downloadMail: time: %s', (new Date() - begin)/1000);
}
|
|
|
|
// this function is called from external process. calling process is expected to have a lock
// Backs up all apps (that have automatic backup enabled and are in the site's contents),
// then mail, then the box itself, all under one shared tag. NOTE: the return type is
// dual — an array of app backup ids when 'box' is not in the site contents, otherwise
// the box backup id whose record dependsOn the app and mail backups.
async function fullBackup(backupSiteId, options, progressCallback) {
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupSite = await backupSites.get(backupSiteId);
    if (!backupSite) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Backup site not found');

    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,''); // unique tag under which all apps/mail/box backs up

    const allApps = await apps.list();

    // progress: one step per app plus mail, box and a final slot
    let percent = 1;
    const step = 100/(allApps.length+3);

    const appBackupIds = [];
    for (let i = 0; i < allApps.length; i++) {
        const app = allApps[i];
        percent += step;

        if (!app.enableBackup) {
            debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) since automatic backup disabled`);
            continue; // nothing to backup
        }
        if (!backupSites.hasContent(backupSite, app.id)) {
            debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) as it is not in site contents`);
            continue;
        }

        progressCallback({ percent, message: `Backing up ${app.fqdn} (${i+1}/${allApps.length}). Waiting for lock` });
        await locks.wait(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
        const startTime = new Date();
        // safe() so the lock below is always released before any error propagates
        const [appBackupError, appBackupId] = await safe(backupAppWithTag(app, backupSite, tag, options, (progress) => progressCallback({ percent, message: progress.message })));
        debug(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);
        await locks.release(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
        if (appBackupError) throw appBackupError;
        if (appBackupId) appBackupIds.push(appBackupId); // backupId can be null if in BAD_STATE and never backed up
    }

    // sites that only carry app backups stop here
    if (!backupSites.hasContent(backupSite, 'box')) return appBackupIds;

    progressCallback({ percent, message: 'Backing up mail' });
    percent += step;
    const mailBackupId = await backupMailWithTag(backupSite, tag, options, (progress) => progressCallback({ percent, message: progress.message }));

    progressCallback({ percent, message: 'Backing up system data' });
    percent += step;

    // the box backup record depends on everything backed up in this run
    const dependsOn = appBackupIds.concat(mailBackupId);
    const backupId = await backupBox(backupSite, dependsOn, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
    return backupId;
}
|
|
|
|
// this function is called from external process
// resolves the app and site by id, then runs a locked single-app backup.
// returns the new backup id (or null when nothing was rotated).
async function appBackup(appId, backupSiteId, options, progressCallback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const app = await apps.get(appId);
    if (!app) throw new BoxError(BoxError.BAD_FIELD, 'App not found');

    const backupSite = await backupSites.get(backupSiteId);
    if (!backupSite) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Backup site not found');

    await progressCallback({ percent: 1, message: `Backing up ${app.fqdn}. Waiting for lock` });

    const begin = new Date();
    const backupId = await backupApp(app, backupSite, options, progressCallback);
    await progressCallback({ percent: 100, message: `app ${app.fqdn} backup finished. Took ${(new Date() - begin)/1000} seconds` });

    return backupId;
}
|