// backuptask.js - app/mail/box backup, upload, download and restore tasks
import apps from './apps.js';
|
|
import assert from 'node:assert';
|
|
import backupFormats from './backupformats.js';
|
|
import backups from './backups.js';
|
|
import backupSites from './backupsites.js';
|
|
import BoxError from './boxerror.js';
|
|
import constants from './constants.js';
|
|
import crypto from 'node:crypto';
|
|
import DataLayout from './datalayout.js';
|
|
import database from './database.js';
|
|
import logger from './logger.js';
|
|
import df from './df.js';
|
|
import locks from './locks.js';
|
|
import path from 'node:path';
|
|
import paths from './paths.js';
|
|
import { Readable } from 'node:stream';
|
|
import safe from '@cloudron/safetydance';
|
|
import services from './services.js';
|
|
import shellModule from './shell.js';
|
|
import stream from 'stream/promises';
|
|
import util from 'util';
|
|
|
|
const { log } = logger('backuptask');
const shell = shellModule('backuptask');

// helper script spawned via sudo by runBackupUpload(); it calls back into
// upload() below with root privileges
const BACKUP_UPLOAD_CMD = path.join(import.meta.dirname, 'scripts/backupupload.js');
|
|
|
|
// Appends the file extension matching the site's backup format and
// encryption setting (e.g. tgz vs rsync, encrypted vs plain) to remotePath.
function addFileExtension(backupSite, remotePath) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const formatApi = backupFormats.api(backupSite.format);
    const encrypted = Boolean(backupSite.encryption);
    return `${remotePath}${formatApi.getFileExtension(encrypted)}`;
}
|
|
|
|
// Verifies the backup endpoint is usable before an upload: the mount must be
// active and the destination must have enough free space for the data in
// dataLayout. Throws BoxError(MOUNT_ERROR) or BoxError(FS_ERROR) otherwise.
async function checkPreconditions(backupSite, dataLayout) {
    assert.strictEqual(typeof backupSite, 'object');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    // check mount status before uploading
    const status = await backupSites.ensureMounted(backupSite);
    log(`checkPreconditions: mount point status is ${JSON.stringify(status)}`);
    if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not active: ${status.message}`);

    // check availabe size. this requires root for df to work
    const available = await backupSites.storageApi(backupSite).getAvailableSize(backupSite.config);
    let used = 0;
    for (const localPath of dataLayout.localPaths()) {
        log(`checkPreconditions: getting disk usage of ${localPath}`);
        // du can error when files go missing as it is computing the size. it still prints some size anyway
        // to match df output in getAvailableSize() we must use disk usage size here and not apparent size
        const [duError, result] = await safe(shell.spawn('du', [ '--dereference-args', '--summarize', '--block-size=1', '--exclude=*.lock', '--exclude=dovecot.list.index.log.*', localPath], { encoding: 'utf8' }));
        if (duError) log(`checkPreconditions: du error for ${localPath}. code: ${duError.code} stderror: ${duError.stderr}`);
        // du output starts with the byte count, so parseInt extracts just the leading number.
        // NOTE(review): assumes `result` is du's stdout string when spawn succeeds — confirm shell.spawn contract
        used += parseInt(duError ? duError.stdout : result, 10);
    }

    log(`checkPreconditions: total required=${used} available=${available}`);

    const needed = 0.6 * used + (1024 * 1024 * 1024); // check if there is atleast 1GB left afterwards. aim for 60% because rsync/tgz won't need full 100%
    if (available <= needed) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space for backup. Needed: ${df.prettyBytes(needed)} Available: ${df.prettyBytes(available)}`);
}
|
|
|
|
// Uploads the integrity map as '<remotePath>.backupinfo' and returns a hex
// signature of the JSON payload made with the site's integrity private key.
// integrityMap: Map of relative path -> { size, fileCount, sha256 }
async function uploadBackupInfo(backupSite, remotePath, integrityMap) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert(util.types.isMap(integrityMap), 'integrityMap must be a Map');

    // for readability, order the entries. the comparator must return a number;
    // the previous boolean comparator (a < b) did not produce a sorted order
    const sortedIntegrityMap = [...integrityMap.entries()].sort(([a], [b]) => a < b ? -1 : (a > b ? 1 : 0));
    const integrityDataJsonString = JSON.stringify(Object.fromEntries(sortedIntegrityMap), null, 2);
    const integrityDataStream = Readable.from(integrityDataJsonString);

    // unencrypted for easy verification without having to decrypt anything
    const integrityUploader = await backupSites.storageApi(backupSite).upload(backupSite.config, backupSite.limits, `${remotePath}.backupinfo`);
    await stream.pipeline(integrityDataStream, integrityUploader.createStream());
    await integrityUploader.finish();

    // crypto.sign requires a Buffer/TypedArray data argument (a plain string
    // throws ERR_INVALID_ARG_TYPE); the sync form returns the signature Buffer
    const signatureBuffer = crypto.sign(null /* algorithm */, Buffer.from(integrityDataJsonString), backupSite.integrityKeyPair.privateKey);
    return signatureBuffer.toString('hex');
}
|
|
|
|
// this function is called via backupupload (since it needs root to traverse app's directory)
// Uploads the data described by dataLayoutString to remotePath on the given
// backup site and then uploads the signed integrity info. Returns { stats, integrity }.
async function upload(remotePath, siteId, dataLayoutString, progressCallback) {
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof siteId, 'string');
    assert.strictEqual(typeof dataLayoutString, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    log(`upload: path ${remotePath} site ${siteId} dataLayout ${dataLayoutString}`);

    const backupSite = await backupSites.get(siteId);
    if (!backupSite) throw new BoxError(BoxError.NOT_FOUND, 'Backup site not found');

    const dataLayout = DataLayout.fromString(dataLayoutString);

    await checkPreconditions(backupSite, dataLayout); // throws if unmounted or out of space

    // integrityMap - { size, fileCount, sha256 } of each file. this is saved in .backupinfo file
    //  - tgz: only one entry named "." in the map. fileCount has the file count inside.
    //  - rsync: entry for each relative path.
    // integrity - { signature } of the uploaded .backupinfo .
    // stats - { fileCount, size, transferred }
    //  - tgz: size (backup size) and transferred is the same
    //  - rsync: size (final backup size) will be different from what was transferred (only changed files)
    // stats.fileCount and stats.size are stored in db and should match up what is written into .backupinfo
    const { stats, integrityMap } = await backupFormats.api(backupSite.format).upload(backupSite, remotePath, dataLayout, progressCallback);
    log(`upload: path ${remotePath} site ${siteId} uploaded: ${JSON.stringify(stats)}`);

    progressCallback({ message: `Uploading integrity information to ${remotePath}.backupinfo` });
    const signature = await uploadBackupInfo(backupSite, remotePath, integrityMap);
    return { stats, integrity: { signature } };
}
|
|
|
|
// Fetches the backup at remotePath into the given data layout via the
// site's format handler (tgz or rsync).
async function download(backupSite, remotePath, dataLayout, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof progressCallback, 'function');

    log(`download: Downloading ${remotePath} of format ${backupSite.format} (encrypted: ${!!backupSite.encryption}) to ${dataLayout.toString()}`);

    const formatApi = backupFormats.api(backupSite.format);
    await formatApi.download(backupSite, remotePath, dataLayout, progressCallback);
}
|
|
|
|
// Restores the box (system) backup: downloads it into boxdata, imports the
// database dump and clears any stale locks left from before the restore.
async function restore(backupSite, remotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    const layout = new DataLayout(boxDataDir, []);
    await download(backupSite, remotePath, layout, progressCallback);

    log('restore: download completed, importing database');
    await database.importFromFile(`${layout.localRoot()}/box.mysqldump`);
    log('restore: database imported');

    await locks.releaseAll(); // clear the locks table in database
}
|
|
|
|
// Downloads an app's backup into its data directory. restoreConfig either
// carries { backupSite, remotePath } directly (import case) or a backupId
// which is resolved against the backups table.
async function downloadApp(app, restoreConfig, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof restoreConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
    // apps with a storage volume keep their 'data' subtree on that volume
    const dataLayout = new DataLayout(appDataDir, app.storageVolumeId ? [{ localDir: await apps.getStorageDir(app), remoteDir: 'data' }] : []);

    const startTime = new Date();
    let { backupSite, remotePath } = restoreConfig; // set when importing
    if (!remotePath) {
        const backup = await backups.get(restoreConfig.backupId);
        if (!backup) throw new BoxError(BoxError.BAD_FIELD, 'No such backup');
        remotePath = backup.remotePath;
        backupSite = await backupSites.get(backup.siteId);
    }

    await download(backupSite, remotePath, dataLayout, progressCallback);
    log('downloadApp: time: %s', (new Date() - startTime)/1000);
}
|
|
|
|
// Runs scripts/backupupload.js via sudo (root is needed to traverse app
// directories). The child reports progress and its final result via messages;
// exit code 50 means the child exited with a handled error, any other
// non-zero code (or death by signal) is a crash. Returns { stats, integrity }.
async function runBackupUpload(uploadConfig, progressCallback) {
    assert.strictEqual(typeof uploadConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const { remotePath, backupSite, dataLayout, progressTag } = uploadConfig;
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressTag, 'string');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');

    // https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
    const envCopy = Object.assign({}, process.env);
    if (backupSite.limits?.memoryLimit >= 2*1024*1024*1024) {
        const heapSize = Math.min((backupSite.limits.memoryLimit/1024/1024) - 256, 8192); // leave 256M headroom, cap at 8G
        log(`runBackupUpload: adjusting heap size to ${heapSize}M`);
        envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
    }

    let lastMessage = null; // the script communicates error result as a string
    function onMessage(progress) { // this is { message } or { result }
        if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });
        log(`runBackupUpload: result - ${JSON.stringify(progress)}`);
        lastMessage = progress;
    }

    // do not use debug for logging child output because it already has timestamps via it's own debug
    // note: write must be bound; passing the bare method loses the stream's `this`
    const [error] = await safe(shell.sudo([ BACKUP_UPLOAD_CMD, remotePath, backupSite.id, dataLayout.toString() ], { env: envCopy, preserveEnv: true, onMessage, logger: process.stdout.write.bind(process.stdout) }));
    if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
        log(`runBackupUpload: backuptask crashed`, error);
        throw new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed');
    } else if (error && error.code === 50) { // exited with error
        // be defensive: the child may die before it manages to send a result message
        throw new BoxError(BoxError.EXTERNAL_ERROR, lastMessage?.errorMessage || 'Backuptask exited with an error');
    }

    return lastMessage.result; // { stats, integrity }
}
|
|
|
|
// Dumps the box database into BOX_DATA_DIR so it becomes part of the snapshot.
async function snapshotBox(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ message: 'Snapshotting box' });

    const begin = new Date();
    await database.exportToFile(`${paths.BOX_DATA_DIR}/box.mysqldump`);
    log(`snapshotBox: took ${(new Date() - begin)/1000} seconds`);
}
|
|
|
|
// Snapshots the box and uploads it to 'snapshot/box' on the backup site,
// recording the snapshot timestamp on the site. Returns { stats, integrity }.
async function uploadBoxSnapshot(backupSite, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotBox(progressCallback);

    const remotePath = addFileExtension(backupSite, `snapshot/box`);

    const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
    if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);

    const uploadConfig = {
        remotePath,
        backupSite,
        dataLayout: new DataLayout(boxDataDir, []),
        progressTag: 'box'
    };

    progressCallback({ message: 'Uploading box snapshot' });

    const startTime = new Date();

    // runs in a root child process; see runBackupUpload()
    const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);

    log(`uploadBoxSnapshot: took ${(new Date() - startTime)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, 'box', { timestamp: new Date().toISOString() });

    return { stats, integrity };
}
|
|
|
|
// Copies the backup at srcRemotePath (and its companion .backupinfo file) to
// destRemotePath on the same backup site. Rethrows the underlying error on failure.
async function copy(backupSite, srcRemotePath, destRemotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof srcRemotePath, 'string');
    assert.strictEqual(typeof destRemotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const startTime = new Date();
    const [copyError] = await safe(backupFormats.api(backupSite.format).copy(backupSite, srcRemotePath, destRemotePath, progressCallback));
    if (copyError) {
        log(`copy: copy to ${destRemotePath} errored. error: ${copyError.message}`);
        throw copyError;
    }
    log(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - startTime)/1000} seconds`);

    const [copyChecksumError] = await safe(backupSites.storageApi(backupSite).copy(backupSite.config, `${srcRemotePath}.backupinfo`, `${destRemotePath}.backupinfo`, progressCallback));
    if (copyChecksumError) {
        // mention backupinfo so this failure is distinguishable from the main copy failure above
        log(`copy: copy to ${destRemotePath}.backupinfo errored. error: ${copyChecksumError.message}`);
        throw copyChecksumError;
    }
    log(`copy: copied backupinfo successfully to ${destRemotePath}.backupinfo`);
}
|
|
|
|
// Creates the box (system) backup: uploads a fresh box snapshot, rotates it to
// a tagged remote path and records a backup row that depends on the given app
// backups. Aggregated upload/copy stats across all apps are stored alongside.
// Returns the new backup id; throws (with the row left in ERROR state) if the
// snapshot copy fails.
async function backupBox(backupSite, appBackupsMap, tag, options, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert(util.types.isMap(appBackupsMap), 'appBackupsMap should be a Map'); // id -> stats: { upload: { fileCount, size, startTime, duration, transferred } }
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const uploadStartTime = Date.now();
    const uploadResult = await uploadBoxSnapshot(backupSite, progressCallback); // { stats, integrity }
    const stats = { upload: { ...uploadResult.stats, startTime: uploadStartTime, duration: Date.now() - uploadStartTime } };

    const remotePath = addFileExtension(backupSite, `${tag}/box_v${constants.VERSION}`);

    // sum upload stats of all app backups, seeded with the box upload itself.
    // stats object might be null for stopped/errored apps from old versions
    stats.aggregatedUpload = Array.from(appBackupsMap.values()).filter(s => !!s?.upload).reduce((acc, cur) => ({
        fileCount: acc.fileCount + cur.upload.fileCount,
        size: acc.size + cur.upload.size,
        transferred: acc.transferred + cur.upload.transferred,
        startTime: Math.min(acc.startTime, cur.upload.startTime), // earliest start across all uploads
        duration: acc.duration + cur.upload.duration,
    }), stats.upload);

    log(`backupBox: rotating box snapshot of ${backupSite.id} to id ${remotePath}. ${JSON.stringify(stats)}`);

    const data = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_BOX,
        state: backups.BACKUP_STATE_CREATING, // flipped to NORMAL/ERROR after the copy below
        identifier: backups.BACKUP_IDENTIFIER_BOX,
        dependsOn: [...appBackupsMap.keys()],
        manifest: null,
        preserveSecs: options.preserveSecs || 0,
        appConfig: null,
        siteId: backupSite.id,
        stats,
        integrity: uploadResult.integrity
    };

    const id = await backups.add(data);
    const snapshotPath = addFileExtension(backupSite, 'snapshot/box');
    const copyStartTime = Date.now();
    const [error] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
    if (!error) {
        stats.copy = { startTime: copyStartTime, duration: Date.now() - copyStartTime };
        // stats object might be null for stopped/errored apps from old versions
        stats.aggregatedCopy = Array.from(appBackupsMap.values()).filter(s => !!s?.copy).reduce((acc, cur) => ({
            startTime: Math.min(acc.startTime, cur.copy.startTime),
            duration: acc.duration + cur.copy.duration,
        }), stats.copy);
    }
    await backups.update({ id }, { stats, state });
    if (error) throw error; // row above remains in ERROR state for inspection

    return id;
}
|
|
|
|
// Prepares an app for backup: writes its config, runs the app's backup
// command and dumps its addon data (databases etc.) into the data directory.
async function snapshotApp(app, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const begin = new Date();
    progressCallback({ message: `Snapshotting app ${app.fqdn}` });

    // these steps are sequential on purpose: config first, then the app's own
    // backup hook, then the addon dumps
    await apps.writeConfig(app);
    await services.runBackupCommand(app);
    await services.backupAddons(app, app.manifest.addons);

    log(`snapshotApp: ${app.fqdn} took ${(new Date() - begin)/1000} seconds`);
}
|
|
|
|
// Snapshots an app and uploads the snapshot to 'snapshot/app_<id>' on the
// backup site, recording snapshot info (timestamp, manifest) on the site.
// Returns { stats, integrity }.
async function uploadAppSnapshot(backupSite, app, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    await snapshotApp(app, progressCallback);

    const remotePath = addFileExtension(backupSite, `snapshot/app_${app.id}`);
    const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
    if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`);

    // apps with a storage volume keep their 'data' subtree on that volume
    const dataLayout = new DataLayout(appDataDir, app.storageVolumeId ? [{ localDir: await apps.getStorageDir(app), remoteDir: 'data' }] : []);

    progressCallback({ message: `Uploading app snapshot ${app.fqdn}`});

    const uploadConfig = {
        remotePath,
        backupSite,
        dataLayout,
        progressTag: app.fqdn
    };

    const startTime = new Date();

    // runs in a root child process; see runBackupUpload()
    const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);

    log(`uploadAppSnapshot: ${app.fqdn} uploaded to ${remotePath}. ${(new Date() - startTime)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, app.id, { timestamp: new Date().toISOString(), manifest: app.manifest });

    return { stats, integrity };
}
|
|
|
|
// Backs up one app under the given tag: snapshot + upload, then rotate the
// snapshot to the tagged remote path and record the backup row. If the app
// cannot be backed up (bad state), its latest known-good backup on this site
// is re-used instead. Returns { id, stats }, or null when there is nothing to re-use.
async function backupAppWithTag(app, backupSite, tag, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    if (!apps.canBackupApp(app)) { // if we cannot backup, reuse it's most recent backup
        const lastKnownGoodAppBackup = await backups.getLatestInTargetByIdentifier(app.id, backupSite.id);
        if (lastKnownGoodAppBackup === null) return null; // no backup to re-use
        return { id: lastKnownGoodAppBackup.id, stats: lastKnownGoodAppBackup.stats };
    }

    const uploadStartTime = Date.now();
    const uploadResult = await uploadAppSnapshot(backupSite, app, progressCallback); // { stats, integrity }
    const stats = { upload: { ...uploadResult.stats, startTime: uploadStartTime, duration: Date.now() - uploadStartTime } };

    const manifest = app.manifest;
    const remotePath = addFileExtension(backupSite, `${tag}/app_${app.fqdn}_v${manifest.version}`);

    log(`backupAppWithTag: rotating ${app.fqdn} snapshot of ${backupSite.id} to path ${remotePath}`);

    const data = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: manifest.version,
        type: backups.BACKUP_TYPE_APP,
        state: backups.BACKUP_STATE_CREATING, // flipped to NORMAL/ERROR after the copy below
        identifier: app.id,
        dependsOn: [],
        manifest,
        preserveSecs: options.preserveSecs || 0,
        appConfig: app,
        siteId: backupSite.id,
        stats,
        integrity: uploadResult.integrity
    };

    const id = await backups.add(data);
    const snapshotPath = addFileExtension(backupSite, `snapshot/app_${app.id}`);
    const copyStartTime = Date.now();
    const [error] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
    if (!error) stats.copy = { startTime: copyStartTime, duration: Date.now() - copyStartTime };
    await backups.update({ id }, { stats, state });
    if (error) throw error; // row above remains in ERROR state for inspection

    return { id, stats };
}
|
|
|
|
// Backs up a single app under its per-app backup lock. With
// options.snapshotOnly only the snapshot is taken (returns null); otherwise a
// full tagged backup is made and { id, stats } is returned (or null, see
// backupAppWithTag). The lock is always released, even when the backup throws.
async function backupApp(app, backupSite, options, progressCallback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    let backup = null;
    await locks.wait(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
    try {
        if (options.snapshotOnly) {
            await snapshotApp(app, progressCallback);
        } else {
            const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
            backup = await backupAppWithTag(app, backupSite, tag, options, progressCallback); // { id, stats }
        }
    } finally {
        // previously the lock leaked if the snapshot/backup threw
        await locks.release(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
    }

    return backup;
}
|
|
|
|
// Uploads the mail data directory to 'snapshot/mail' on the backup site and
// records the snapshot timestamp. Returns { stats, integrity }.
async function uploadMailSnapshot(backupSite, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    const uploadConfig = {
        remotePath: addFileExtension(backupSite, 'snapshot/mail'),
        backupSite,
        dataLayout: new DataLayout(mailDataDir, []),
        progressTag: 'mail'
    };

    progressCallback({ message: 'Uploading mail snapshot' });

    const begin = new Date();
    const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);
    log(`uploadMailSnapshot: took ${(new Date() - begin)/1000} seconds`);

    await backupSites.setSnapshotInfo(backupSite, 'mail', { timestamp: new Date().toISOString() });

    return { stats, integrity };
}
|
|
|
|
// Backs up mail under the given tag: snapshot + upload, then rotate the
// snapshot to the tagged remote path and record the backup row.
// Returns { id, stats }.
async function backupMailWithTag(backupSite, tag, options, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof tag, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    log(`backupMailWithTag: backing up mail with tag ${tag}`);

    const uploadStartTime = Date.now();
    const uploadResult = await uploadMailSnapshot(backupSite, progressCallback); // { stats, integrity }
    const stats = { upload: { ...uploadResult.stats, startTime: uploadStartTime, duration: Date.now() - uploadStartTime } };

    const remotePath = addFileExtension(backupSite, `${tag}/mail_v${constants.VERSION}`);

    log(`backupMailWithTag: rotating mail snapshot of ${backupSite.id} to ${remotePath}`);

    const data = {
        remotePath,
        encryptionVersion: backupSite.encryption ? 2 : null,
        packageVersion: constants.VERSION,
        type: backups.BACKUP_TYPE_MAIL,
        state: backups.BACKUP_STATE_CREATING, // flipped to NORMAL/ERROR after the copy below
        identifier: backups.BACKUP_IDENTIFIER_MAIL,
        dependsOn: [],
        manifest: null,
        preserveSecs: options.preserveSecs || 0,
        appConfig: null,
        siteId: backupSite.id,
        stats,
        integrity: uploadResult.integrity
    };

    const id = await backups.add(data);
    const snapshotPath = addFileExtension(backupSite, 'snapshot/mail');
    const copyStartTime = Date.now();
    const [error] = await safe(copy(backupSite, snapshotPath, remotePath, progressCallback));
    const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
    if (!error) stats.copy = { startTime: copyStartTime, duration: Date.now() - copyStartTime };
    await backups.update({ id }, { stats, state });
    if (error) throw error; // row above remains in ERROR state for inspection

    return { id, stats };
}
|
|
|
|
// Restores mail data from remotePath into the resolved mail data directory.
async function downloadMail(backupSite, remotePath, progressCallback) {
    assert.strictEqual(typeof backupSite, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const mailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);

    const begin = new Date();
    await download(backupSite, remotePath, new DataLayout(mailDataDir, []), progressCallback);
    log('downloadMail: time: %s', (new Date() - begin)/1000);
}
|
|
|
|
// this function is called from external process. calling process is expected to have a lock
// Backs up all eligible apps (backup enabled and part of the site's contents),
// then mail, then the box itself, all under one timestamp tag.
// NOTE(review): normally returns the box backup id, but returns an array of
// app backup ids when 'box' is not in the site contents — confirm callers
// handle both shapes.
async function fullBackup(backupSiteId, options, progressCallback) {
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const backupSite = await backupSites.get(backupSiteId);
    if (!backupSite) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Backup site not found');

    const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,''); // unique tag under which all apps/mail/box backs up

    const allApps = await apps.list();

    let percent = 1;
    const step = 100/(allApps.length+3); // +3 for mail, box and the initial step

    const appBackupsMap = new Map(); // id -> stats
    for (let i = 0; i < allApps.length; i++) {
        const app = allApps[i];
        percent += step;

        if (!app.enableBackup) {
            log(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) since automatic backup disabled`);
            continue; // nothing to backup
        }
        if (!backupSites.hasContent(backupSite, app.id)) {
            log(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) as it is not in site contents`);
            continue;
        }

        progressCallback({ percent, message: `Backing up ${app.fqdn} (${i+1}/${allApps.length}). Waiting for lock` });
        await locks.wait(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
        const startTime = new Date();
        // capture the error with safe() so the lock below is always released
        const [appBackupError, appBackupResult] = await safe(backupAppWithTag(app, backupSite, tag, options, (progress) => progressCallback({ percent, message: progress.message })));
        log(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);
        await locks.release(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
        if (appBackupError) throw appBackupError;
        if (appBackupResult) appBackupsMap.set(appBackupResult.id, appBackupResult.stats); // backupId can be null if in BAD_STATE and never backed up
    }

    if (!backupSites.hasContent(backupSite, 'box')) return [...appBackupsMap.keys()];

    progressCallback({ percent, message: 'Backing up mail' });
    percent += step;
    const mailBackup = await backupMailWithTag(backupSite, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
    appBackupsMap.set(mailBackup.id, mailBackup.stats);

    progressCallback({ percent, message: 'Backing up system data' });
    percent += step;

    const backupId = await backupBox(backupSite, appBackupsMap, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
    return backupId;
}
|
|
|
|
// this function is called from external process
// Backs up a single app to the given backup site. Returns the new backup id,
// or null when nothing was backed up (e.g. options.snapshotOnly, or the app
// cannot be backed up and has no previous backup to re-use) — previously this
// crashed with a TypeError on 'backup.id'.
async function appBackup(appId, backupSiteId, options, progressCallback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const app = await apps.get(appId);
    if (!app) throw new BoxError(BoxError.BAD_FIELD, 'App not found');

    const backupSite = await backupSites.get(backupSiteId);
    if (!backupSite) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Backup site not found');

    await progressCallback({ percent: 1, message: `Backing up ${app.fqdn}. Waiting for lock` });
    const startTime = new Date();
    const backup = await backupApp(app, backupSite, options, progressCallback); // { id, stats } or null
    await progressCallback({ percent: 100, message: `app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds` });
    return backup?.id ?? null;
}
|
|
|
|
// public interface of the backup task module
export default {
    fullBackup,
    appBackup,

    restore,

    downloadApp,
    backupApp,

    downloadMail,

    upload, // called from scripts/backupupload.js which runs as root
};
|