The snapshot file tracks the contents of the snapshot directory. When an app gets deleted, the cleaner removes the upstream snapshot directory on its next run. Cache files are used by the rsync logic to track what was uploaded into the snapshot during the previous run, without needing to rescan upstream.
483 lines
19 KiB
JavaScript
483 lines
19 KiB
JavaScript
'use strict';
|
|
|
|
// public interface of the backup-targets module
exports = module.exports = {
    // lookup
    get,
    getPrimary,
    list,

    // lifecycle
    add,
    del,

    // settings
    setConfig,
    setLimits,
    setSchedule,
    setRetention,
    setPrimary,

    removePrivateFields,

    // background tasks
    startBackupTask,

    startCleanupTask,

    // snapshot bookkeeping
    getSnapshotInfo,
    setSnapshotInfo,

    validateFormat,

    getRootPath,

    // mount management
    remount,
    getMountStatus,
    ensureMounted,
};
|
|
|
|
const assert = require('assert'),
|
|
backups = require('./backups.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
cron = require('./cron.js'),
|
|
{ CronTime } = require('cron'),
|
|
crypto = require('crypto'),
|
|
database = require('./database.js'),
|
|
debug = require('debug')('box:backups'),
|
|
eventlog = require('./eventlog.js'),
|
|
hush = require('./hush.js'),
|
|
locks = require('./locks.js'),
|
|
mounts = require('./mounts.js'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
storage = require('./storage.js'),
|
|
tasks = require('./tasks.js');
|
|
|
|
// column list used by all backupTargets SELECTs; the *Json columns are parsed by postProcess()
const BACKUP_TARGET_FIELDS = [ 'id', 'label', 'provider', 'configJson', 'limitsJson', 'retentionJson', 'schedule', 'encryptionJson', 'format', 'main', 'creationTime', 'ts' ].join(',');
|
|
|
|
// Returns the directory under which this target stores backups.
// provider decides how config is interpreted; mountPath is the host mount
// point used for managed (box-mounted) providers.
function getRootPath(provider, config, mountPath) {
    assert.strictEqual(typeof provider, 'string'); // was missing; config and mountPath were asserted
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof mountPath, 'string');

    if (mounts.isManagedProvider(provider)) {
        return path.join(mountPath, config.prefix);
    } else if (provider === 'mountpoint') {
        return path.join(config.mountPoint, config.prefix);
    } else if (provider === 'filesystem') {
        return config.backupFolder;
    } else { // cloud providers use the prefix as-is
        return config.prefix;
    }
}
|
|
|
|
// Converts a raw backupTargets row into the shape used by the rest of the
// codebase: the *Json columns are parsed into objects, the mysql-safe 'main'
// column becomes 'primary' and the dynamic rootPath is computed.
// Mutates result in place and returns it.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    const { configJson, limitsJson, retentionJson, encryptionJson } = result;

    result.config = configJson ? safe.JSON.parse(configJson) : {};
    delete result.configJson;

    // note: rootPath will be dynamic for managed mount providers during app import . since it's used in api backends it has to be inside config
    result.config.rootPath = getRootPath(result.provider, result.config, paths.MANAGED_BACKUP_MOUNT_DIR);
    result.config.provider = result.provider; // this allows api backends to identify the real provider

    result.limits = safe.JSON.parse(limitsJson) || {};
    delete result.limitsJson;

    result.retention = safe.JSON.parse(retentionJson) || {};
    delete result.retentionJson;

    result.encryption = encryptionJson ? safe.JSON.parse(encryptionJson) : null;
    delete result.encryptionJson;

    result.primary = !!result.main; // primary is a reserved keyword in mysql
    delete result.main;

    return result;
}
|
|
|
|
// Strips secrets before a target object is sent to api clients: the raw
// encryption keys become two booleans, rootPath is dropped and the provider's
// own removePrivateFields sanitizes the config. Mutates and returns target.
function removePrivateFields(target) {
    assert.strictEqual(typeof target, 'object');

    const encryption = target.encryption;
    target.encrypted = encryption !== null;
    target.encryptedFilenames = encryption?.encryptedFilenames || false;
    delete target.encryption;

    delete target.config.rootPath;
    target.config = storage.api(target.provider).removePrivateFields(target.config);

    return target;
}
|
|
|
|
// Returns null when format is a supported backup format, a BoxError otherwise.
function validateFormat(format) {
    assert.strictEqual(typeof format, 'string');

    if (format === 'tgz' || format === 'rsync') return null; // was loose '==' on 'rsync'

    return new BoxError(BoxError.BAD_FIELD, 'Invalid backup format');
}
|
|
|
|
// Returns null when label is acceptable, a BoxError otherwise.
// Explicitly returns null on success for consistency with the other validators
// (previously returned undefined; both are falsy so callers are unaffected).
function validateLabel(label) {
    assert.strictEqual(typeof label, 'string');

    if (label.length > 48) return new BoxError(BoxError.BAD_FIELD, 'Label too long');

    return null;
}
|
|
|
|
// Returns null when schedule is a valid cron pattern (or the special "never"
// pattern), a BoxError otherwise.
function validateSchedule(schedule) {
    assert.strictEqual(typeof schedule, 'string');

    if (schedule === constants.CRON_PATTERN_NEVER) return null; // disabled schedule is always valid

    const parsed = safe.safeCall(() => new CronTime(schedule));
    return parsed ? null : new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern');
}
|
|
|
|
// Validates a retention policy. At least one retention property must be set
// and every property present must be a number. Returns null on success, a
// BoxError otherwise. Checks run in the same order and emit the same messages
// as the previous copy-pasted per-key version.
function validateRetention(retention) {
    assert.strictEqual(typeof retention, 'object'); // note: typeof null is 'object', so null reaches the check below

    if (!retention) return new BoxError(BoxError.BAD_FIELD, 'retention is required');

    const RETENTION_KEYS = [ 'keepWithinSecs', 'keepDaily', 'keepWeekly', 'keepMonthly', 'keepYearly' ];

    if (!RETENTION_KEYS.find(k => !!retention[k])) return new BoxError(BoxError.BAD_FIELD, 'retention properties missing');

    for (const k of RETENTION_KEYS) {
        if (k in retention && typeof retention[k] !== 'number') return new BoxError(BoxError.BAD_FIELD, `retention.${k} must be a number`);
    }

    return null;
}
|
|
|
|
// Returns null when password meets the minimum length, a BoxError otherwise.
function validateEncryptionPassword(password) {
    assert.strictEqual(typeof password, 'string');

    if (password.length < 8) return new BoxError(BoxError.BAD_FIELD, 'password must be at least 8 characters'); // fixed 'atleast' typo

    return null; // explicit null for consistency with the other validators
}
|
|
|
|
// Pages through all backup targets, primary target first. page is 1-based.
async function list(page, perPage) {
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets ORDER BY main DESC LIMIT ?,?`, [ offset, perPage ]);

    for (const result of results) postProcess(result); // postProcess mutates in place

    return results;
}
|
|
|
|
// Returns the backup target with the given id, or null if it does not exist.
async function get(id) {
    assert.strictEqual(typeof id, 'string'); // was missing; sibling functions assert their args

    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets WHERE id=?`, [ id ]);
    if (results.length === 0) return null;
    return postProcess(results[0]);
}
|
|
|
|
// Returns the primary backup target, or null if none is marked primary.
async function getPrimary() {
    const [ row ] = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets WHERE main=?`, [ true ]);
    return row ? postProcess(row) : null;
}
|
|
|
|
// Persists the allowed mutable fields of data for target.
// label/schedule/main are stored verbatim; config/limits/retention are stored
// as JSON. format, provider and encryption cannot be changed after creation.
// Throws NOT_FOUND if the target row does not exist.
async function update(target, data) {
    assert.strictEqual(typeof target, 'object');
    assert(data && typeof data === 'object');

    const args = [];
    const fields = [];
    for (const k in data) {
        if (k === 'label' || k === 'schedule' || k === 'main') { // format, provider cannot be updated
            fields.push(k + ' = ?');
            args.push(data[k]);
        } else if (k === 'config' || k === 'limits' || k === 'retention') { // encryption cannot be updated
            fields.push(`${k}Json = ?`); // was `${k}JSON`; columns are configJson/limitsJson/retentionJson (worked only because MySQL column names are case-insensitive)
            args.push(JSON.stringify(data[k]));
        }
    }

    // previously an empty field list produced malformed SQL ('SET  WHERE ...')
    if (fields.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'No fields to update');

    args.push(target.id);

    const [updateError, result] = await safe(database.query('UPDATE backupTargets SET ' + fields.join(', ') + ' WHERE id = ?', args));
    if (updateError) throw updateError;
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');
}
|
|
|
|
// Updates the cron schedule of a backup target and reschedules the cron job.
async function setSchedule(backupTarget, schedule, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof schedule, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const error = validateSchedule(schedule); // synchronous; stray 'await' removed
    if (error) throw error;

    await update(backupTarget, { schedule });

    await cron.handleBackupScheduleChanged(backupTarget);

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, schedule });
}
|
|
|
|
// Updates the resource limits (e.g. memoryLimit) of a backup target.
async function setLimits(backupTarget, limits, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof limits, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    await update(backupTarget, { limits });

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, limits });
}
|
|
|
|
// Updates the retention policy of a backup target after validating it.
async function setRetention(backupTarget, retention, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof retention, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const error = validateRetention(retention); // synchronous; stray 'await' removed
    if (error) throw error;

    await update(backupTarget, { retention });

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, retention });
}
|
|
|
|
// Marks backupTarget as the primary target. Runs inside a transaction so that
// exactly one row ends up with main=true.
async function setPrimary(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const queries = [
        { query: 'SELECT 1 FROM backupTargets WHERE id=? FOR UPDATE', args: [ backupTarget.id ] }, // ensure this exists!
        { query: 'UPDATE backupTargets SET main=?', args: [ false ] }, // demote all targets first
        { query: 'UPDATE backupTargets SET main=? WHERE id=?', args: [ true, backupTarget.id ] }
    ];

    const [error, results] = await safe(database.transaction(queries));
    if (error) throw error;
    if (results[2].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, primary: true });
}
|
|
|
|
// Deletes a non-primary backup target along with its backup records, disables
// its cron schedule and removes the local info directory.
async function del(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    if (backupTarget.primary) throw new BoxError(BoxError.CONFLICT, 'Cannot delete the primary backup target');

    const queries = [
        { query: 'DELETE FROM backups WHERE targetId = ?', args: [ backupTarget.id ] },
        { query: 'DELETE FROM backupTargets WHERE id=? AND main=?', args: [ backupTarget.id, false ] }, // cannot delete primary
    ];

    const [error, result] = await safe(database.transaction(queries));
    if (error && error.code === 'ER_NO_REFERENCED_ROW_2') throw new BoxError(BoxError.NOT_FOUND, error);
    if (error) throw error;
    if (result[1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_REMOVE, auditSource, { backupTarget: backupTarget });

    backupTarget.schedule = constants.CRON_PATTERN_NEVER; // unschedule any backup cron job
    await cron.handleBackupScheduleChanged(backupTarget);

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);
    safe.fs.rmSync(infoDir, { recursive: true, force: true }); // rmdirSync({ recursive }) is deprecated (DEP0147)
}
|
|
|
|
// Starts a full backup task for target in the background and returns the
// taskId. Throws BAD_STATE if a backup task for this target is already running.
// The task lock is released when the background task settles.
async function startBackupTask(target, auditSource) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof auditSource, 'object'); // was missing; sibling task starters assert this

    const [error] = await safe(locks.acquire(`${locks.TYPE_FULL_BACKUP_TASK_PREFIX}${target.id}`));
    if (error) throw new BoxError(BoxError.BAD_STATE, `Another backup task is in progress: ${error.message}`);

    // at least 1024 MB for the task process; more if the target's limit (in bytes) allows it
    const memoryLimit = target.limits?.memoryLimit ? Math.max(target.limits.memoryLimit/1024/1024, 1024) : 1024;

    const taskId = await tasks.add(`${tasks.TASK_FULL_BACKUP_PREFIX}${target.id}`, [ target.id, { /* options */ } ]);

    await eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });

    // background; not awaited on purpose - the caller only needs the taskId
    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit, oomScoreAdjust: -999 })
        .then(async (backupId) => {
            const backup = await backups.get(backupId);
            await eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, backupId, remotePath: backup.remotePath });
        })
        .catch(async (error) => {
            const timedOut = error.code === tasks.ETIMEOUT;
            await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut })); // safe: never leave an unhandled rejection
        })
        .finally(async () => {
            await locks.release(`${locks.TYPE_FULL_BACKUP_TASK_PREFIX}${target.id}`);
            await locks.releaseByTaskId(taskId);
        });

    return taskId;
}
|
|
|
|
// Deletes all rsync '.sync.cache' files of a target. These caches track what
// was uploaded in the previous run and must be dropped when the destination
// changes, forcing a full resync.
async function removeCacheFiles(backupTarget) {
    assert.strictEqual(typeof backupTarget, 'object');

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);

    const entries = safe.fs.readdirSync(infoDir);
    if (!entries) throw new BoxError(BoxError.FS_ERROR, `Unable to access ${infoDir}: ${safe.error.message}`);

    const cacheFiles = entries.filter((e) => e.endsWith('.sync.cache'));
    for (const cacheFile of cacheFiles) {
        safe.fs.unlinkSync(path.join(infoDir, cacheFile));
    }
}
|
|
|
|
// keeps track of contents of the snapshot directory. this provides a way to clean up backups of uninstalled apps
async function getSnapshotInfo(backupTarget) {
    assert.strictEqual(typeof backupTarget, 'object');

    const snapshotFilePath = path.join(paths.BACKUP_INFO_DIR, backupTarget.id, constants.SNAPSHOT_INFO_FILENAME);

    // a missing or unparseable snapshot file is treated as an empty snapshot
    const info = safe.JSON.parse(safe.fs.readFileSync(snapshotFilePath, 'utf8'));
    return info || {};
}
|
|
|
|
// keeps track of contents of the snapshot directory. this provides a way to clean up backups of uninstalled apps
// pass a falsy info (null) to drop the entry for id and remove its rsync cache files
async function setSnapshotInfo(backupTarget, id, info) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof id, 'string'); // 'box', 'mail' or appId
    assert.strictEqual(typeof info, 'object'); // note: null passes (typeof null === 'object')

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);
    const snapshotFilePath = path.join(infoDir, constants.SNAPSHOT_INFO_FILENAME);

    // a missing or unparseable snapshot file is treated as empty
    const data = safe.JSON.parse(safe.fs.readFileSync(snapshotFilePath, 'utf8')) || {};

    if (info) {
        data[id] = info;
    } else {
        delete data[id];
    }

    if (!safe.fs.writeFileSync(snapshotFilePath, JSON.stringify(data, null, 4), 'utf8')) {
        throw new BoxError(BoxError.FS_ERROR, safe.error.message);
    }

    if (!info) { // unlink the cache files
        safe.fs.unlinkSync(path.join(infoDir, `${id}.sync.cache`));
        safe.fs.unlinkSync(path.join(infoDir, `${id}.sync.cache.new`));
    }
}
|
|
|
|
// Starts a backup cleanup task for backupTarget in the background and returns
// its taskId.
async function startCleanupTask(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const taskId = await tasks.add(`${tasks.TASK_CLEAN_BACKUPS_PREFIX}${backupTarget.id}`, [ backupTarget.id ]);

    // background; not awaited on purpose - the caller only needs the taskId
    tasks.startTask(taskId, {})
        .then(async (result) => { // { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
            await eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: null, ...result });
        })
        .catch(async (error) => {
            // wrapped in safe() so a failing eventlog write cannot cause an unhandled rejection (matches startBackupTask)
            await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: error.message }));
        });

    return taskId;
}
|
|
|
|
// Builds the mount descriptor handed to the mounts module for managed providers.
function managedBackupMountObject(config) {
    assert(mounts.isManagedProvider(config.provider));

    const mountObject = {
        name: 'backup', // fixed mount name for the backup storage
        hostPath: paths.MANAGED_BACKUP_MOUNT_DIR,
        mountType: config.provider,
        mountOptions: config.mountOptions
    };

    return mountObject;
}
|
|
|
|
// Remounts the backup storage of target. Only managed providers have a mount
// that this module controls; everything else is a no-op.
async function remount(target) {
    assert.strictEqual(typeof target, 'object');

    if (!mounts.isManagedProvider(target.provider)) return; // nothing mounted by us

    await mounts.remount(managedBackupMountObject(target.config));
}
|
|
|
|
// Resolves the mount state of a target. Providers that are not backed by a
// local mount always report 'active'. Returns { state, message }.
async function getMountStatus(target) {
    assert.strictEqual(typeof target, 'object');

    let hostPath;
    if (mounts.isManagedProvider(target.provider)) hostPath = paths.MANAGED_BACKUP_MOUNT_DIR;
    else if (target.provider === 'mountpoint') hostPath = target.config.mountPoint;
    else if (target.provider === 'filesystem') hostPath = target.config.backupFolder;
    else return { state: 'active' }; // cloud providers have no local mount

    return await mounts.getStatus(target.provider, hostPath); // { state, message }
}
|
|
|
|
// Checks the target's mount and attempts one remount when it is not active.
// Returns the (possibly refreshed) mount status.
async function ensureMounted(target) {
    assert.strictEqual(typeof target, 'object');

    const status = await getMountStatus(target);
    if (status.state === 'active') return status;

    // bug fix: was 'await remount()' without the target argument, which always
    // tripped remount()'s typeof-object assertion instead of remounting
    await remount(target);

    return await getMountStatus(target);
}
|
|
|
|
// Replaces the storage configuration of a backup target. The new config is
// test-mounted before the old mount is torn down, so a bad config never
// destroys a working setup. Throws BAD_STATE in demo mode.
async function setConfig(backupTarget, newConfig, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof newConfig, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const oldConfig = backupTarget.config;

    // carry private fields over from the old config - presumably the secrets
    // stripped for api clients by removePrivateFields; confirm against the storage api
    storage.api(backupTarget.provider).injectPrivateFields(newConfig, oldConfig);

    debug('setConfig: validating new storage configuration');
    await storage.testMount(backupTarget.provider, newConfig, '/mnt/backup-storage-validation');

    debug('setConfig: removing old storage configuration');
    // best-effort via safe(): a failing unmount of the old storage must not block the switch
    if (mounts.isManagedProvider(backupTarget.provider)) await safe(mounts.removeMount(managedBackupMountObject(oldConfig)));

    debug('setConfig: setting up new storage configuration');
    await storage.setupManagedMount(backupTarget.provider, newConfig, paths.MANAGED_BACKUP_MOUNT_DIR);

    debug('setConfig: clearing backup cache');
    // FIXME: this cleans up the cache files in case the bucket or the prefix changes and the destination already has something there
    // however, this will also resync if just the credentials change
    await removeCacheFiles(backupTarget);

    await update(backupTarget, { config: newConfig });

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, newConfig });
}
|
|
|
|
// Creates a new backup target: validates the inputs, test-mounts the storage,
// creates the local info directory and persists the row (never as primary).
// Returns the new target id. Throws BAD_STATE in demo mode.
async function add(data, auditSource) {
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const { provider, label, config, format, retention, schedule } = data; // required
    const limits = data.limits || null,
        encryptionPassword = data.encryptionPassword || null,
        encryptedFilenames = data.encryptedFilenames || false;

    const formatError = validateFormat(format);
    if (formatError) throw formatError;

    const labelError = validateLabel(label);
    if (labelError) throw labelError;

    // schedule/retention were previously not validated here, unlike setSchedule/setRetention
    const scheduleError = validateSchedule(schedule);
    if (scheduleError) throw scheduleError;

    const retentionError = validateRetention(retention);
    if (retentionError) throw retentionError;

    let encryption = null;
    if (encryptionPassword) {
        const encryptionPasswordError = validateEncryptionPassword(encryptionPassword);
        if (encryptionPasswordError) throw encryptionPasswordError;
        encryption = hush.generateEncryptionKeysSync(encryptionPassword);
        encryption.encryptedFilenames = !!encryptedFilenames;
    }

    debug('add: validating new storage configuration');
    await storage.testMount(provider, config, '/mnt/backup-storage-validation');

    debug('add: setting up new storage configuration'); // was mislabeled 'setStorage:'
    await storage.setupManagedMount(provider, config, paths.MANAGED_BACKUP_MOUNT_DIR);

    const id = `bc-${crypto.randomUUID()}`;
    if (!safe.fs.mkdirSync(`${paths.BACKUP_INFO_DIR}/${id}`)) throw new BoxError(BoxError.FS_ERROR, `Failed to create info dir: ${safe.error.message}`);

    await database.query('INSERT INTO backupTargets (id, label, provider, configJson, limitsJson, retentionJson, schedule, encryptionJson, format, main) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        [ id, label, provider, JSON.stringify(config), JSON.stringify(limits), JSON.stringify(retention), schedule, JSON.stringify(encryption), format, false ]);

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_ADD, auditSource, { id, label, provider, config, schedule, format });

    return id;
}
|