// backuptargets.js — backup target management (447 lines, 16 KiB, JavaScript)
'use strict';
|
|
|
|
// Public interface of the backup-target module, grouped by concern:
// target CRUD, per-target settings, background tasks, snapshot bookkeeping
// and mount handling for managed storage providers.
exports = module.exports = {
    // target CRUD
    get,
    list,
    add,
    del,

    // per-target settings
    setConfig,
    setLimits,
    setSchedule,
    setRetention,
    setPrimary,

    // strips secrets before sending a target to API clients
    removePrivateFields,

    // background tasks
    startBackupTask,
    startCleanupTask,
    cleanupCacheFilesSync,

    // snapshot directory bookkeeping
    getSnapshotInfo,
    setSnapshotInfo,

    validateFormat,

    getRootPath,

    // mount handling for managed storage providers
    remount,
    getMountStatus,
    ensureMounted,
};
|
|
|
|
const assert = require('assert'),
|
|
backups = require('./backups.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
cron = require('./cron.js'),
|
|
{ CronTime } = require('cron'),
|
|
database = require('./database.js'),
|
|
debug = require('debug')('box:backups'),
|
|
eventlog = require('./eventlog.js'),
|
|
hush = require('./hush.js'),
|
|
locks = require('./locks.js'),
|
|
mounts = require('./mounts.js'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
storage = require('./storage.js'),
|
|
tasks = require('./tasks.js'),
|
|
uuid = require('uuid');
|
|
|
|
// Columns selected from the backupTargets table. The *Json columns are parsed
// and the `main` flag renamed to `primary` by postProcess().
const BACKUP_TARGET_FIELDS = [ 'id', 'label', 'provider', 'configJson', 'limitsJson', 'retentionJson', 'schedule', 'encryptionJson', 'format', 'main', 'creationTime', 'ts' ].join(',');
|
|
|
|
// Returns the directory/prefix under which backups of this target are stored.
// Managed providers (cifs/sshfs/...) store under the managed mount, the
// 'mountpoint' and 'filesystem' providers use paths from the config, and
// cloud providers scope everything by the configured prefix.
function getRootPath(provider, config, mountPath) {
    assert.strictEqual(typeof provider, 'string'); // was missing; all sibling functions assert their args
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof mountPath, 'string');

    if (mounts.isManagedProvider(provider)) {
        return path.join(mountPath, config.prefix);
    } else if (provider === 'mountpoint') {
        return path.join(config.mountPoint, config.prefix);
    } else if (provider === 'filesystem') {
        return config.backupFolder;
    } else { // cloud storage providers (s3 etc)
        return config.prefix;
    }
}
|
|
|
|
// Normalizes a raw backupTargets database row in-place: parses the *Json
// columns into objects, attaches derived config fields and renames `main`
// to `primary`. Returns the same (mutated) row.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    const parseJsonColumn = function (jsonKey, key, fallback) {
        result[key] = result[jsonKey] ? safe.JSON.parse(result[jsonKey]) : fallback;
        delete result[jsonKey];
    };

    parseJsonColumn('configJson', 'config', {});

    // note: rootPath will be dynamic for managed mount providers during app import . since it's used in api backends it has to be inside config
    result.config.rootPath = getRootPath(result.provider, result.config, paths.MANAGED_BACKUP_MOUNT_DIR);
    result.config.provider = result.provider; // this allows api backends to identify the real provider

    parseJsonColumn('limitsJson', 'limits', {});
    parseJsonColumn('retentionJson', 'retention', {});
    parseJsonColumn('encryptionJson', 'encryption', null);

    result.primary = !!result.main; // primary is a reserved keyword in mysql
    delete result.main;

    return result;
}
|
|
|
|
// Strips secrets from a target before it is handed to API clients: replaces
// encryption key material with a placeholder, drops the derived rootPath and
// delegates provider-specific scrubbing to the storage backend.
function removePrivateFields(target) {
    assert.strictEqual(typeof target, 'object');

    if (target.encryption) {
        target.password = constants.SECRET_PLACEHOLDER;
        delete target.encryption;
    }

    delete target.rootPath;

    return storage.api(target.provider).removePrivateFields(target.config);
}
|
|
|
|
// Validates a backup format identifier. Only 'tgz' (tarball) and 'rsync'
// (file-by-file copy) are supported.
// Returns null when valid, a BAD_FIELD BoxError otherwise.
function validateFormat(format) {
    assert.strictEqual(typeof format, 'string');

    if (format === 'tgz' || format === 'rsync') return null; // strict equality; '==' was a typo

    return new BoxError(BoxError.BAD_FIELD, 'Invalid backup format');
}
|
|
|
|
// Validates a backup target label (max 48 chars).
// Returns null when valid, a BAD_FIELD BoxError otherwise.
function validateLabel(label) {
    assert.strictEqual(typeof label, 'string');

    if (label.length > 48) return new BoxError(BoxError.BAD_FIELD, 'Label too long');

    return null; // explicit, consistent with the other validators (was implicit undefined)
}
|
|
|
|
// Checks that `schedule` is either the special "never" pattern or a cron
// expression that the `cron` package can parse.
// Returns null when valid, a BAD_FIELD BoxError otherwise.
function validateSchedule(schedule) {
    assert.strictEqual(typeof schedule, 'string');

    if (schedule === constants.CRON_PATTERN_NEVER) return null;

    const parsed = safe.safeCall(() => new CronTime(schedule));

    return parsed ? null : new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern');
}
|
|
|
|
// Validates a retention policy. At least one keep* property must be set to a
// truthy value, and every keep* property that is present must be a number.
// Returns null when valid, a BAD_FIELD BoxError otherwise.
function validateRetention(retention) {
    assert.strictEqual(typeof retention, 'object'); // note: typeof null is 'object', hence the explicit check below

    if (!retention) return new BoxError(BoxError.BAD_FIELD, 'retention is required');

    const RETENTION_KEYS = [ 'keepWithinSecs', 'keepDaily', 'keepWeekly', 'keepMonthly', 'keepYearly' ];

    // .some() instead of .find() — we only need the boolean
    if (!RETENTION_KEYS.some(k => !!retention[k])) return new BoxError(BoxError.BAD_FIELD, 'retention properties missing');

    // single loop replaces five copy-pasted checks; messages are unchanged
    for (const k of RETENTION_KEYS) {
        if (k in retention && typeof retention[k] !== 'number') return new BoxError(BoxError.BAD_FIELD, `retention.${k} must be a number`);
    }

    return null;
}
|
|
|
|
// Validates an encryption password (minimum 8 characters).
// Returns null when valid, a BAD_FIELD BoxError otherwise.
function validateEncryptionPassword(password) {
    assert.strictEqual(typeof password, 'string');

    if (password.length < 8) return new BoxError(BoxError.BAD_FIELD, 'password must be at least 8 characters'); // fixed 'atleast' typo in user-facing message

    return null; // explicit, consistent with the other validators
}
|
|
|
|
// Returns one page of backup targets, newest first. `page` is 1-based.
async function list(page, perPage) {
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets ORDER BY creationTime DESC LIMIT ?,?`, [ offset, perPage ]);

    for (const result of results) postProcess(result);

    return results;
}
|
|
|
|
// Returns the post-processed backup target with the given id, or null when
// no such target exists.
async function get(id) {
    assert.strictEqual(typeof id, 'string'); // was missing; all sibling functions assert their args

    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets WHERE id=?`, [ id ]);
    if (results.length === 0) return null;
    return postProcess(results[0]);
}
|
|
|
|
// Persists a partial update of a backup target row. Only label, schedule,
// main and the JSON-backed fields (config, limits, retention) may be
// updated; format, provider and encryption are immutable by design.
// Throws NOT_FOUND when the target row does not exist.
async function update(target, data) {
    assert.strictEqual(typeof target, 'object');
    assert(data && typeof data === 'object');

    const args = [];
    const fields = [];
    for (const k in data) {
        if (k === 'label' || k === 'schedule' || k === 'main') { // format, provider cannot be updated
            fields.push(k + ' = ?');
            args.push(data[k]);
        } else if (k === 'config' || k === 'limits' || k === 'retention') { // encryption cannot be updated
            // column names are configJson/limitsJson/retentionJson; `JSON` only worked because
            // MySQL column identifiers are case-insensitive — keep it consistent with the schema
            fields.push(`${k}Json = ?`);
            args.push(JSON.stringify(data[k]));
        }
    }
    args.push(target.id);

    const [updateError, result] = await safe(database.query('UPDATE backupTargets SET ' + fields.join(', ') + ' WHERE id = ?', args));
    if (updateError) throw updateError;
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');
}
|
|
|
|
// Validates and stores a new cron schedule for the target, then notifies the
// cron subsystem so the backup job is (re)scheduled.
async function setSchedule(target, schedule) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof schedule, 'string');

    const validationError = await validateSchedule(schedule);
    if (validationError) throw validationError;

    await update(target, { schedule });
    await cron.handleBackupScheduleChanged(target);
}
|
|
|
|
// Stores new resource limits (e.g. memoryLimit) for the target. Limits are
// stored as-is; they are interpreted when a backup task is started.
async function setLimits(target, limits) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof limits, 'object');

    await update(target, { limits });
}
|
|
|
|
// Validates and stores a new retention policy for the target.
async function setRetention(target, retention) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof retention, 'object');

    const validationError = await validateRetention(retention);
    if (validationError) throw validationError;

    await update(target, { retention });
}
|
|
|
|
// Marks the given target as the primary backup target. Exactly one target is
// primary at a time: the transaction clears the flag on all rows and then
// sets it on the given target. Throws NOT_FOUND when the target is missing.
async function setPrimary(target) {
    assert.strictEqual(typeof target, 'object');
    // removed a stray copy-pasted assert on an undefined `retention` variable
    // which made this function unconditionally throw an AssertionError

    const queries = [
        { query: 'SELECT 1 FROM backupTargets WHERE id=? FOR UPDATE', args: [ target.id ] }, // ensure this exists!
        { query: 'UPDATE backupTargets SET main=?', args: [ false ] },
        { query: 'UPDATE backupTargets SET main=? WHERE id=?', args: [ true, target.id ] }
    ];

    const [error, result] = await safe(database.transaction(queries));
    if (error) throw error;
    if (result[2].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');
}
|
|
|
|
// Deletes a non-primary backup target together with its backup records,
// disables its cron schedule and clears cached sync state.
// Throws CONFLICT for the primary target and NOT_FOUND when missing.
async function del(target, auditSource) {
    assert.strictEqual(typeof target, 'object');
    assert(auditSource && typeof auditSource === 'object');

    if (target.main) throw new BoxError(BoxError.CONFLICT, 'Cannot delete the primary backup target');

    const queries = [
        { query: 'DELETE FROM backups WHERE targetId = ?', args: [ target.id ] },
        { query: 'DELETE FROM backupTargets WHERE id=? AND main=?', args: [ target.id, false ] },
    ];

    const [error, result] = await safe(database.transaction(queries));
    if (error && error.code === 'ER_NO_REFERENCED_ROW_2') throw new BoxError(BoxError.NOT_FOUND, error);
    if (error) throw error;
    if (result[1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');
    // await eventlog.add(eventlog.ACTION_ARCHIVES_DEL, auditSource, { id: archive.id, backupId: archive.backupId });

    target.schedule = constants.CRON_PATTERN_NEVER;
    await cron.handleBackupScheduleChanged(target);

    debug('del: clearing backup cache');
    cleanupCacheFilesSync(target); // was called without arguments, which threw on target.id
}
|
|
|
|
// Starts a full backup of the target as a background task. Acquires the
// global full-backup lock so only one full backup runs at a time, records
// start/finish in the event log and releases the locks when the task
// settles. Resolves with the task id as soon as the task has been started.
async function startBackupTask(target, auditSource) {
    assert.strictEqual(typeof target, 'object');
    assert(auditSource && typeof auditSource === 'object'); // was missing; consistent with the other task starters

    const [error] = await safe(locks.acquire(locks.TYPE_FULL_BACKUP_TASK));
    if (error) throw new BoxError(BoxError.BAD_STATE, `Another backup task is in progress: ${error.message}`);

    // presumably limits.memoryLimit is in bytes and the task option is in MB with a 1024 floor — TODO confirm units
    const memoryLimit = target.limits?.memoryLimit ? Math.max(target.limits.memoryLimit/1024/1024, 1024) : 1024;

    const taskId = await tasks.add(`${tasks.TASK_FULL_BACKUP_PREFIX}${target.id}`, [ target.id, { /* options */ } ]);

    await eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });

    // background; the promise chain is intentionally not awaited
    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit, oomScoreAdjust: -999 })
        .then(async (backupId) => {
            const backup = await backups.get(backupId);
            await eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, backupId, remotePath: backup.remotePath });
        })
        .catch(async (error) => {
            const timedOut = error.code === tasks.ETIMEOUT;
            // safe() so a failing eventlog write does not become an unhandled rejection
            await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut }));
        })
        .finally(async () => {
            await locks.release(locks.TYPE_FULL_BACKUP_TASK);
            await locks.releaseByTaskId(taskId);
        });

    return taskId;
}
|
|
|
|
// Removes the target's '*.sync.cache' files from its backup-info directory.
// this function is used in migrations - 20200512172301-settings-backup-encryption.js
function cleanupCacheFilesSync(target) {
    assert.strictEqual(typeof target, 'object');

    const infoDir = path.join(paths.BACKUP_INFO_DIR, target.id);

    const files = safe.fs.readdirSync(infoDir);
    if (!files) return; // directory missing or unreadable; nothing to clean

    files
        .filter(function (f) { return f.endsWith('.sync.cache'); })
        .forEach(function (f) {
            // unlink inside the per-target directory that was listed above;
            // the old code joined against BACKUP_INFO_DIR directly and never
            // actually removed the files
            safe.fs.unlinkSync(path.join(infoDir, f));
        });
}
|
|
|
|
// Reads the snapshot info entry for `id` from the snapshot info file.
// Returns an empty object when the file is missing/unparseable or when it
// has no entry for this id.
function getSnapshotInfo(id) {
    assert.strictEqual(typeof id, 'string');

    const info = safe.JSON.parse(safe.fs.readFileSync(paths.SNAPSHOT_INFO_FILE, 'utf8'));

    return info ? (info[id] || { }) : { };
}
|
|
|
|
// keeps track of contents of the snapshot directory. this provides a way to clean up backups of uninstalled apps
async function setSnapshotInfo(id, info) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof info, 'object'); // typeof null is 'object': null removes the entry

    const data = safe.JSON.parse(safe.fs.readFileSync(paths.SNAPSHOT_INFO_FILE, 'utf8')) || { };

    if (info) data[id] = info;
    else delete data[id];

    const written = safe.fs.writeFileSync(paths.SNAPSHOT_INFO_FILE, JSON.stringify(data, null, 4), 'utf8');
    if (!written) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
}
|
|
|
|
// Starts a background task that prunes expired backups of the target.
// Resolves with the task id immediately; the outcome is written to the
// event log when the task settles.
async function startCleanupTask(target, auditSource) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const taskId = await tasks.add(`${tasks.TASK_CLEAN_BACKUPS_PREFIX}${target.id}`, [ target.id ]);

    // background; the promise chain is intentionally not awaited
    tasks.startTask(taskId, {})
        .then(async (result) => { // { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
            await eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: null, ...result });
        })
        .catch(async (error) => {
            // safe() so a failing eventlog write does not become an unhandled rejection (matches startBackupTask)
            await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: error.message }));
        });

    return taskId;
}
|
|
|
|
// Builds the mount descriptor the mounts subsystem uses for managed storage
// providers. The managed backup mount always lives at the fixed
// MANAGED_BACKUP_MOUNT_DIR host path.
function managedBackupMountObject(config) {
    assert(mounts.isManagedProvider(config.provider));

    const mountObject = {
        name: 'backup',
        hostPath: paths.MANAGED_BACKUP_MOUNT_DIR,
        mountType: config.provider,
        mountOptions: config.mountOptions
    };

    return mountObject;
}
|
|
|
|
// Re-mounts the managed storage of the target. No-op for providers that do
// not use a managed mount (filesystem, mountpoint, cloud storage).
async function remount(target) {
    assert.strictEqual(typeof target, 'object');

    if (!mounts.isManagedProvider(target.provider)) return;

    await mounts.remount(managedBackupMountObject(target.config));
}
|
|
|
|
// Determines the mount status of the target's storage. Providers without a
// local mount (cloud storage) are always reported 'active'.
// Returns { state, message }.
async function getMountStatus(target) {
    assert.strictEqual(typeof target, 'object');

    let hostPath;
    if (mounts.isManagedProvider(target.provider)) hostPath = paths.MANAGED_BACKUP_MOUNT_DIR;
    else if (target.provider === 'mountpoint') hostPath = target.config.mountPoint;
    else if (target.provider === 'filesystem') hostPath = target.config.backupFolder;
    else return { state: 'active' }; // nothing mounted locally for this provider

    return await mounts.getStatus(target.provider, hostPath); // { state, message }
}
|
|
|
|
// Ensures the target's storage is mounted. Returns the current status when
// already active; otherwise attempts a remount and returns the re-checked
// status ({ state, message }).
async function ensureMounted(target) {
    assert.strictEqual(typeof target, 'object');

    const status = await getMountStatus(target);
    if (status.state === 'active') return status;

    await remount(target); // was called without arguments, which failed remount()'s assert

    return await getMountStatus(target);
}
|
|
|
|
// Replaces the storage configuration of an existing target. The sequence is
// order-sensitive: validate the new config on a scratch mount point first,
// then tear down the old managed mount, set up the new one, clear cached
// sync state and finally persist the new config.
// Throws BAD_STATE in demo mode and whatever testMount/setupManagedMount throw.
async function setConfig(target, newConfig) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof newConfig, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const oldConfig = target.config;

    // clients send SECRET_PLACEHOLDER for unchanged secrets; copy the real values back in — TODO confirm against storage.api contract
    storage.api(target.provider).injectPrivateFields(newConfig, oldConfig);

    debug('setConfig: validating new storage configuration');
    await storage.testMount(target.provider, newConfig, '/mnt/backup-storage-validation');

    debug('setConfig: removing old storage configuration');
    // best-effort (safe): a stale/broken old mount must not block reconfiguration
    if (mounts.isManagedProvider(target.provider)) await safe(mounts.removeMount(managedBackupMountObject(oldConfig)));

    debug('setConfig: setting up new storage configuration');
    await storage.setupManagedMount(target.provider, newConfig, paths.MANAGED_BACKUP_MOUNT_DIR);

    debug('setConfig: clearing backup cache');
    cleanupCacheFilesSync(target);

    await update(target, { config: newConfig });
}
|
|
|
|
// Creates a new backup target: validates format/label/encryption password,
// verifies the storage can actually be mounted, sets up the managed mount if
// the provider needs one and inserts the database row. New targets are never
// primary; use setPrimary() to promote one. Returns the generated target id.
async function add(data) {
    assert.strictEqual(typeof data, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const { provider, label, config, format, retention, schedule } = data; // required
    const limits = data.limits || null,
        encryptionPassword = data.encryptionPassword || null,
        encryptedFilenames = data.encryptedFilenames || false;

    const formatError = validateFormat(format);
    if (formatError) throw formatError;

    const labelError = validateLabel(label);
    if (labelError) throw labelError;

    // NOTE(review): schedule and retention are not validated here, unlike in
    // setSchedule/setRetention — confirm the route layer validates them
    let encryption = null;
    if (encryptionPassword) {
        const encryptionPasswordError = validateEncryptionPassword(encryptionPassword);
        if (encryptionPasswordError) throw encryptionPasswordError;
        encryption = hush.generateEncryptionKeysSync(encryptionPassword);
        encryption.encryptedFilenames = !!encryptedFilenames;
    }

    debug('add: validating new storage configuration');
    await storage.testMount(provider, config, '/mnt/backup-storage-validation');

    debug('add: setting up new storage configuration'); // log prefix fixed; it said 'setStorage:' (copy-paste from setConfig)
    await storage.setupManagedMount(provider, config, paths.MANAGED_BACKUP_MOUNT_DIR);

    const id = `bc-${uuid.v4()}`;
    await database.query('INSERT INTO backupTargets (id, label, provider, configJson, limitsJson, retentionJson, schedule, encryptionJson, format, main) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        [ id, label, provider, JSON.stringify(config), JSON.stringify(limits), JSON.stringify(retention), schedule, JSON.stringify(encryption), format, false ]);

    return id;
}
|