// cloudron-box/src/backups.js
'use strict';
exports = module.exports = {
get,
2021-07-14 11:07:19 -07:00
getByIdentifierAndStatePaged,
getByTypePaged,
add,
update,
setState,
2021-07-14 11:07:19 -07:00
list,
del,
2015-09-21 14:14:21 -07:00
startBackupTask,
startCleanupTask,
cleanupCacheFilesSync,
removePrivateFields,
generateEncryptionKeysSync,
2021-07-14 11:07:19 -07:00
getSnapshotInfo,
setSnapshotInfo,
validatePolicy,
testStorage,
validateFormat,
2023-08-04 11:24:28 +05:30
getPolicy,
setPolicy,
getConfig,
setConfig,
setStorage,
setLimits,
2023-08-04 11:24:28 +05:30
2023-08-15 20:24:54 +05:30
getRootPath,
setupManagedStorage,
remount,
getMountStatus,
ensureMounted,
BACKUP_IDENTIFIER_BOX: 'box',
BACKUP_IDENTIFIER_MAIL: 'mail',
BACKUP_TYPE_APP: 'app',
BACKUP_TYPE_BOX: 'box',
BACKUP_TYPE_MAIL: 'mail',
BACKUP_STATE_NORMAL: 'normal', // should rename to created to avoid listing in UI?
BACKUP_STATE_CREATING: 'creating',
BACKUP_STATE_ERROR: 'error',
};
// module dependencies (one declaration per line, alphabetical)
const assert = require('assert');
const BoxError = require('./boxerror.js');
const constants = require('./constants.js');
const cron = require('./cron.js');
const { CronTime } = require('cron');
const crypto = require('crypto');
const database = require('./database.js');
const debug = require('debug')('box:backups');
const eventlog = require('./eventlog.js');
const hat = require('./hat.js');
const locks = require('./locks.js');
const mounts = require('./mounts.js');
const path = require('path');
const paths = require('./paths.js');
const safe = require('safetydance');
const settings = require('./settings.js');
const storage = require('./storage.js');
const tasks = require('./tasks.js');
const _ = require('./underscore.js');

// columns selected by every backup query; the *Json columns are expanded by postProcess()
const BACKUPS_FIELDS = [ 'id', 'remotePath', 'label', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOnJson', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion', 'appConfigJson' ];
// Expands a raw database row in place: the *Json columns are parsed into
// their object form and the raw columns removed. Returns the same object.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    result.dependsOn = result.dependsOnJson ? safe.JSON.parse(result.dependsOnJson) : [];
    result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
    result.appConfig = result.appConfigJson ? safe.JSON.parse(result.appConfigJson) : null;

    delete result.dependsOnJson;
    delete result.manifestJson;
    delete result.appConfigJson;

    return result;
}
// Strips secrets from backupConfig (mutates it) before it is handed out.
// Encryption keys are replaced by a password placeholder, the internal
// rootPath is dropped, and the storage provider removes its own secrets.
function removePrivateFields(backupConfig) {
    assert.strictEqual(typeof backupConfig, 'object');

    if (backupConfig.encryption) {
        delete backupConfig.encryption;
        backupConfig.password = constants.SECRET_PLACEHOLDER; // UI shows/roundtrips this placeholder
    }

    delete backupConfig.rootPath; // computed field, see getConfig()

    return storage.api(backupConfig.provider).removePrivateFields(backupConfig);
}
// this function is used in migrations - 20200512172301-settings-backup-encryption.js
// Derives the four 32-byte backup encryption keys from a user password using
// scrypt with a fixed salt. Deterministic: the same password always yields
// the same keys. Each key is returned as a 64-char hex string.
function generateEncryptionKeysSync(password) {
    assert.strictEqual(typeof password, 'string');

    const KEY_LENGTH = 32; // bytes per key
    const material = crypto.scryptSync(password, Buffer.from('CLOUDRONSCRYPTSALT', 'utf8'), 4 * KEY_LENGTH);
    const keyAt = (index) => material.subarray(index * KEY_LENGTH, (index + 1) * KEY_LENGTH).toString('hex');

    return {
        dataKey: keyAt(0),
        dataHmacKey: keyAt(1),
        filenameKey: keyAt(2),
        filenameHmacKey: keyAt(3)
    };
}
// Inserts a new backup record and returns its generated id.
// Throws BoxError.ALREADY_EXISTS on a duplicate id.
async function add(data) {
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof data.remotePath, 'string');
    assert(data.encryptionVersion === null || typeof data.encryptionVersion === 'number');
    assert.strictEqual(typeof data.packageVersion, 'string');
    assert.strictEqual(typeof data.type, 'string');
    assert.strictEqual(typeof data.identifier, 'string');
    assert.strictEqual(typeof data.state, 'string');
    assert(Array.isArray(data.dependsOn));
    assert.strictEqual(typeof data.manifest, 'object');
    assert.strictEqual(typeof data.format, 'string');
    assert.strictEqual(typeof data.preserveSecs, 'number');
    assert.strictEqual(typeof data.appConfig, 'object');

    const creationTime = data.creationTime || new Date(); // allow tests to set the time

    // type and identifier are same for non-app types
    const prefixId = data.type === exports.BACKUP_TYPE_APP ? `${data.type}_${data.identifier}` : data.type;
    // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
    const id = `${prefixId}_v${data.packageVersion}_${hat(32)}`;

    const [error] = await safe(database.query('INSERT INTO backups (id, remotePath, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOnJson, manifestJson, format, preserveSecs, appConfigJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        [ id, data.remotePath, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, JSON.stringify(data.dependsOn), JSON.stringify(data.manifest), data.format, data.preserveSecs, data.appConfig ? JSON.stringify(data.appConfig) : null ]));

    if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'Backup already exists');
    if (error) throw error;

    return id;
}
// Returns a page of backups for the given identifier and state, newest first.
async function getByIdentifierAndStatePaged(identifier, state, page, perPage) {
    assert.strictEqual(typeof identifier, 'string');
    assert.strictEqual(typeof state, 'string');
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query(`SELECT ${BACKUPS_FIELDS} FROM backups WHERE identifier = ? AND state = ? ORDER BY creationTime DESC LIMIT ?,?`, [ identifier, state, offset, perPage ]);

    return results.map(postProcess);
}
// Returns the backup record for id, or null when it does not exist.
async function get(id) {
    assert.strictEqual(typeof id, 'string');

    const rows = await database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE id = ? ORDER BY creationTime DESC', [ id ]);

    return rows.length === 0 ? null : postProcess(rows[0]);
}
// Returns a page of backups of the given type, newest first.
async function getByTypePaged(type, page, perPage) {
    assert.strictEqual(typeof type, 'string');
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query(`SELECT ${BACKUPS_FIELDS} FROM backups WHERE type = ? ORDER BY creationTime DESC LIMIT ?,?`, [ type, offset, perPage ]);

    return results.map(postProcess);
}
// Returns a BoxError when the label is unusable, null otherwise.
function validateLabel(label) {
    assert.strictEqual(typeof label, 'string');

    if (label.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'label too long');

    const INVALID_CHARS = /[^a-zA-Z0-9._() -]/;
    if (INVALID_CHARS.test(label)) return new BoxError(BoxError.BAD_FIELD, 'label can only contain alphanumerals, space, dot, hyphen, brackets or underscore');

    return null;
}
// Validates a backup policy ({ schedule, retention }). Returns a BoxError on
// failure and undefined (falsy) when the policy is acceptable.
async function validatePolicy(policy) {
    assert.strictEqual(typeof policy, 'object');

    // schedule must be a parseable cron pattern
    if (!safe.safeCall(() => new CronTime(policy.schedule))) return new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern');

    const retention = policy.retention;
    if (!retention) return new BoxError(BoxError.BAD_FIELD, 'retention is required');

    const RETENTION_KEYS = ['keepWithinSecs', 'keepDaily', 'keepWeekly', 'keepMonthly', 'keepYearly'];
    if (!RETENTION_KEYS.some(k => !!retention[k])) return new BoxError(BoxError.BAD_FIELD, 'retention properties missing');

    for (const key of RETENTION_KEYS) {
        if (key in retention && typeof retention[key] !== 'number') return new BoxError(BoxError.BAD_FIELD, `retention.${key} must be a number`);
    }
}
// this is called by REST API
// Updates the user-editable backup fields (label, preserveSecs); other keys
// in data are ignored. preserveSecs is also propagated to all dependent
// (per-app) backup records. Throws BoxError.NOT_FOUND when id is unknown.
async function update(id, data) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof data, 'object');

    if ('label' in data) {
        const error = validateLabel(data.label);
        if (error) throw error;
    }

    // whitelist of columns a caller may set
    const fields = [], values = [];
    for (const p in data) {
        if (p === 'label' || p === 'preserveSecs') {
            fields.push(p + ' = ?');
            values.push(data[p]);
        }
    }

    const backup = await get(id);
    if (backup === null) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

    // fix: previously an empty fields list generated invalid SQL ('UPDATE backups SET  WHERE id = ?')
    if (fields.length === 0) return;

    values.push(id);
    const result = await database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values);
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

    if ('preserveSecs' in data) {
        // update the dependancies
        for (const depId of backup.dependsOn) {
            await database.query('UPDATE backups SET preserveSecs=? WHERE id = ?', [ data.preserveSecs, depId ]);
        }
    }
}
// Moves the backup record with the given id to the given state.
// Throws BoxError.NOT_FOUND when no such record exists.
async function setState(id, state) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof state, 'string');

    const result = await database.query('UPDATE backups SET state = ? WHERE id = ?', [ state, id ]);
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
}
// Acquires the backup lock and starts the backup task in a separate process.
// Returns the taskId immediately; locks are released and a finish event is
// logged from the task completion callback.
async function startBackupTask(auditSource) {
    assert.strictEqual(typeof auditSource, 'object'); // fix: was missing; consistent with startCleanupTask

    const [error] = await safe(locks.acquire(locks.TYPE_BACKUP_TASK));
    if (error) throw new BoxError(BoxError.BAD_STATE, `Another backup task is in progress: ${error.message}`);

    const backupConfig = await getConfig();
    // task memory limit in MB, never below 1024
    const memoryLimit = backupConfig.limits?.memoryLimit ? Math.max(backupConfig.limits.memoryLimit/1024/1024, 1024) : 1024;

    const taskId = await tasks.add(tasks.TASK_BACKUP, [ { /* options */ } ]);

    await eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });

    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit, oomScoreAdjust: -999 }, async function (error, backupId) {
        await locks.release(locks.TYPE_BACKUP_TASK);
        await locks.releaseByTaskId(taskId); // also drop any app-level locks the task held

        const errorMessage = error ? error.message : '';
        const timedOut = error ? error.code === tasks.ETIMEOUT : false;
        const backup = backupId ? await get(backupId) : null;
        await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId, remotePath: backup?.remotePath }), { debug });
    });

    return taskId;
}
// Returns a page of all backup records, newest first.
async function list(page, perPage) {
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups ORDER BY creationTime DESC LIMIT ?,?', [ offset, perPage ]);

    return results.map(postProcess);
}
// Deletes the backup database record only; remote storage is untouched here.
// Throws BoxError.NOT_FOUND when no such record exists.
async function del(id) {
    assert.strictEqual(typeof id, 'string');

    const result = await database.query('DELETE FROM backups WHERE id=?', [ id ]);
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
}
// this function is used in migrations - 20200512172301-settings-backup-encryption.js
// Removes all '*.sync.cache' files from the backup info directory.
// Best-effort: read/unlink errors are swallowed by safe.fs.
function cleanupCacheFilesSync() {
    const entries = safe.fs.readdirSync(path.join(paths.BACKUP_INFO_DIR));
    if (!entries) return; // directory missing or unreadable

    for (const entry of entries) {
        if (entry.endsWith('.sync.cache')) safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, entry));
    }
}
// Returns the snapshot info entry for id, or {} when the info file is
// missing, unparseable or has no entry for id.
function getSnapshotInfo(id) {
    assert.strictEqual(typeof id, 'string');

    const info = safe.JSON.parse(safe.fs.readFileSync(paths.SNAPSHOT_INFO_FILE, 'utf8'));
    return (info && info[id]) || { };
}
// keeps track of contents of the snapshot directory. this provides a way to clean up backups of uninstalled apps
// Sets (or, with a null info, clears) the snapshot info entry for id and
// rewrites the info file. Throws BoxError.FS_ERROR when the write fails.
async function setSnapshotInfo(id, info) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof info, 'object'); // note: typeof null is 'object', so null is accepted

    const existing = safe.JSON.parse(safe.fs.readFileSync(paths.SNAPSHOT_INFO_FILE, 'utf8')) || { };

    if (info) existing[id] = info;
    else delete existing[id];

    const written = safe.fs.writeFileSync(paths.SNAPSHOT_INFO_FILE, JSON.stringify(existing, null, 4), 'utf8');
    if (!written) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
}
// Starts the backup cleanup task and returns its taskId. A finish event with
// the removed/missing paths is logged when the task completes.
async function startCleanupTask(auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    const taskId = await tasks.add(tasks.TASK_CLEAN_BACKUPS, []);

    tasks.startTask(taskId, {}, async (error, result) => { // result is { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
        const pick = (key) => result ? result[key] : [];

        await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
            taskId,
            errorMessage: error ? error.message : null,
            removedBoxBackupPaths: pick('removedBoxBackupPaths'),
            removedMailBackupPaths: pick('removedMailBackupPaths'),
            removedAppBackupPaths: pick('removedAppBackupPaths'),
            missingBackupPaths: pick('missingBackupPaths')
        }), { debug });
    });

    return taskId;
}
// Validates storageConfig by asking the provider implementation to test it.
// Returns a BoxError for an unknown provider; provider test failures throw.
async function testStorage(storageConfig) {
    assert.strictEqual(typeof storageConfig, 'object');

    const api = storage.api(storageConfig.provider);
    if (!api) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');

    await api.testConfig(storageConfig); // fix: reuse the looked-up api instead of a redundant second storage.api() call
}
// Returns a BoxError when the password is too short, undefined otherwise.
function validateEncryptionPassword(password) {
    assert.strictEqual(typeof password, 'string');

    const MINIMUM_LENGTH = 8;
    if (password.length < MINIMUM_LENGTH) return new BoxError(BoxError.BAD_FIELD, 'password must be atleast 8 characters');
}
// Builds the mount descriptor for the managed backup mount (as consumed by
// mounts.js). Callers must only pass configs for managed providers (asserted).
function managedBackupMountObject(backupConfig) {
    assert(mounts.isManagedProvider(backupConfig.provider));
    return {
        name: 'backup',
        hostPath: paths.MANAGED_BACKUP_MOUNT_DIR,
        mountType: backupConfig.provider,
        mountOptions: backupConfig.mountOptions
    };
}
// Re-mounts the managed backup mount based on the current config.
// No-op for providers that do not use a managed mount.
async function remount() {
    const backupConfig = await getConfig();
    if (!mounts.isManagedProvider(backupConfig.provider)) return;

    await mounts.remount(managedBackupMountObject(backupConfig));
}
// Returns { state, message } for the backup storage mount. Providers that do
// not involve a host mount (e.g. object storage) always report 'active'.
async function getMountStatus() {
    const backupConfig = await getConfig();

    let hostPath;
    if (mounts.isManagedProvider(backupConfig.provider)) hostPath = paths.MANAGED_BACKUP_MOUNT_DIR;
    else if (backupConfig.provider === 'mountpoint') hostPath = backupConfig.mountPoint;
    else if (backupConfig.provider === 'filesystem') hostPath = backupConfig.backupFolder;
    else return { state: 'active' }; // nothing to mount for this provider

    return await mounts.getStatus(backupConfig.provider, hostPath); // { state, message }
}
// Ensures the backup mount is active, attempting one remount if it is not.
// Returns the (possibly still inactive) mount status.
async function ensureMounted() {
    const status = await getMountStatus();
    if (status.state === 'active') return status;

    await remount(); // one recovery attempt
    return await getMountStatus();
}
// Returns the stored backup policy, falling back to the built-in default.
async function getPolicy() {
    const DEFAULT_POLICY = {
        retention: { keepWithinSecs: 2 * 24 * 60 * 60 }, // 2 days
        schedule: '00 00 23 * * *' // every day at 11pm
    };

    const stored = await settings.getJson(settings.BACKUP_POLICY_KEY);
    return stored || DEFAULT_POLICY;
}
// Validates and persists the backup policy, then reschedules the cron job.
async function setPolicy(policy) {
    assert.strictEqual(typeof policy, 'object');

    const error = await validatePolicy(policy);
    if (error) throw error;

    await settings.setJson(settings.BACKUP_POLICY_KEY, policy);
    await cron.handleBackupPolicyChanged(policy);
}
// Resolves the root path under which backups live for the given storage
// config. mountPath is only used for managed mount providers.
function getRootPath(storageConfig, mountPath) {
    assert.strictEqual(typeof storageConfig, 'object');
    assert.strictEqual(typeof mountPath, 'string');

    const provider = storageConfig.provider;
    if (mounts.isManagedProvider(provider)) return path.join(mountPath, storageConfig.prefix);
    if (provider === 'mountpoint') return path.join(storageConfig.mountPoint, storageConfig.prefix);
    if (provider === 'filesystem') return storageConfig.backupFolder;
    return storageConfig.prefix; // remote/object storage providers
}
// Returns the effective backup config: stored storage settings (or the
// filesystem default), optional limits, and the computed rootPath.
async function getConfig() {
    const DEFAULT_CONFIG = { provider: 'filesystem', backupFolder: paths.DEFAULT_BACKUP_DIR, format: 'tgz', encryption: null };
    const result = await settings.getJson(settings.BACKUP_STORAGE_KEY) || DEFAULT_CONFIG;

    const limits = await settings.getJson(settings.BACKUP_LIMITS_KEY);
    if (limits) result.limits = limits;

    result.rootPath = getRootPath(result, paths.MANAGED_BACKUP_MOUNT_DIR); // note: rootPath will be dynamic for managed mount providers during app import

    return result;
}
// Persists the full backup config; limits live under their own settings key.
async function setConfig(backupConfig) {
    assert.strictEqual(typeof backupConfig, 'object');

    const { limits, ...storageConfig } = backupConfig;
    await settings.setJson(settings.BACKUP_STORAGE_KEY, storageConfig);
    await settings.setJson(settings.BACKUP_LIMITS_KEY, limits || null);
}
// Persists the backup resource limits object under its own settings key.
// Note: typeof null is 'object', so a null limits value passes the assert.
async function setLimits(limits) {
    assert.strictEqual(typeof limits, 'object');
    await settings.setJson(settings.BACKUP_LIMITS_KEY, limits);
}
// Returns null when format is supported ('tgz' or 'rsync'), a BoxError otherwise.
function validateFormat(format) {
    assert.strictEqual(typeof format, 'string');

    if (format === 'tgz' || format === 'rsync') return null; // fix: strict equality (was '==' for rsync)
    return new BoxError(BoxError.BAD_FIELD, 'Invalid backup format');
}
// Creates and mounts a managed storage mount at hostPath. Returns the mount
// descriptor, or null when the provider does not use a managed mount.
// Throws BoxError.BAD_FIELD on invalid mountOptions.
async function setupManagedStorage(storageConfig, hostPath) {
    assert.strictEqual(typeof storageConfig, 'object');
    assert.strictEqual(typeof hostPath, 'string');

    if (!mounts.isManagedProvider(storageConfig.provider)) return null;

    const mountOptions = storageConfig.mountOptions;
    if (!mountOptions || typeof mountOptions !== 'object') throw new BoxError(BoxError.BAD_FIELD, 'mountOptions must be an object');

    const error = mounts.validateMountOptions(storageConfig.provider, mountOptions);
    if (error) throw error;

    debug(`setupManagedStorage: setting up mount at ${hostPath} with ${storageConfig.provider}`);

    const newMount = {
        name: path.basename(hostPath),
        hostPath,
        mountType: storageConfig.provider,
        mountOptions
    };
    await mounts.tryAddMount(newMount, { timeout: 10 }); // 10 seconds

    return newMount;
}
// Validates and persists a new backup storage configuration. The flow is
// order sensitive: inject stored secrets -> validate (format, password,
// test mount, provider test) -> tear down old mount -> set up new mount ->
// clear caches -> persist settings.
async function setStorage(storageConfig) {
    assert.strictEqual(typeof storageConfig, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const oldConfig = await getConfig();
    // the UI sends placeholders for secrets; re-inject the stored values when the provider is unchanged
    if (storageConfig.provider === oldConfig.provider) storage.api(storageConfig.provider).injectPrivateFields(storageConfig, oldConfig);

    const formatError = validateFormat(storageConfig.format);
    if (formatError) throw formatError;

    storageConfig.encryption = null;
    if ('password' in storageConfig) { // user set password
        if (storageConfig.password === constants.SECRET_PLACEHOLDER) {
            storageConfig.encryption = oldConfig.encryption || null; // placeholder means keep existing keys
        } else {
            const encryptionPasswordError = validateEncryptionPassword(storageConfig.password);
            if (encryptionPasswordError) throw encryptionPasswordError;
            storageConfig.encryption = generateEncryptionKeysSync(storageConfig.password);
        }
        delete storageConfig.password; // never persist the raw password
    }

    debug('setStorage: validating new storage configuration');
    const testMountObject = await setupManagedStorage(storageConfig, '/mnt/backup-storage-validation'); // this validates mountOptions
    let testStorageError;
    try {
        testStorageError = await testStorage(Object.assign({ mountPath: '/mnt/backup-storage-validation' }, storageConfig)); // this validates provider and it's api options. requires mountPath
    } finally {
        // fix: remove the validation mount even when testStorage() throws; previously it leaked on error.
        // best-effort (safe) so a removal failure does not mask the primary error
        if (testMountObject) await safe(mounts.removeMount(testMountObject), { debug });
    }
    if (testStorageError) throw testStorageError;

    debug('setStorage: removing old storage configuration');
    if (mounts.isManagedProvider(oldConfig.provider)) await safe(mounts.removeMount(managedBackupMountObject(oldConfig)));

    debug('setStorage: setting up new storage configuration');
    await setupManagedStorage(storageConfig, paths.MANAGED_BACKUP_MOUNT_DIR);

    debug('setStorage: clearing backup cache');
    cleanupCacheFilesSync();

    await settings.setJson(settings.BACKUP_STORAGE_KEY, storageConfig);
}