Files
cloudron-box/src/backuptargets.js
2025-08-02 00:06:47 +02:00

542 lines
22 KiB
JavaScript

'use strict';
// Manages backup targets: CRUD (add/del/get/list), primary-target selection,
// schedule/retention/limits/config updates, backup & cleanup task lifecycles,
// snapshot bookkeeping and mount handling for managed storage providers.
exports = module.exports = {
get,
getPrimary,
list,
add,
del,
setConfig,
setLimits,
setSchedule,
setRetention,
setPrimary,
removePrivateFields,
startBackupTask,
startCleanupTask,
getSnapshotInfo,
setSnapshotInfo,
getRootPath,
remount,
getMountStatus,
ensureMounted,
storageApi,
getBackupFilePath,
createPseudo,
};
const assert = require('assert'),
backupFormat = require('./backupformat.js'),
backups = require('./backups.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
{ CronTime } = require('cron'),
crypto = require('crypto'),
database = require('./database.js'),
debug = require('debug')('box:backups'),
eventlog = require('./eventlog.js'),
hush = require('./hush.js'),
locks = require('./locks.js'),
mounts = require('./mounts.js'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
tasks = require('./tasks.js');
// format: rsync or tgz
// provider: used to determine the api provider
// config: depends on the 'provider' field. 'provider' is not stored in config object. but it is injected when calling the api backends
// s3 providers - accessKeyId, secretAccessKey, bucket, prefix etc . see s3.js
// gcs - bucket, prefix, projectId, credentials . see gcs.js
// ext4/xfs/disk (managed providers) - mountOptions (diskPath), prefix, noHardlinks. disk is legacy.
// nfs/cifs/sshfs (managed providers) - mountOptions (host/username/password/seal/privateKey etc), prefix, noHardlinks
// filesystem - backupFolder, noHardlinks
// mountpoint - mountPoint, prefix, noHardlinks
// encryption: 'encryptionPassword' and 'encryptedFilenames' is converted into an 'encryption' object using hush.js. Password is lost forever after conversion.
// comma-joined column list shared by every SELECT below; the 'main' column is renamed to 'primary' by postProcess()
const BACKUP_TARGET_FIELDS = [ 'id', 'label', 'provider', 'configJson', 'limitsJson', 'retentionJson', 'schedule', 'encryptionJson', 'format', 'main', 'creationTime', 'ts' ].join(',');
// Resolves the storage backend module implementing the API for a backup target.
// All s3-compatible object storages share storage/s3.js; all mounted/local
// providers share storage/filesystem.js.
// Throws BoxError.BAD_FIELD for an unknown provider.
function storageApi(backupTarget) {
    assert.strictEqual(typeof backupTarget, 'object');

    // filesystem-backed providers (managed mounts, raw disks, plain folders)
    const filesystemProviders = [ 'nfs', 'cifs', 'sshfs', 'mountpoint', 'disk', 'ext4', 'xfs', 'filesystem' ]; // 'xfs' was missing from the original switch despite being documented as a managed provider
    // s3 api compatible object storage providers
    const s3Providers = [
        's3', 'minio', 's3-v4-compat', 'digitalocean-spaces', 'exoscale-sos', 'wasabi',
        'scaleway-objectstorage', 'backblaze-b2', 'cloudflare-r2', 'linode-objectstorage',
        'ovh-objectstorage', 'ionos-objectstorage', 'idrive-e2', 'vultr-objectstorage',
        'upcloud-objectstorage', 'contabo-objectstorage', 'hetzner-objectstorage'
    ];

    const provider = backupTarget.provider;
    if (filesystemProviders.includes(provider)) return require('./storage/filesystem.js');
    if (s3Providers.includes(provider)) return require('./storage/s3.js');
    if (provider === 'gcs') return require('./storage/gcs.js');
    if (provider === 'noop') return require('./storage/noop.js');

    throw new BoxError(BoxError.BAD_FIELD, `Unknown provider: ${provider}`);
}
// Returns the local path of a backup file: the target's rootPath joined with
// the remote path. The noop provider has no rootPath, so the remote path is
// returned verbatim.
function getBackupFilePath(backupTarget, remotePath) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const isNoop = backupTarget.provider === 'noop'; // we don't have a rootPath for noop
    return isNoop ? remotePath : path.join(backupTarget.config.rootPath, remotePath);
}
// Computes the root path under which all backups of a target live.
// Managed providers live under their mount point; 'mountpoint' and
// 'filesystem' use user-supplied locations; object storage uses the prefix.
function getRootPath(provider, config, mountPath) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof mountPath, 'string');

    if (mounts.isManagedProvider(provider)) return path.join(mountPath, config.prefix);

    switch (provider) {
    case 'mountpoint': return path.join(config.mountPoint, config.prefix);
    case 'filesystem': return config.backupFolder;
    default: return config.prefix; // object storage providers use the prefix as-is
    }
}
// Converts a raw database row (in place) into the runtime backup target shape:
// *Json columns are parsed into objects and the reserved 'main' column becomes
// the boolean 'primary'. Returns the same (mutated) object.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    const { configJson, limitsJson, retentionJson, encryptionJson, main } = result;
    delete result.configJson;
    delete result.limitsJson;
    delete result.retentionJson;
    delete result.encryptionJson;
    delete result.main;

    result.config = configJson ? safe.JSON.parse(configJson) : {};
    // note: rootPath will be dynamic for managed mount providers during app import. since it's used in api backends it has to be inside config
    result.config.rootPath = getRootPath(result.provider, result.config, paths.MANAGED_BACKUP_MOUNT_DIR);
    result.limits = safe.JSON.parse(limitsJson) || {};
    result.retention = safe.JSON.parse(retentionJson) || {};
    result.encryption = encryptionJson ? safe.JSON.parse(encryptionJson) : null;
    result.primary = !!main; // primary is a reserved keyword in mysql

    return result;
}
// Strips secrets from a target (in place) before it is sent to clients:
// the encryption keys are reduced to booleans, the derived rootPath is dropped
// and the storage backend scrubs its own credentials. Returns the target.
function removePrivateFields(target) {
    assert.strictEqual(typeof target, 'object');

    const { encryption } = target;
    target.encrypted = encryption !== null;
    target.encryptedFilenames = encryption?.encryptedFilenames || false;
    delete target.encryption;

    delete target.config.rootPath; // derived field injected by postProcess, never exposed
    target.config = storageApi(target).removePrivateFields(target.config);

    return target;
}
// Validates a backup target label. Returns null when valid, a BoxError otherwise.
function validateLabel(label) {
    assert.strictEqual(typeof label, 'string');

    if (label.length > 48) return new BoxError(BoxError.BAD_FIELD, 'Label too long');

    return null; // explicit null for consistency with validateSchedule/validateRetention
}
// Validates a cron schedule pattern. Returns null when valid, a BoxError
// otherwise. The special 'never' pattern is always accepted.
function validateSchedule(schedule) {
    assert.strictEqual(typeof schedule, 'string');

    if (schedule === constants.CRON_PATTERN_NEVER) return null;

    const parsed = safe.safeCall(() => new CronTime(schedule)); // CronTime throws on bad patterns
    return parsed ? null : new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern');
}
// Validates a retention policy object. At least one keep* property must be set
// to a truthy value and every present property must be numeric.
// Returns null when valid, a BoxError otherwise.
function validateRetention(retention) {
    assert.strictEqual(typeof retention, 'object');

    if (!retention) return new BoxError(BoxError.BAD_FIELD, 'retention is required'); // typeof null === 'object', so the assert lets null through

    const KEYS = [ 'keepWithinSecs', 'keepDaily', 'keepWeekly', 'keepMonthly', 'keepYearly' ];

    if (!KEYS.some((key) => !!retention[key])) return new BoxError(BoxError.BAD_FIELD, 'retention properties missing');

    for (const key of KEYS) {
        if (key in retention && typeof retention[key] !== 'number') return new BoxError(BoxError.BAD_FIELD, `retention.${key} must be a number`);
    }

    return null;
}
// Validates a backup encryption password. Returns null when valid, a BoxError otherwise.
function validateEncryptionPassword(password) {
    assert.strictEqual(typeof password, 'string');

    // fix of the user-facing message: 'atleast' -> 'at least'
    if (password.length < 8) return new BoxError(BoxError.BAD_FIELD, 'password must be at least 8 characters');

    return null; // explicit null for consistency with the other validators
}
// Lists backup targets (primary target first). page is 1-based.
async function list(page, perPage) {
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const offset = (page - 1) * perPage;
    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets ORDER BY main DESC LIMIT ?,?`, [ offset, perPage ]);
    for (const result of results) postProcess(result); // in-place conversion of each row
    return results;
}
// Returns the backup target with the given id, or null when not found.
async function get(id) {
    assert.strictEqual(typeof id, 'string'); // added for consistency: every other entry point asserts its arguments

    const results = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets WHERE id=?`, [ id ]);
    if (results.length === 0) return null;

    return postProcess(results[0]);
}
// Returns the primary backup target, or null when no target is marked primary.
async function getPrimary() {
    const rows = await database.query(`SELECT ${BACKUP_TARGET_FIELDS} FROM backupTargets WHERE main=?`, [ true ]);
    return rows.length === 0 ? null : postProcess(rows[0]);
}
// Persists a partial update of a backup target row. Only whitelisted keys are
// written; format and provider are immutable, encryption cannot be updated.
// Throws BoxError.NOT_FOUND when the target does not exist.
async function update(target, data) {
    assert.strictEqual(typeof target, 'object');
    assert(data && typeof data === 'object');

    const fields = [];
    const args = [];
    for (const key in data) {
        if (key === 'label' || key === 'schedule' || key === 'main') { // format, provider cannot be updated
            fields.push(`${key} = ?`);
            args.push(data[key]);
        } else if (key === 'config' || key === 'limits' || key === 'retention') { // encryption cannot be updated
            fields.push(`${key}Json = ?`); // was '${key}JSON': matches the actual column casing (configJson, limitsJson, retentionJson); only worked before because mysql column names are case-insensitive
            args.push(JSON.stringify(data[key]));
        }
    }

    if (fields.length === 0) return; // nothing whitelisted to update; avoid generating invalid SQL ('SET WHERE ...')

    args.push(target.id);

    const [updateError, result] = await safe(database.query(`UPDATE backupTargets SET ${fields.join(', ')} WHERE id = ?`, args));
    if (updateError) throw updateError;
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');
}
// Updates the backup schedule of a target and re-arms the cron job.
// Throws BoxError.BAD_FIELD for an invalid cron pattern.
async function setSchedule(backupTarget, schedule, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof schedule, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const error = validateSchedule(schedule); // validateSchedule is synchronous; the stray 'await' was removed
    if (error) throw error;

    await update(backupTarget, { schedule });

    // NOTE(review): cron.handleBackupScheduleChanged appears to read target.schedule
    // (del() mutates it before calling the handler), so refresh the in-memory
    // object before notifying — previously the stale schedule was passed. TODO confirm
    backupTarget.schedule = schedule;
    await cron.handleBackupScheduleChanged(backupTarget);

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, schedule });
}
// Updates the resource limits (e.g. memoryLimit) of a backup target.
async function setLimits(backupTarget, limits, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof limits, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    await update(backupTarget, { limits });

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, limits });
}
// Updates the retention policy of a backup target.
// Throws BoxError.BAD_FIELD for an invalid policy.
async function setRetention(backupTarget, retention, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof retention, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const error = validateRetention(retention); // validateRetention is synchronous; the stray 'await' was removed
    if (error) throw error;

    await update(backupTarget, { retention });

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, retention });
}
// Marks the given target as the primary one. Runs in a single transaction so
// that exactly one row ends up with main=true.
// Throws BoxError.NOT_FOUND when the target does not exist.
async function setPrimary(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const [error, result] = await safe(database.transaction([
        { query: 'SELECT 1 FROM backupTargets WHERE id=? FOR UPDATE', args: [ backupTarget.id ] }, // lock the row and ensure this exists!
        { query: 'UPDATE backupTargets SET main=?', args: [ false ] }, // demote everything
        { query: 'UPDATE backupTargets SET main=? WHERE id=?', args: [ true, backupTarget.id ] } // promote the target
    ]));
    if (error) throw error;
    if (result[2].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, primary: true });
}
// Deletes a backup target, its backup records, its cron job and its cached
// info dir. The primary target cannot be deleted.
async function del(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    // check this BEFORE tearing down the storage backend. previously teardown
    // ran first, so attempting to delete the primary target tore down its
    // storage and then threw, leaving the primary target broken
    if (backupTarget.primary) throw new BoxError(BoxError.CONFLICT, 'Cannot delete the primary backup target');

    await safe(storageApi(backupTarget).teardown(backupTarget.config), { debug }); // ignore error

    const queries = [
        { query: 'DELETE FROM backups WHERE targetId = ?', args: [ backupTarget.id ] },
        { query: 'DELETE FROM backupTargets WHERE id=? AND main=?', args: [ backupTarget.id, false ] }, // cannot delete primary
    ];
    const [error, result] = await safe(database.transaction(queries));
    if (error && error.code === 'ER_NO_REFERENCED_ROW_2') throw new BoxError(BoxError.NOT_FOUND, error);
    if (error) throw error;
    if (result[1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Target not found');

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_REMOVE, auditSource, { backupTarget: backupTarget });

    // disarm any scheduled backup job for this target
    backupTarget.schedule = constants.CRON_PATTERN_NEVER;
    await cron.handleBackupScheduleChanged(backupTarget);

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);
    safe.fs.rmdirSync(infoDir, { recursive: true });
}
// Starts a full backup task for the target and returns the taskId immediately;
// the task runs in the background and its outcome is recorded in the eventlog.
// Throws BoxError.BAD_STATE when another backup for this target is running.
async function startBackupTask(target, auditSource) {
    assert.strictEqual(typeof target, 'object');
    assert.strictEqual(typeof auditSource, 'object'); // added for consistency with the other entry points

    const lockName = `${locks.TYPE_FULL_BACKUP_TASK_PREFIX}${target.id}`;
    const [lockError] = await safe(locks.acquire(lockName));
    if (lockError) throw new BoxError(BoxError.BAD_STATE, `Another backup task is in progress: ${lockError.message}`);

    // NOTE(review): assumes limits.memoryLimit is in bytes and the task takes MB, floored at 1024 — TODO confirm
    const memoryLimit = target.limits?.memoryLimit ? Math.max(target.limits.memoryLimit/1024/1024, 1024) : 1024;

    let taskId;
    try {
        taskId = await tasks.add(`${tasks.TASK_FULL_BACKUP_PREFIX}${target.id}`, [ target.id, { /* options */ } ]);
        await eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });
    } catch (error) {
        await safe(locks.release(lockName)); // don't leak the lock when task setup fails
        throw error;
    }

    // background
    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit, oomScoreAdjust: -999 })
        .then(async (backupId) => {
            const backup = await backups.get(backupId);
            await eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, backupId, remotePath: backup.remotePath });
        })
        .catch(async (error) => {
            const timedOut = error.code === tasks.ETIMEOUT;
            await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut }));
        })
        .finally(async () => {
            await locks.release(lockName);
            await locks.releaseByTaskId(taskId);
        });

    return taskId;
}
// Removes all rsync '.sync.cache' files in the target's info dir.
// Throws BoxError.FS_ERROR when the dir cannot be listed.
async function removeCacheFiles(backupTarget) {
    assert.strictEqual(typeof backupTarget, 'object');

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);
    const entries = safe.fs.readdirSync(infoDir);
    if (!entries) throw new BoxError(BoxError.FS_ERROR, `Unable to access ${infoDir}: ${safe.error.message}`);

    entries.filter((name) => name.endsWith('.sync.cache'))
        .forEach((name) => safe.fs.unlinkSync(path.join(infoDir, name)));
}
// Returns the snapshot info of a target ({} when absent or unparseable).
// The snapshot info keeps track of the contents of the snapshot directory;
// this provides a way to clean up backups of uninstalled apps.
async function getSnapshotInfo(backupTarget) {
    assert.strictEqual(typeof backupTarget, 'object');

    const snapshotFilePath = path.join(paths.BACKUP_INFO_DIR, backupTarget.id, constants.SNAPSHOT_INFO_FILENAME);
    return safe.JSON.parse(safe.fs.readFileSync(snapshotFilePath, 'utf8')) || {};
}
// Sets (or removes, when info is null) the snapshot info entry for 'id'.
// The snapshot info keeps track of the contents of the snapshot directory;
// this provides a way to clean up backups of uninstalled apps.
// Throws BoxError.FS_ERROR when the info file cannot be written.
async function setSnapshotInfo(backupTarget, id, info) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof id, 'string'); // 'box', 'mail' or appId
    assert.strictEqual(typeof info, 'object'); // note: null passes this assert and means 'remove'

    const infoDir = path.join(paths.BACKUP_INFO_DIR, backupTarget.id);
    const snapshotFilePath = path.join(infoDir, constants.SNAPSHOT_INFO_FILENAME);

    const data = safe.JSON.parse(safe.fs.readFileSync(snapshotFilePath, 'utf8')) || {};
    if (info) {
        data[id] = info;
    } else {
        delete data[id];
    }

    if (!safe.fs.writeFileSync(snapshotFilePath, JSON.stringify(data, null, 4), 'utf8')) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    if (!info) { // removing an entry also unlinks its rsync cache files
        safe.fs.unlinkSync(path.join(infoDir, `${id}.sync.cache`));
        safe.fs.unlinkSync(path.join(infoDir, `${id}.sync.cache.new`));
    }
}
// Starts a backup cleanup task for the target in the background and returns
// the taskId; the outcome is recorded in the eventlog.
async function startCleanupTask(backupTarget, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const taskId = await tasks.add(`${tasks.TASK_CLEAN_BACKUPS_PREFIX}${backupTarget.id}`, [ backupTarget.id ]);

    // background. result is { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
    tasks.startTask(taskId, {})
        .then((result) => eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: null, ...result }))
        .catch((error) => eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, { taskId, errorMessage: error.message }));

    return taskId;
}
// Re-runs the storage backend setup for the target (e.g. to re-mount a share).
async function remount(target) {
    assert.strictEqual(typeof target, 'object');

    const api = storageApi(target);
    await api.setup(target.config);
}
// Returns the mount status ({ state, message }) of the target's backing
// storage. Providers without a local mount (object storage, noop, ...) are
// always reported as 'active'.
async function getMountStatus(target) {
    assert.strictEqual(typeof target, 'object');

    if (mounts.isManagedProvider(target.provider)) return await mounts.getStatus(target.provider, paths.MANAGED_BACKUP_MOUNT_DIR); // { state, message }
    if (target.provider === 'mountpoint') return await mounts.getStatus(target.provider, target.config.mountPoint);
    if (target.provider === 'filesystem') return await mounts.getStatus(target.provider, target.config.backupFolder);

    return { state: 'active' }; // nothing to mount for this provider
}
// Checks the target's mount and remounts once if it is not active.
// Returns the (possibly refreshed) mount status.
async function ensureMounted(target) {
    assert.strictEqual(typeof target, 'object');

    const status = await getMountStatus(target);
    if (status.state === 'active') return status;

    // bugfix: was 'await remount();' without the target, which made remount's
    // typeof assert throw whenever a remount was actually needed
    await remount(target);
    return await getMountStatus(target);
}
// Replaces the storage configuration of a target: re-injects secrets the
// client did not resend, verifies the new config against the backend, clears
// the sync cache, persists and re-runs the backend setup.
async function setConfig(backupTarget, newConfig, auditSource) {
    assert.strictEqual(typeof backupTarget, 'object');
    assert.strictEqual(typeof newConfig, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const api = storageApi(backupTarget);
    api.injectPrivateFields(newConfig, backupTarget.config); // carry over secrets from the old config

    debug('setConfig: validating new storage configuration');
    const sanitizedConfig = await api.verifyConfig({ id: backupTarget.id, provider: backupTarget.provider, config: newConfig });

    debug('setConfig: clearing backup cache');
    // FIXME: this cleans up the cache files in case the bucket or the prefix changes and the destination already has something there
    // however, this will also resync if just the credentials change
    await removeCacheFiles(backupTarget);

    await update(backupTarget, { config: sanitizedConfig });

    debug('setConfig: setting up new storage configuration');
    await api.setup(sanitizedConfig);

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_UPDATE, auditSource, { backupTarget, newConfig });
}
// Creates a new backup target: validates the inputs, verifies the storage
// configuration, persists the row and runs the backend setup. Returns the id.
async function add(data, auditSource) {
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode');

    const { provider, label, config, format, retention, schedule } = data; // required
    const limits = data.limits || null;
    const encryptionPassword = data.encryptionPassword || null;
    const encryptedFilenames = data.encryptedFilenames || false;

    const formatError = backupFormat.validateFormat(format);
    if (formatError) throw formatError;

    const labelError = validateLabel(label);
    if (labelError) throw labelError;

    // validate like the setters do, so a target cannot be created with values
    // that setSchedule/setRetention would later reject (previously unvalidated)
    const scheduleError = validateSchedule(schedule);
    if (scheduleError) throw scheduleError;

    const retentionError = validateRetention(retention);
    if (retentionError) throw retentionError;

    let encryption = null;
    if (encryptionPassword) {
        const encryptionPasswordError = validateEncryptionPassword(encryptionPassword);
        if (encryptionPasswordError) throw encryptionPasswordError;

        // the password is converted into keys and lost forever after this
        encryption = hush.generateEncryptionKeysSync(encryptionPassword);
        encryption.encryptedFilenames = !!encryptedFilenames;
    }

    const id = `bc-${crypto.randomUUID()}`;
    if (!safe.fs.mkdirSync(`${paths.BACKUP_INFO_DIR}/${id}`)) throw new BoxError(BoxError.FS_ERROR, `Failed to create info dir: ${safe.error.message}`);

    debug('add: validating new storage configuration');
    const sanitizedConfig = await storageApi({ provider }).verifyConfig({ id, provider, config });

    await database.query('INSERT INTO backupTargets (id, label, provider, configJson, limitsJson, retentionJson, schedule, encryptionJson, format, main) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        [ id, label, provider, JSON.stringify(sanitizedConfig), JSON.stringify(limits), JSON.stringify(retention), schedule, JSON.stringify(encryption), format, false ]);

    debug('add: setting up new storage configuration');
    await storageApi({ provider }).setup(sanitizedConfig);

    await eventlog.add(eventlog.ACTION_BACKUP_TARGET_ADD, auditSource, { id, label, provider, config, schedule, format });

    return id;
}
// Creates a backup target object that is not persisted in the database.
// Validates format/encryption and verifies the storage configuration; returns
// the in-memory target { id, format, provider, config, encryption }.
async function createPseudo(data) {
    assert.strictEqual(typeof data, 'object');

    const { id, provider, config, format } = data; // required
    const encryptionPassword = data.encryptionPassword || null;
    const encryptedFilenames = data.encryptedFilenames || false;

    const formatError = backupFormat.validateFormat(format);
    if (formatError) throw formatError;

    let encryption = null;
    if (encryptionPassword) {
        const encryptionPasswordError = validateEncryptionPassword(encryptionPassword);
        if (encryptionPasswordError) throw encryptionPasswordError;

        encryption = hush.generateEncryptionKeysSync(encryptionPassword);
        encryption.encryptedFilenames = !!encryptedFilenames;
    }

    debug('createPseudo: validating new storage configuration'); // debug label was copy-pasted as 'add:'
    const sanitizedConfig = await storageApi({ provider }).verifyConfig({ id, provider, config });

    return { id, format, provider, config: sanitizedConfig, encryption };
}