diff --git a/src/apps.js b/src/apps.js index ffcc904c9..bcd0a17fd 100644 --- a/src/apps.js +++ b/src/apps.js @@ -181,7 +181,6 @@ const appTaskManager = require('./apptaskmanager.js'), semver = require('semver'), services = require('./services.js'), shell = require('./shell.js')('apps'), - storage = require('./storage.js'), tasks = require('./tasks.js'), tgz = require('./backupformat/tgz.js'), TransformStream = require('stream').Transform, @@ -2838,7 +2837,7 @@ async function getBackupDownloadStream(app, backupId) { const ps = new PassThrough(); - const stream = await storage.api(backupTarget.provider).download(backupTarget.config, tgz.getBackupFilePath(backupTarget, backup.remotePath)); + const stream = await backupTargets.storageApi(backupTarget).download(backupTarget.config, tgz.getBackupFilePath(backupTarget, backup.remotePath)); stream.on('error', function(error) { debug(`getBackupDownloadStream: read stream error: ${error.message}`); ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error)); diff --git a/src/backupcleaner.js b/src/backupcleaner.js index 865768ab6..b3203ef24 100644 --- a/src/backupcleaner.js +++ b/src/backupcleaner.js @@ -18,8 +18,7 @@ const apps = require('./apps.js'), debug = require('debug')('box:backupcleaner'), moment = require('moment'), path = require('path'), - safe = require('safetydance'), - storage = require('./storage.js'); + safe = require('safetydance'); function applyBackupRetention(allBackups, retention, referencedBackupIds) { assert(Array.isArray(allBackups)); @@ -89,10 +88,10 @@ async function removeBackup(target, backup, progressCallback) { let removeError; if (target.format ==='tgz') { progressCallback({ message: `${backup.remotePath}: Removing ${backupFilePath}`}); - [removeError] = await safe(storage.api(target.provider).remove(target.config, backupFilePath)); + [removeError] = await safe(backupTargets.storageApi(target).remove(target.config, backupFilePath)); } else { progressCallback({ message: 
`${backup.remotePath}: Removing directory ${backupFilePath}`}); - [removeError] = await safe(storage.api(target.provider).removeDir(target.config, backupFilePath, progressCallback)); + [removeError] = await safe(backupTargets.storageApi(target).removeDir(target.config, backupFilePath, progressCallback)); } if (removeError) { @@ -101,7 +100,7 @@ async function removeBackup(target, backup, progressCallback) { } // prune empty directory if possible - const [pruneError] = await safe(storage.api(target.provider).remove(target.config, path.dirname(backupFilePath))); + const [pruneError] = await safe(backupTargets.storageApi(target).remove(target.config, path.dirname(backupFilePath))); if (pruneError) debug(`removeBackup: unable to prune backup directory ${path.dirname(backupFilePath)}: ${pruneError.message}`); const [delError] = await safe(backups.del(backup.id)); @@ -222,7 +221,7 @@ async function cleanupMissingBackups(target, progressCallback) { let backupFilePath = backupFormat.api(target.format).getBackupFilePath(target, backup.remotePath); if (target.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory - const [existsError, exists] = await safe(storage.api(target.provider).exists(target.config, backupFilePath)); + const [existsError, exists] = await safe(backupTargets.storageApi(target).exists(target.config, backupFilePath)); if (existsError || exists) continue; await progressCallback({ message: `Removing missing backup ${backup.remotePath}`}); @@ -256,9 +255,9 @@ async function removeOldAppSnapshots(backupTarget) { if (app !== null) continue; // app is still installed if (snapshotInfo[appId].format ==='tgz') { - await safe(storage.api(backupTarget.provider).remove(backupTarget.config, backupFormat.api(snapshotInfo[appId].format).getBackupFilePath(backupTarget, `snapshot/app_${appId}`)), { debug }); + await safe(backupTargets.storageApi(backupTarget).remove(backupTarget.config, 
backupFormat.api(snapshotInfo[appId].format).getBackupFilePath(backupTarget, `snapshot/app_${appId}`)), { debug }); } else { - await safe(storage.api(backupTarget.provider).removeDir(backupTarget.config, backupFormat.api(snapshotInfo[appId].format).getBackupFilePath(backupTarget, `snapshot/app_${appId}`), progressCallback), { debug }); + await safe(backupTargets.storageApi(backupTarget).removeDir(backupTarget.config, backupFormat.api(snapshotInfo[appId].format).getBackupFilePath(backupTarget, `snapshot/app_${appId}`), progressCallback), { debug }); } await backupTargets.setSnapshotInfo(backupTarget, appId, null /* info */); @@ -303,7 +302,7 @@ async function run(targetId, progressCallback) { await removeOldAppSnapshots(backupTarget); await progressCallback({ percent: 80, message: 'Cleaning storage artifacts' }); - await storage.api(backupTarget.provider).cleanup(backupTarget.config, progressCallback); + await backupTargets.storageApi(backupTarget).cleanup(backupTarget.config, progressCallback); return { removedBoxBackupPaths, removedMailBackupPaths, removedAppBackupPaths, missingBackupPaths }; } diff --git a/src/backupformat/rsync.js b/src/backupformat/rsync.js index 1c87e2e6b..c5a896037 100644 --- a/src/backupformat/rsync.js +++ b/src/backupformat/rsync.js @@ -11,6 +11,7 @@ exports = module.exports = { const assert = require('assert'), async = require('async'), + backupTargets = require('../backuptargets.js'), BoxError = require('../boxerror.js'), DataLayout = require('../datalayout.js'), { DecryptStream } = require('../hush.js'), @@ -24,7 +25,6 @@ const assert = require('assert'), promiseRetry = require('../promise-retry.js'), safe = require('safetydance'), shell = require('../shell.js')('backupformat/rsync'), - storage = require('../storage.js'), stream = require('stream/promises'), syncer = require('../syncer.js'); @@ -86,16 +86,16 @@ async function processSyncerChange(change, backupTarget, remotePath, dataLayout, if (change.operation === 'removedir') { 
debug(`Removing directory ${backupFilePath}`); - await storage.api(backupTarget.provider).removeDir(backupTarget.config, backupFilePath, progressCallback); + await backupTargets.storageApi(backupTarget).removeDir(backupTarget.config, backupFilePath, progressCallback); } else if (change.operation === 'remove') { debug(`Removing ${backupFilePath}`); - await storage.api(backupTarget.provider).remove(backupTarget.config, backupFilePath); + await backupTargets.storageApi(backupTarget).remove(backupTarget.config, backupFilePath); } else if (change.operation === 'add') { await promiseRetry({ times: 5, interval: 20000, debug }, async (retryCount) => { progressCallback({ message: `Adding ${change.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') }); debug(`Adding ${change.path} position ${change.position} try ${retryCount}`); - const uploader = await storage.api(backupTarget.provider).upload(backupTarget.config, backupFilePath); + const uploader = await backupTargets.storageApi(backupTarget).upload(backupTarget.config, backupFilePath); await addFile(dataLayout.toLocalPath('./' + change.path), backupTarget.encryption, uploader, progressCallback); }); } @@ -216,7 +216,7 @@ async function downloadDir(backupTarget, backupFilePath, dataLayout, progressCal if (mkdirError) throw new BoxError(BoxError.FS_ERROR, mkdirError.message); await promiseRetry({ times: 3, interval: 20000 }, async function () { - const [downloadError, sourceStream] = await safe(storage.api(backupTarget.provider).download(backupTarget.config, entry.fullPath)); + const [downloadError, sourceStream] = await safe(backupTargets.storageApi(backupTarget).download(backupTarget.config, entry.fullPath)); if (downloadError) { progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${downloadError.message}` }); throw downloadError; @@ -255,7 +255,7 @@ async function downloadDir(backupTarget, backupFilePath, dataLayout, progressCal const concurrency = 
backupTarget.limits?.downloadConcurrency || (backupTarget.provider === 's3' ? 30 : 10); let marker = null; while (true) { - const batch = await storage.api(backupTarget.provider).listDir(backupTarget.config, backupFilePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster + const batch = await backupTargets.storageApi(backupTarget).listDir(backupTarget.config, backupFilePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster await async.eachLimit(batch.entries, concurrency, downloadFile); if (!batch.marker) break; marker = batch.marker; diff --git a/src/backupformat/tgz.js b/src/backupformat/tgz.js index a75c00fc9..d791a06ee 100644 --- a/src/backupformat/tgz.js +++ b/src/backupformat/tgz.js @@ -1,6 +1,7 @@ 'use strict'; const assert = require('assert'), + backupTargets = require('../backuptargets.js'), BoxError = require('../boxerror.js'), DataLayout = require('../datalayout.js'), debug = require('debug')('box:backupformat/tgz'), @@ -10,7 +11,6 @@ const assert = require('assert'), ProgressStream = require('../progress-stream.js'), promiseRetry = require('../promise-retry.js'), safe = require('safetydance'), - storage = require('../storage.js'), stream = require('stream/promises'), { Transform } = require('node:stream'), tar = require('tar-stream'), @@ -244,7 +244,7 @@ async function download(backupTarget, remotePath, dataLayout, progressCallback) await promiseRetry({ times: 5, interval: 20000, debug }, async () => { progressCallback({ message: `Downloading backup ${backupFilePath}` }); - const sourceStream = await storage.api(backupTarget.provider).download(backupTarget.config, backupFilePath); + const sourceStream = await backupTargets.storageApi(backupTarget).download(backupTarget.config, backupFilePath); await tarExtract(sourceStream, dataLayout, backupTarget.encryption, progressCallback); }); } @@ -262,7 +262,7 @@ async function upload(backupTarget, 
remotePath, dataLayout, progressCallback) { await promiseRetry({ times: 5, interval: 20000, debug }, async () => { progressCallback({ message: `Uploading backup ${backupFilePath}` }); - const uploader = await storage.api(backupTarget.provider).upload(backupTarget.config, backupFilePath); + const uploader = await backupTargets.storageApi(backupTarget).upload(backupTarget.config, backupFilePath); await tarPack(dataLayout, backupTarget.encryption, uploader, progressCallback); }); } diff --git a/src/backuptargets.js b/src/backuptargets.js index 6e3aef668..9a44cd10e 100644 --- a/src/backuptargets.js +++ b/src/backuptargets.js @@ -29,6 +29,8 @@ exports = module.exports = { remount, getMountStatus, ensureMounted, + + storageApi }; const assert = require('assert'), @@ -47,11 +49,54 @@ const assert = require('assert'), path = require('path'), paths = require('./paths.js'), safe = require('safetydance'), - storage = require('./storage.js'), tasks = require('./tasks.js'); +// format: rsync or tgz +// provider: used to determine the api provider +// config: depends on the 'provider' field. 'provider' is not stored in config object. but it is injected when calling the api backends +// s3 providers - accessKeyId, secretAccessKey, bucket, prefix etc . see s3.js +// gcs - bucket, prefix, projectId, credentials . see gcs.js +// ext4/xfs/disk (managed providers) - mountOptions (diskPath), prefix, noHardlinks. disk is legacy. +// nfs/cifs/sshfs (managed providers) - mountOptions (host/username/password/seal/privateKey etc), prefix, noHardlinks +// filesystem - backupFolder, noHardlinks +// mountpoint - mountPoint, prefix, noHardlinks +// encryption: 'encryptionPassword' and 'encryptedFilenames' is converted into an 'encryption' object using hush.js. Password is lost forever after conversion. 
const BACKUP_TARGET_FIELDS = [ 'id', 'label', 'provider', 'configJson', 'limitsJson', 'retentionJson', 'schedule', 'encryptionJson', 'format', 'main', 'creationTime', 'ts' ].join(','); +function storageApi(backupTarget) { + assert.strictEqual(typeof backupTarget, 'object'); + + switch (backupTarget.provider) { + case 'nfs': return require('./storage/filesystem.js'); + case 'cifs': return require('./storage/filesystem.js'); + case 'sshfs': return require('./storage/filesystem.js'); + case 'mountpoint': return require('./storage/filesystem.js'); + case 'disk': return require('./storage/filesystem.js'); + case 'ext4': return require('./storage/filesystem.js'); + case 's3': return require('./storage/s3.js'); + case 'gcs': return require('./storage/gcs.js'); + case 'filesystem': return require('./storage/filesystem.js'); + case 'minio': return require('./storage/s3.js'); + case 's3-v4-compat': return require('./storage/s3.js'); + case 'digitalocean-spaces': return require('./storage/s3.js'); + case 'exoscale-sos': return require('./storage/s3.js'); + case 'wasabi': return require('./storage/s3.js'); + case 'scaleway-objectstorage': return require('./storage/s3.js'); + case 'backblaze-b2': return require('./storage/s3.js'); + case 'cloudflare-r2': return require('./storage/s3.js'); + case 'linode-objectstorage': return require('./storage/s3.js'); + case 'ovh-objectstorage': return require('./storage/s3.js'); + case 'ionos-objectstorage': return require('./storage/s3.js'); + case 'idrive-e2': return require('./storage/s3.js'); + case 'vultr-objectstorage': return require('./storage/s3.js'); + case 'upcloud-objectstorage': return require('./storage/s3.js'); + case 'contabo-objectstorage': return require('./storage/s3.js'); + case 'hetzner-objectstorage': return require('./storage/s3.js'); + case 'noop': return require('./storage/noop.js'); + default: throw new BoxError(BoxError.BAD_FIELD, `Unknown provider: ${backupTarget.provider}`); + } +} + function 
getRootPath(provider, config, mountPath) { assert.strictEqual(typeof config, 'object'); assert.strictEqual(typeof mountPath, 'string'); @@ -100,7 +145,7 @@ function removePrivateFields(target) { delete target.encryption; delete target.config.rootPath; - target.config = storage.api(target.provider).removePrivateFields(target.config); + target.config = storageApi(target).removePrivateFields(target.config); return target; } @@ -252,6 +297,8 @@ async function del(backupTarget, auditSource) { assert.strictEqual(typeof backupTarget, 'object'); assert.strictEqual(typeof auditSource, 'object'); + await safe(storageApi(backupTarget).teardown(backupTarget.config), { debug }); // ignore error + if (backupTarget.primary) throw new BoxError(BoxError.CONFLICT, 'Cannot delete the primary backup target'); const queries = [ @@ -363,23 +410,10 @@ async function startCleanupTask(backupTarget, auditSource) { return taskId; } -function managedBackupMountObject(config) { - assert(mounts.isManagedProvider(config.provider)); - - return { - name: 'backup', - hostPath: paths.MANAGED_BACKUP_MOUNT_DIR, - mountType: config.provider, - mountOptions: config.mountOptions - }; -} - async function remount(target) { assert.strictEqual(typeof target, 'object'); - if (mounts.isManagedProvider(target.provider)) { - await mounts.remount(managedBackupMountObject(target.config)); - } + await storageApi(target).setup(target.config); } async function getMountStatus(target) { @@ -418,16 +452,13 @@ async function setConfig(backupTarget, newConfig, auditSource) { const oldConfig = backupTarget.config; - storage.api(backupTarget.provider).injectPrivateFields(newConfig, oldConfig); + storageApi(backupTarget).injectPrivateFields(newConfig, oldConfig); debug('setConfig: validating new storage configuration'); - await storage.testMount(backupTarget.provider, newConfig, '/mnt/backup-storage-validation'); - - debug('setConfig: removing old storage configuration'); - if 
(mounts.isManagedProvider(backupTarget.provider)) await safe(mounts.removeMount(managedBackupMountObject(oldConfig))); + await storageApi(backupTarget).testConfig(Object.assign({}, newConfig, { provider: oldConfig.provider })); debug('setConfig: setting up new storage configuration'); - await storage.setupManagedMount(backupTarget.provider, newConfig, paths.MANAGED_BACKUP_MOUNT_DIR); + await storageApi(backupTarget).setup(Object.assign({}, newConfig, { provider: oldConfig.provider })); debug('setConfig: clearing backup cache'); // FIXME: this cleans up the cache files in case the bucket or the prefix changes and the destination already has something there @@ -465,10 +496,7 @@ async function add(data, auditSource) { } debug('add: validating new storage configuration'); - await storage.testMount(provider, config, '/mnt/backup-storage-validation'); - - debug('setStorage: setting up new storage configuration'); - await storage.setupManagedMount(provider, config, paths.MANAGED_BACKUP_MOUNT_DIR); + await storageApi({ provider }).testConfig(Object.assign({}, config, { provider })); const id = `bc-${crypto.randomUUID()}`; if (!safe.fs.mkdirSync(`${paths.BACKUP_INFO_DIR}/${id}`)) throw new BoxError(BoxError.FS_ERROR, `Failed to create info dir: ${safe.error.message}`); @@ -476,6 +504,9 @@ async function add(data, auditSource) { await database.query('INSERT INTO backupTargets (id, label, provider, configJson, limitsJson, retentionJson, schedule, encryptionJson, format, main) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [ id, label, provider, JSON.stringify(config), JSON.stringify(limits), JSON.stringify(retention), schedule, JSON.stringify(encryption), format, false ]); + debug('add: setting up new storage configuration'); + await storageApi({ provider }).setup(Object.assign({}, config, { provider })); + await eventlog.add(eventlog.ACTION_BACKUP_TARGET_ADD, auditSource, { id, label, provider, config, schedule, format }); return id; diff --git a/src/backuptask.js 
b/src/backuptask.js index d5118a622..829c70eaa 100644 --- a/src/backuptask.js +++ b/src/backuptask.js @@ -30,8 +30,7 @@ const apps = require('./apps.js'), paths = require('./paths.js'), safe = require('safetydance'), services = require('./services.js'), - shell = require('./shell.js')('backuptask'), - storage = require('./storage.js'); + shell = require('./shell.js')('backuptask'); const BACKUP_UPLOAD_CMD = path.join(__dirname, 'scripts/backupupload.js'); @@ -45,7 +44,7 @@ async function checkPreconditions(backupTarget, dataLayout) { if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not active: ${status.message}`); // check availabe size. this requires root for df to work - const available = await storage.api(backupTarget.provider).getAvailableSize(backupTarget.config); + const available = await backupTargets.storageApi(backupTarget).getAvailableSize(backupTarget.config); let used = 0; for (const localPath of dataLayout.localPaths()) { debug(`checkPreconditions: getting disk usage of ${localPath}`); @@ -212,7 +211,7 @@ async function copy(backupTarget, srcRemotePath, destRemotePath, progressCallbac const newFilePath = backupFormat.api(format).getBackupFilePath(backupTarget, destRemotePath); const startTime = new Date(); - const [copyError] = await safe(storage.api(config.provider).copy(config, oldFilePath, newFilePath, progressCallback)); + const [copyError] = await safe(backupTargets.storageApi(backupTarget).copy(config, oldFilePath, newFilePath, progressCallback)); if (copyError) { debug(`copy: copied to ${destRemotePath} errored. error: ${copyError.message}`); throw copyError; diff --git a/src/routes/apps.js b/src/routes/apps.js index c5bcefbf1..fd1aa863c 100644 --- a/src/routes/apps.js +++ b/src/routes/apps.js @@ -558,17 +558,6 @@ async function restore(req, res, next) { next(new HttpSuccess(202, { taskId: result.taskId })); } -// import has three parts. -// 1. backupFormat. rsync or tgz -// 2. remotePath. 
this is file path -// 3. backupConfig.provider (see api() function in src/storage.js) differentiates further options -// s3 providers - accessKeyId, secretAccessKey, bucket, prefix etc . see s3.js -// gcs - bucket, prefix, projectId, credentials . see gcs.js -// ext4/xfs/disk (managed providers) - mountOptions (diskPath), prefix, noHardlinks. disk is legacy. -// nfs/cifs/sshfs (managed providers) - mountOptions (host/username/password/seal/privateKey etc), prefix, noHardlinks -// filesystem - backupFolder, noHardlinks -// mountpoint - mountPoint, prefix, noHardlinks -// 3. backupConfig.password and backupConfig.encryptedFilenames async function importApp(req, res, next) { assert.strictEqual(typeof req.body, 'object'); assert.strictEqual(typeof req.resources.app, 'object'); diff --git a/src/routes/backuptargets.js b/src/routes/backuptargets.js index 072f85343..6d4b0fad0 100644 --- a/src/routes/backuptargets.js +++ b/src/routes/backuptargets.js @@ -60,16 +60,6 @@ async function list(req, res, next) { next(new HttpSuccess(200, { backupTargets: result.map(backupTargets.removePrivateFields) })); } -// Target has three parts. these fields are merged into one top level object -// 1. format. rsync or tgz -// 2. config. the 'provider' (see api() function in src/storage.js) differentiates further options -// s3 providers - accessKeyId, secretAccessKey, bucket, prefix etc . see s3.js -// gcs - bucket, prefix, projectId, credentials . see gcs.js -// ext4/xfs/disk (managed providers) - mountOptions (diskPath), prefix, noHardlinks. disk is legacy. -// nfs/cifs/sshfs (managed providers) - mountOptions (host/username/password/seal/privateKey etc), prefix, noHardlinks -// filesystem - backupFolder, noHardlinks -// mountpoint - mountPoint, prefix, noHardlinks -// 3. encryption. 
password and encryptedFilenames async function add(req, res, next) { assert.strictEqual(typeof req.body, 'object'); diff --git a/src/storage.js b/src/storage.js deleted file mode 100644 index f8f3c5c54..000000000 --- a/src/storage.js +++ /dev/null @@ -1,89 +0,0 @@ -'use strict'; - -exports = module.exports = { - api, - testMount, - setupManagedMount -}; - -const assert = require('assert'), - BoxError = require('./boxerror.js'), - debug = require('debug')('box:storage'), - mounts = require('./mounts.js'), - path = require('path'), - safe = require('safetydance'); - -// choose which storage backend we use for test purpose we use s3 -function api(provider) { - switch (provider) { - case 'nfs': return require('./storage/filesystem.js'); - case 'cifs': return require('./storage/filesystem.js'); - case 'sshfs': return require('./storage/filesystem.js'); - case 'mountpoint': return require('./storage/filesystem.js'); - case 'disk': return require('./storage/filesystem.js'); - case 'ext4': return require('./storage/filesystem.js'); - case 's3': return require('./storage/s3.js'); - case 'gcs': return require('./storage/gcs.js'); - case 'filesystem': return require('./storage/filesystem.js'); - case 'minio': return require('./storage/s3.js'); - case 's3-v4-compat': return require('./storage/s3.js'); - case 'digitalocean-spaces': return require('./storage/s3.js'); - case 'exoscale-sos': return require('./storage/s3.js'); - case 'wasabi': return require('./storage/s3.js'); - case 'scaleway-objectstorage': return require('./storage/s3.js'); - case 'backblaze-b2': return require('./storage/s3.js'); - case 'cloudflare-r2': return require('./storage/s3.js'); - case 'linode-objectstorage': return require('./storage/s3.js'); - case 'ovh-objectstorage': return require('./storage/s3.js'); - case 'ionos-objectstorage': return require('./storage/s3.js'); - case 'idrive-e2': return require('./storage/s3.js'); - case 'vultr-objectstorage': return require('./storage/s3.js'); - case 
'upcloud-objectstorage': return require('./storage/s3.js'); - case 'contabo-objectstorage': return require('./storage/s3.js'); - case 'hetzner-objectstorage': return require('./storage/s3.js'); - case 'noop': return require('./storage/noop.js'); - default: return null; - } -} - -async function setupManagedMount(provider, backupConfig, hostPath) { - assert.strictEqual(typeof provider, 'string'); - assert.strictEqual(typeof backupConfig, 'object'); - assert.strictEqual(typeof hostPath, 'string'); - - if (!mounts.isManagedProvider(provider)) return null; - - if (!backupConfig.mountOptions || typeof backupConfig.mountOptions !== 'object') throw new BoxError(BoxError.BAD_FIELD, 'mountOptions must be an object'); - - const error = mounts.validateMountOptions(provider, backupConfig.mountOptions); - if (error) throw error; - - debug(`setupManagedMount: setting up mount at ${hostPath} with ${provider}`); - - const newMount = { - name: path.basename(hostPath), - hostPath, - mountType: provider, - mountOptions: backupConfig.mountOptions - }; - - await mounts.tryAddMount(newMount, { timeout: 10 }); // 10 seconds - - return newMount; -} - -async function testMount(provider, backupConfig, mountPath) { - assert.strictEqual(typeof provider, 'string'); - assert.strictEqual(typeof backupConfig, 'object'); - assert.strictEqual(typeof mountPath, 'string'); - - const func = api(provider); - if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider'); - - const testMountObject = await setupManagedMount(provider, backupConfig, mountPath); // this validates mountOptions - - // filesystem backend uses mountPath - const [error] = await safe(api(provider).testConfig(Object.assign({ mountPath: '/mnt/backup-storage-validation', provider }, backupConfig))); - if (testMountObject) await mounts.removeMount(testMountObject); - if (error) throw error; -} diff --git a/src/storage/filesystem.js b/src/storage/filesystem.js index 257405746..c3b81db88 100644 --- 
a/src/storage/filesystem.js +++ b/src/storage/filesystem.js @@ -1,6 +1,14 @@ 'use strict'; exports = module.exports = { + setup, + teardown, + cleanup, + + testConfig, + removePrivateFields, + injectPrivateFields, + getAvailableSize, upload, @@ -13,29 +21,15 @@ exports = module.exports = { remove, removeDir, - - cleanup, - - testConfig, - removePrivateFields, - injectPrivateFields }; -const PROVIDER_FILESYSTEM = 'filesystem'; -const PROVIDER_MOUNTPOINT = 'mountpoint'; -const PROVIDER_SSHFS = 'sshfs'; -const PROVIDER_CIFS = 'cifs'; -const PROVIDER_XFS = 'xfs'; -const PROVIDER_DISK = 'disk'; // replaces xfs and ext4 -const PROVIDER_NFS = 'nfs'; -const PROVIDER_EXT4 = 'ext4'; - const assert = require('assert'), BoxError = require('../boxerror.js'), constants = require('../constants.js'), debug = require('debug')('box:storage/filesystem'), df = require('../df.js'), fs = require('fs'), + mounts = require('../mounts.js'), path = require('path'), paths = require('../paths.js'), safe = require('safetydance'), @@ -53,19 +47,19 @@ async function getAvailableSize(apiConfig) { function hasChownSupportSync(apiConfig) { switch (apiConfig.provider) { - case PROVIDER_NFS: - case PROVIDER_EXT4: - case PROVIDER_XFS: - case PROVIDER_DISK: - case PROVIDER_FILESYSTEM: + case mounts.MOUNT_TYPE_NFS: + case mounts.MOUNT_TYPE_EXT4: + case mounts.MOUNT_TYPE_XFS: + case mounts.MOUNT_TYPE_DISK: + case mounts.MOUNT_TYPE_FILESYSTEM: return true; - case PROVIDER_SSHFS: + case mounts.MOUNT_TYPE_SSHFS: // sshfs can be mounted as root or normal user. 
when mounted as root, we have to chown since we remove backups as the yellowtent user // when mounted as non-root user, files are created as yellowtent user but they are still owned by the non-root user (thus del also works) return apiConfig.mountOptions.user === 'root'; - case PROVIDER_CIFS: + case mounts.MOUNT_TYPE_CIFS: return true; - case PROVIDER_MOUNTPOINT: + case mounts.MOUNT_TYPE_MOUNTPOINT: return apiConfig.chown; } } @@ -159,10 +153,10 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) { progressCallback({ message: `Copying ${oldFilePath} to ${newFilePath}` }); - let cpOptions = ((apiConfig.provider !== PROVIDER_MOUNTPOINT && apiConfig.provider !== PROVIDER_CIFS) || apiConfig.preserveAttributes) ? '-a' : '-dR'; + let cpOptions = ((apiConfig.provider !== mounts.MOUNT_TYPE_MOUNTPOINT && apiConfig.provider !== mounts.MOUNT_TYPE_CIFS) || apiConfig.preserveAttributes) ? '-a' : '-dR'; cpOptions += apiConfig.noHardlinks ? '' : 'l'; // this will hardlink backups saving space - if (apiConfig.provider === PROVIDER_SSHFS) { + if (apiConfig.provider === mounts.MOUNT_TYPE_SSHFS) { const identityFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${apiConfig.mountOptions.host}`); const sshOptions = [ '-o', '"StrictHostKeyChecking no"', '-i', identityFilePath, '-p', apiConfig.mountOptions.port, `${apiConfig.mountOptions.user}@${apiConfig.mountOptions.host}` ]; @@ -198,7 +192,7 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) { progressCallback({ message: `Removing directory ${pathPrefix}` }); - if (apiConfig.provider === PROVIDER_SSHFS) { + if (apiConfig.provider === mounts.MOUNT_TYPE_SSHFS) { const identityFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${apiConfig.mountOptions.host}`); const sshOptions = [ '-o', '"StrictHostKeyChecking no"', '-i', identityFilePath, '-p', apiConfig.mountOptions.port, `${apiConfig.mountOptions.user}@${apiConfig.mountOptions.host}` ]; @@ -213,7 +207,7 @@ async function removeDir(apiConfig, 
pathPrefix, progressCallback) { if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message); } -function validateBackupTarget(folder) { +function validateDestPath(folder) { assert.strictEqual(typeof folder, 'string'); if (path.normalize(folder) !== folder) return new BoxError(BoxError.BAD_FIELD, 'backupFolder/mountpoint must contain a normalized path'); @@ -234,6 +228,62 @@ async function cleanup(apiConfig, progressCallback) { assert.strictEqual(typeof progressCallback, 'function'); } +async function setupManagedMount(apiConfig, hostPath) { + assert.strictEqual(typeof apiConfig, 'object'); + assert.strictEqual(typeof hostPath, 'string'); + assert(mounts.isManagedProvider(apiConfig.provider)); + + if (!apiConfig.mountOptions || typeof apiConfig.mountOptions !== 'object') throw new BoxError(BoxError.BAD_FIELD, 'mountOptions must be an object'); + + const error = mounts.validateMountOptions(apiConfig.provider, apiConfig.mountOptions); + if (error) throw error; + + debug(`setupManagedMount: setting up mount at ${hostPath} with ${apiConfig.provider}`); + + const newMount = { + name: path.basename(hostPath), + hostPath, + mountType: apiConfig.provider, + mountOptions: apiConfig.mountOptions + }; + + await mounts.tryAddMount(newMount, { timeout: 10 }); // 10 seconds + + return newMount; +} + +async function setup(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + debug('setup: removing old storage configuration'); + if (!mounts.isManagedProvider(apiConfig.provider)) return; + const oldMountObject = { + name: 'backup', + hostPath: paths.MANAGED_BACKUP_MOUNT_DIR, + mountType: apiConfig.provider, + mountOptions: apiConfig.mountOptions // must have already been validated + }; + + await safe(mounts.removeMount(oldMountObject), { debug }); // ignore error + + debug('setup: setting up new storage configuration'); + await setupManagedMount(apiConfig, paths.MANAGED_BACKUP_MOUNT_DIR); +} + +async function teardown(apiConfig) { + assert.strictEqual(typeof 
apiConfig, 'object'); + + if (!mounts.isManagedProvider(apiConfig.provider)) return; + const mountObject = { + name: 'backup', + hostPath: paths.MANAGED_BACKUP_MOUNT_DIR, + mountType: apiConfig.provider, + mountOptions: apiConfig.mountOptions + }; + + await safe(mounts.removeMount(mountObject), { debug }); // ignore error +} + async function testConfig(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); @@ -241,32 +291,32 @@ async function testConfig(apiConfig) { if ('chown' in apiConfig && typeof apiConfig.chown !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'chown must be boolean'); if ('preserveAttributes' in apiConfig && typeof apiConfig.preserveAttributes !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'preserveAttributes must be boolean'); - let rootPath; // for managed mounts, this uses 'mountPath', which could be some temporary mount location - if (apiConfig.provider === PROVIDER_FILESYSTEM) { + let rootPath, testMountObject; + if (apiConfig.provider === mounts.MOUNT_TYPE_FILESYSTEM) { if (!apiConfig.backupFolder || typeof apiConfig.backupFolder !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'backupFolder must be non-empty string'); - const error = validateBackupTarget(apiConfig.backupFolder); + const error = validateDestPath(apiConfig.backupFolder); if (error) throw error; rootPath = apiConfig.backupFolder; - } else { // xfs/cifs/ext4/nfs/mountpoint/sshfs - if (apiConfig.provider === PROVIDER_MOUNTPOINT) { - if (!apiConfig.mountPoint || typeof apiConfig.mountPoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'mountPoint must be non-empty string'); - const error = validateBackupTarget(apiConfig.mountPoint); - if (error) throw error; - - const [mountError] = await safe(shell.spawn('mountpoint', ['-q', '--', apiConfig.mountPoint], { timeout: 5000 })); - if (mountError) throw new BoxError(BoxError.BAD_FIELD, `${apiConfig.mountPoint} is not mounted: ${mountError.message}`); - } - + } else { if (typeof apiConfig.prefix !== 
'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string'); if (apiConfig.prefix !== '') { if (path.isAbsolute(apiConfig.prefix)) throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a relative path'); if (path.normalize(apiConfig.prefix) !== apiConfig.prefix) throw new BoxError(BoxError.BAD_FIELD, 'prefix must contain a normalized relative path'); } - if (apiConfig.provider === PROVIDER_MOUNTPOINT) { + if (mounts.isManagedProvider(apiConfig.provider)) { + testMountObject = await setupManagedMount(apiConfig, '/mnt/backup-storage-validation'); // this validates mountOptions + rootPath = path.join('/mnt/backup-storage-validation', apiConfig.prefix); + } else if (apiConfig.provider === mounts.MOUNT_TYPE_MOUNTPOINT) { + if (!apiConfig.mountPoint || typeof apiConfig.mountPoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'mountPoint must be non-empty string'); + const error = validateDestPath(apiConfig.mountPoint); + if (error) throw error; + + const [mountError] = await safe(shell.spawn('mountpoint', ['-q', '--', apiConfig.mountPoint], { timeout: 5000 })); + if (mountError) throw new BoxError(BoxError.BAD_FIELD, `${apiConfig.mountPoint} is not mounted: ${mountError.message}`); rootPath = path.join(apiConfig.mountPoint, apiConfig.prefix); } else { - rootPath = path.join(apiConfig.mountPath, apiConfig.prefix); + throw new BoxError(BoxError.BAD_FIELD, `Unknown provider: ${apiConfig.provider}`); } } @@ -282,6 +332,8 @@ async function testConfig(apiConfig) { if (!safe.fs.unlinkSync(path.join(rootPath, 'cloudron-testfile'))) { throw new BoxError(BoxError.BAD_FIELD, `Unable to remove test file as 'yellowtent' user in ${rootPath}: ${safe.error.message}. 
Check dir/mount permissions`); } + + if (testMountObject) await mounts.removeMount(testMountObject); } function removePrivateFields(apiConfig) { diff --git a/src/storage/gcs.js b/src/storage/gcs.js index be47ba18b..cc26a1d03 100644 --- a/src/storage/gcs.js +++ b/src/storage/gcs.js @@ -13,6 +13,8 @@ exports = module.exports = { remove, removeDir, + setup, + teardown, cleanup, testConfig, @@ -229,6 +231,14 @@ async function testConfig(apiConfig) { debug('testConfig: deleted cloudron-testfile'); } +async function setup(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + +async function teardown(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + function removePrivateFields(apiConfig) { apiConfig.credentials.private_key = constants.SECRET_PLACEHOLDER; return apiConfig; diff --git a/src/storage/interface.js b/src/storage/interface.js index eb43694f0..be39c9359 100644 --- a/src/storage/interface.js +++ b/src/storage/interface.js @@ -25,6 +25,10 @@ exports = module.exports = { remove, removeDir, + setup, + teardown, + cleanup, + testConfig, removePrivateFields, injectPrivateFields @@ -124,3 +128,19 @@ async function testConfig(apiConfig) { throw new BoxError(BoxError.NOT_IMPLEMENTED, 'testConfig is not implemented'); } + +async function setup(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + // Result: none - throws BoxError if provider setup fails + + throw new BoxError(BoxError.NOT_IMPLEMENTED, 'setup is not implemented'); +} + +async function teardown(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + // Result: none - throws BoxError if provider teardown fails + + throw new BoxError(BoxError.NOT_IMPLEMENTED, 'teardown is not implemented'); +} diff --git a/src/storage/noop.js b/src/storage/noop.js index 636f3fdbd..0b1408597 100644 --- a/src/storage/noop.js +++ b/src/storage/noop.js @@ -13,6 +13,8 @@ exports = module.exports = { remove, removeDir, + setup, + 
teardown, cleanup, testConfig, @@ -105,6 +107,14 @@ async function testConfig(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); } +async function setup(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + +async function teardown(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + function removePrivateFields(apiConfig) { return apiConfig; } diff --git a/src/storage/s3.js b/src/storage/s3.js index b3d8acc26..a370123bb 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -1,6 +1,14 @@ 'use strict'; exports = module.exports = { + setup, + teardown, + cleanup, + + testConfig, + removePrivateFields, + injectPrivateFields, + getAvailableSize, upload, @@ -13,12 +21,6 @@ exports = module.exports = { remove, removeDir, - cleanup, - - testConfig, - removePrivateFields, - injectPrivateFields, - // Used to mock AWS _chunk: chunk }; @@ -613,6 +615,14 @@ async function testConfig(apiConfig) { if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`); } +async function setup(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + +async function teardown(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); +} + function removePrivateFields(apiConfig) { apiConfig.secretAccessKey = constants.SECRET_PLACEHOLDER; return apiConfig;