diff --git a/src/backupcleaner.js b/src/backupcleaner.js index f3525df38..d98d720e5 100644 --- a/src/backupcleaner.js +++ b/src/backupcleaner.js @@ -15,7 +15,6 @@ const apps = require('./apps.js'), constants = require('./constants.js'), debug = require('debug')('box:backupcleaner'), moment = require('moment'), - mounts = require('./mounts.js'), path = require('path'), paths = require('./paths.js'), safe = require('safetydance'), @@ -272,12 +271,9 @@ async function run(progressCallback) { const backupConfig = await settings.getBackupConfig(); - if (mounts.isManagedProvider(backupConfig.provider) || backupConfig.provider === 'mountpoint') { - const hostPath = mounts.isManagedProvider(backupConfig.provider) ? paths.MANAGED_BACKUP_MOUNT_DIR : backupConfig.mountPoint; - const status = await mounts.getStatus(backupConfig.provider, hostPath); // { state, message } - debug(`clean: mount point status is ${JSON.stringify(status)}`); - if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`); - } + const status = await storage.api(backupConfig.provider).getBackupProviderStatus(backupConfig); + debug(`clean: mount point status is ${JSON.stringify(status)}`); + if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`); if (backupConfig.retentionPolicy.keepWithinSecs < 0) { debug('cleanup: keeping all backups'); diff --git a/src/backuptask.js b/src/backuptask.js index 31a102f36..2bfb9bb71 100644 --- a/src/backuptask.js +++ b/src/backuptask.js @@ -47,6 +47,41 @@ function canBackupApp(app) { app.installationState === apps.ISTATE_PENDING_UPDATE; // called from apptask } +// binary units (non SI) 1024 based +function prettyBytes(bytes) { + assert.strictEqual(typeof bytes, 'number'); + + const i = Math.floor(Math.log(bytes) / Math.log(1024)), + sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + + return (bytes / Math.pow(1024, 
i)).toFixed(2) * 1 + '' + sizes[i]; +} + +async function checkPreconditions(backupConfig, dataLayout) { + assert.strictEqual(typeof backupConfig, 'object'); + assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + + // check mount status before uploading + const status = await storage.api(backupConfig.provider).getBackupProviderStatus(backupConfig); + debug(`upload: mount point status is ${JSON.stringify(status)}`); + if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`); + + // check available size. this requires root for df to work + const df = { available: await storage.api(backupConfig.provider).getAvailableSize(backupConfig) }; // getAvailableSize() resolves to a plain number; wrap it so the df.available reads below stay defined + let used = 0; + for (const localPath of dataLayout.localPaths()) { + debug(`checkPreconditions: getting disk usage of ${localPath}`); + const result = safe.child_process.execSync(`du -Dsb ${localPath}`, { encoding: 'utf8' }); + if (!result) throw new BoxError(BoxError.FS_ERROR, `du error: ${safe.error.message}`); + used += parseInt(result, 10); + } + + debug(`checkPreconditions: ${used} bytes`); + + const needed = 0.6 * used + (1024 * 1024 * 1024); // check if there is at least 1GB left afterwards. aim for 60% because rsync/tgz won't need full 100% + if (df.available <= needed) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space for backup. 
Needed: ${prettyBytes(needed)} Available: ${prettyBytes(df.available)}`); +} + // this function is called via backupupload (since it needs root to traverse app's directory) async function upload(remotePath, format, dataLayoutString, progressCallback) { assert.strictEqual(typeof remotePath, 'string'); @@ -58,7 +93,8 @@ async function upload(remotePath, format, dataLayoutString, progressCallback) { const dataLayout = DataLayout.fromString(dataLayoutString); const backupConfig = await settings.getBackupConfig(); - await storage.api(backupConfig.provider).checkBackupPreconditions(backupConfig, dataLayout); + + await checkPreconditions(backupConfig, dataLayout); await backupFormat.api(format).upload(backupConfig, remotePath, dataLayout, progressCallback); } diff --git a/src/storage/filesystem.js b/src/storage/filesystem.js index 031e86510..4f1a3f464 100644 --- a/src/storage/filesystem.js +++ b/src/storage/filesystem.js @@ -2,7 +2,8 @@ exports = module.exports = { getBackupRootPath, - checkBackupPreconditions, + getBackupProviderStatus, + getAvailableSize, upload, download, @@ -33,7 +34,6 @@ const PROVIDER_EXT4 = 'ext4'; const assert = require('assert'), BoxError = require('../boxerror.js'), constants = require('../constants.js'), - DataLayout = require('../datalayout.js'), debug = require('debug')('box:storage/filesystem'), df = require('@sindresorhus/df'), fs = require('fs'), @@ -62,44 +62,26 @@ function getBackupRootPath(apiConfig) { } } -// binary units (non SI) 1024 based -function prettyBytes(bytes) { - assert.strictEqual(typeof bytes, 'number'); - - const i = Math.floor(Math.log(bytes) / Math.log(1024)), - sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; - - return (bytes / Math.pow(1024, i)).toFixed(2) * 1 + '' + sizes[i]; -} - -// the du call in the function below requires root -async function checkBackupPreconditions(apiConfig, dataLayout) { +async function getBackupProviderStatus(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); - 
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); // Check filesystem is mounted so we don't write into the actual folder on disk if (mounts.isManagedProvider(apiConfig.provider) || apiConfig.provider === 'mountpoint') { const hostPath = mounts.isManagedProvider(apiConfig.provider) ? paths.MANAGED_BACKUP_MOUNT_DIR : apiConfig.mountPoint; - const status = await mounts.getStatus(apiConfig.provider, hostPath); // { state, message } - debug(`clean: mount point status is ${JSON.stringify(status)}`); - if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`); + return await mounts.getStatus(apiConfig.provider, hostPath); // { state, message } } + return await mounts.getStatus(apiConfig.provider, apiConfig.backupFolder); +} + +// the du call in the function below requires root +async function getAvailableSize(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + const [error, dfResult] = await safe(df.file(getBackupRootPath(apiConfig))); if (error) throw new BoxError(BoxError.FS_ERROR, `Error when checking for disk space: ${error.message}`); - let used = 0; - for (const localPath of dataLayout.localPaths()) { - debug(`checkBackupPreconditions: getting disk usage of ${localPath}`); - const result = safe.child_process.execSync(`du -Dsb ${localPath}`, { encoding: 'utf8' }); - if (!result) throw new BoxError(BoxError.FS_ERROR, `du error: ${safe.error.message}`); - used += parseInt(result, 10); - } - - debug(`checkBackupPreconditions: ${used} bytes`); - - const needed = 0.6 * used + (1024 * 1024 * 1024); // check if there is atleast 1GB left afterwards. aim for 60% because rsync/tgz won't need full 100% - if (dfResult.available <= needed) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space for backup. 
Needed: ${prettyBytes(needed)} Available: ${prettyBytes(dfResult.available)}`); + return dfResult.available; } function hasChownSupportSync(apiConfig) { diff --git a/src/storage/gcs.js b/src/storage/gcs.js index 9589a88e0..69e876dc9 100644 --- a/src/storage/gcs.js +++ b/src/storage/gcs.js @@ -2,7 +2,8 @@ exports = module.exports = { getBackupRootPath, - checkBackupPreconditions, + getBackupProviderStatus, + getAvailableSize, upload, exists, @@ -29,7 +30,6 @@ const assert = require('assert'), async = require('async'), BoxError = require('../boxerror.js'), constants = require('../constants.js'), - DataLayout = require('../datalayout.js'), debug = require('debug')('box:storage/gcs'), PassThrough = require('stream').PassThrough, path = require('path'), @@ -72,9 +72,16 @@ function getBackupRootPath(apiConfig) { return apiConfig.prefix; } -async function checkBackupPreconditions(apiConfig, dataLayout) { +async function getBackupProviderStatus(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + + return { state: 'active' }; +} + +async function getAvailableSize(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + return Number.POSITIVE_INFINITY; } function upload(apiConfig, backupFilePath, sourceStream, callback) { diff --git a/src/storage/interface.js b/src/storage/interface.js index c78942587..55d21c125 100644 --- a/src/storage/interface.js +++ b/src/storage/interface.js @@ -12,7 +12,8 @@ // them to tune the concurrency based on failures/rate limits accordingly exports = module.exports = { getBackupRootPath, - checkBackupPreconditions, + getBackupProviderStatus, + getAvailableSize, upload, @@ -34,8 +35,7 @@ exports = module.exports = { }; const assert = require('assert'), - BoxError = require('../boxerror.js'), - DataLayout = require('../datalayout.js'); + BoxError = require('../boxerror.js'); function removePrivateFields(apiConfig) { // in-place removal of tokens and 
api keys with constants.SECRET_PLACEHOLDER @@ -54,9 +54,16 @@ function getBackupRootPath(apiConfig) { return '/'; } -async function checkBackupPreconditions(apiConfig, dataLayout) { +async function getBackupProviderStatus(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + + return { state: 'active' }; +} + +async function getAvailableSize(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + return Number.POSITIVE_INFINITY; } function upload(apiConfig, backupFilePath, sourceStream, callback) { diff --git a/src/storage/noop.js b/src/storage/noop.js index c87a8ca69..4149889f8 100644 --- a/src/storage/noop.js +++ b/src/storage/noop.js @@ -2,7 +2,8 @@ exports = module.exports = { getBackupRootPath, - checkBackupPreconditions, + getBackupProviderStatus, + getAvailableSize, upload, exists, @@ -23,7 +24,6 @@ exports = module.exports = { const assert = require('assert'), BoxError = require('../boxerror.js'), - DataLayout = require('../datalayout.js'), debug = require('debug')('box:storage/noop'); function getBackupRootPath(apiConfig) { @@ -31,9 +31,16 @@ function getBackupRootPath(apiConfig) { return ''; } -async function checkBackupPreconditions(apiConfig, dataLayout) { +async function getBackupProviderStatus(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + + return { state: 'active' }; +} + +async function getAvailableSize(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + return Number.POSITIVE_INFINITY; } function upload(apiConfig, backupFilePath, sourceStream, callback) { diff --git a/src/storage/s3.js b/src/storage/s3.js index bee56e5f8..408c00147 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -2,7 +2,8 @@ exports = module.exports = { getBackupRootPath, - checkBackupPreconditions, + getBackupProviderStatus, + getAvailableSize, upload, exists, @@ 
-31,7 +32,6 @@ const assert = require('assert'), AwsSdk = require('aws-sdk'), BoxError = require('../boxerror.js'), constants = require('../constants.js'), - DataLayout = require('../datalayout.js'), debug = require('debug')('box:storage/s3'), https = require('https'), path = require('path'), @@ -98,9 +98,16 @@ function getBackupRootPath(apiConfig) { return apiConfig.prefix; } -async function checkBackupPreconditions(apiConfig, dataLayout) { +async function getBackupProviderStatus(apiConfig) { assert.strictEqual(typeof apiConfig, 'object'); - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + + return { state: 'active' }; +} + +async function getAvailableSize(apiConfig) { + assert.strictEqual(typeof apiConfig, 'object'); + + return Number.POSITIVE_INFINITY; } function upload(apiConfig, backupFilePath, sourceStream, callback) {