diff --git a/dashboard/src/views/backups.js b/dashboard/src/views/backups.js index 42d6e74d6..841966e2c 100644 --- a/dashboard/src/views/backups.js +++ b/dashboard/src/views/backups.js @@ -530,12 +530,13 @@ angular.module('Application').controller('BackupsController', ['$scope', '$locat $scope.configureBackup.useHardlinks = !$scope.backupConfig.noHardlinks; $scope.configureBackup.chown = $scope.backupConfig.chown; - $scope.configureBackup.memoryLimit = $scope.backupConfig.memoryLimit; + var limits = $scope.backupConfig.limits || {}; + $scope.configureBackup.memoryLimit = limits.memoryLimit; - $scope.configureBackup.uploadPartSize = $scope.backupConfig.uploadPartSize || ($scope.configureBackup.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024); - $scope.configureBackup.downloadConcurrency = $scope.backupConfig.downloadConcurrency || ($scope.backupConfig.provider === 's3' ? 30 : 10); - $scope.configureBackup.syncConcurrency = $scope.backupConfig.syncConcurrency || ($scope.backupConfig.provider === 's3' ? 20 : 10); - $scope.configureBackup.copyConcurrency = $scope.backupConfig.copyConcurrency || ($scope.backupConfig.provider === 's3' ? 500 : 10); + $scope.configureBackup.uploadPartSize = limits.uploadPartSize || ($scope.configureBackup.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024); + $scope.configureBackup.downloadConcurrency = limits.downloadConcurrency || ($scope.backupConfig.provider === 's3' ? 30 : 10); + $scope.configureBackup.syncConcurrency = limits.syncConcurrency || ($scope.backupConfig.provider === 's3' ? 20 : 10); + $scope.configureBackup.copyConcurrency = limits.copyConcurrency || ($scope.backupConfig.provider === 's3' ? 
500 : 10); var totalMemory = Math.max(($scope.memory.memory + $scope.memory.swap) * 1.5, 2 * 1024 * 1024); $scope.configureBackup.memoryTicks = [ $scope.MIN_MEMORY_LIMIT ]; @@ -571,10 +572,12 @@ angular.module('Application').controller('BackupsController', ['$scope', '$locat var backupConfig = { provider: $scope.configureBackup.provider, format: $scope.configureBackup.format, - memoryLimit: $scope.configureBackup.memoryLimit, // required for api call to provide all fields schedulePattern: $scope.backupConfig.schedulePattern, - retentionPolicy: $scope.backupConfig.retentionPolicy + retentionPolicy: $scope.backupConfig.retentionPolicy, + limits: { + memoryLimit: $scope.configureBackup.memoryLimit, + }, }; if ($scope.configureBackup.password) { backupConfig.password = $scope.configureBackup.password; @@ -675,12 +678,12 @@ angular.module('Application').controller('BackupsController', ['$scope', '$locat backupConfig.noHardlinks = !$scope.configureBackup.useHardlinks; } - backupConfig.uploadPartSize = $scope.configureBackup.uploadPartSize; + backupConfig.limits.uploadPartSize = $scope.configureBackup.uploadPartSize; if (backupConfig.format === 'rsync') { - backupConfig.downloadConcurrency = $scope.configureBackup.downloadConcurrency; - backupConfig.syncConcurrency = $scope.configureBackup.syncConcurrency; - backupConfig.copyConcurrency = $scope.configureBackup.copyConcurrency; + backupConfig.limits.downloadConcurrency = $scope.configureBackup.downloadConcurrency; + backupConfig.limits.syncConcurrency = $scope.configureBackup.syncConcurrency; + backupConfig.limits.copyConcurrency = $scope.configureBackup.copyConcurrency; } Client.setBackupConfig(backupConfig, function (error) { diff --git a/migrations/20230712042655-settings-split-backup-config-policy.js b/migrations/20230712042655-settings-split-backup-config-policy-limit.js similarity index 53% rename from migrations/20230712042655-settings-split-backup-config-policy.js rename to 
migrations/20230712042655-settings-split-backup-config-policy-limit.js index cd5ee3011..b2bd40f9a 100644 --- a/migrations/20230712042655-settings-split-backup-config-policy.js +++ b/migrations/20230712042655-settings-split-backup-config-policy-limit.js @@ -1,17 +1,22 @@ 'use strict'; +const _ = require('underscore'); + exports.up = async function(db) { const result = await db.runSql('SELECT * FROM settings WHERE name=?', [ 'backup_config' ]); if (!result.length) return; const backupConfig = JSON.parse(result[0].value); + // split policy from backupConfig and make limits a sub-object const backupPolicy = { schedule: backupConfig.schedulePattern, retention: backupConfig.retentionPolicy }; - delete backupConfig.schedulePattern; - delete backupConfig.retentionPolicy; - delete backupConfig.mountStatus; + + const newBackupConfig = _.omit(backupConfig, 'copyConcurrency', 'syncConcurrency', 'memoryLimit', 'downloadConcurrency', + 'deleteConcurrency', 'uploadPartSize', 'schedulePattern', 'retentionPolicy', 'mountStatus'); + newBackupConfig.limits = _.pick(backupConfig, 'copyConcurrency', 'syncConcurrency', 'memoryLimit', 'downloadConcurrency', + 'deleteConcurrency', 'uploadPartSize'); await db.runSql('START TRANSACTION'); - await db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(backupConfig), 'backup_config']); + await db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(newBackupConfig), 'backup_config']); await db.runSql('UPDATE settings SET value=? 
WHERE name=?', [ JSON.stringify(backupPolicy), 'backup_policy']); await db.runSql('COMMIT'); }; diff --git a/src/apps.js b/src/apps.js index ff74f4405..5e264db59 100644 --- a/src/apps.js +++ b/src/apps.js @@ -1206,7 +1206,7 @@ async function scheduleTask(appId, installationState, taskId, auditSource) { let memoryLimit = 400; if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE || installationState === exports.ISTATE_PENDING_IMPORT || installationState === exports.ISTATE_PENDING_UPDATE) { - memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400; + memoryLimit = backupConfig.limits?.memoryLimit ? Math.max(backupConfig.limits.memoryLimit/1024/1024, 400) : 400; } else if (installationState === exports.ISTATE_PENDING_DATA_DIR_MIGRATION) { memoryLimit = 1024; // cp takes more memory than we think } diff --git a/src/backupformat/rsync.js b/src/backupformat/rsync.js index 0e2e528ef..246cc5fc3 100644 --- a/src/backupformat/rsync.js +++ b/src/backupformat/rsync.js @@ -39,7 +39,7 @@ function sync(backupConfig, remotePath, dataLayout, progressCallback, callback) assert.strictEqual(typeof callback, 'function'); // the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB - const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10); + const concurrency = backupConfig.limits?.syncConcurrency || (backupConfig.provider === 's3' ? 
20 : 10); const removeDir = util.callbackify(storage.api(backupConfig.provider).removeDir); const remove = util.callbackify(storage.api(backupConfig.provider).remove); @@ -211,7 +211,7 @@ function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) { // https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441 - const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10); + const concurrency = backupConfig.limits?.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10); async.eachLimit(entries, concurrency, downloadFile, iteratorDone); }, callback); diff --git a/src/backups.js b/src/backups.js index 5affa8848..9ace7cf66 100644 --- a/src/backups.js +++ b/src/backups.js @@ -242,7 +242,7 @@ async function startBackupTask(auditSource) { const backupConfig = await settings.getBackupConfig(); - const memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 800) : 800; + const memoryLimit = backupConfig.limits?.memoryLimit ? 
Math.max(backupConfig.limits.memoryLimit/1024/1024, 800) : 800; const taskId = await tasks.add(tasks.TASK_BACKUP, [ { /* options */ } ]); diff --git a/src/backuptask.js b/src/backuptask.js index d2ad20b33..e85cef2f6 100644 --- a/src/backuptask.js +++ b/src/backuptask.js @@ -149,8 +149,8 @@ async function runBackupUpload(uploadConfig, progressCallback) { // https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size const envCopy = Object.assign({}, process.env); - if (backupConfig.memoryLimit && backupConfig.memoryLimit >= 2*1024*1024*1024) { - const heapSize = Math.min((backupConfig.memoryLimit/1024/1024) - 256, 8192); + if (backupConfig.limits?.memoryLimit >= 2*1024*1024*1024) { + const heapSize = Math.min((backupConfig.limits.memoryLimit/1024/1024) - 256, 8192); debug(`runBackupUpload: adjusting heap size to ${heapSize}M`); envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`; } diff --git a/src/provision.js b/src/provision.js index cae4f3865..9fb6fa956 100644 --- a/src/provision.js +++ b/src/provision.js @@ -175,7 +175,8 @@ async function restoreTask(backupConfig, remotePath, sysinfoConfig, options, aud const dashboardDomain = settings.dashboardDomain(); // load this fresh from after the backup.restore if (!options.skipDnsSetup) await cloudron.setupDnsAndCert(constants.DASHBOARD_SUBDOMAIN, dashboardDomain, auditSource, (progress) => setProgress('restore', progress.message)); await cloudron.setDashboardDomain(dashboardDomain, auditSource); - await settings.setBackupCredentials(backupConfig); // update just the credentials and not the policy and flags + + await settings.setBackupConfig(backupConfig); await eventlog.add(eventlog.ACTION_RESTORE, auditSource, { remotePath }); setImmediate(() => safe(cloudron.onActivated(options), { debug })); diff --git a/src/routes/settings.js b/src/routes/settings.js index d6fb1f53c..5aab1b9f9 100644 --- a/src/routes/settings.js +++ b/src/routes/settings.js @@ -75,28 +75,34 @@ async function 
setBackupConfig(req, res, next) { if ('password' in req.body && typeof req.body.password !== 'string') return next(new HttpError(400, 'password must be a string')); if ('encryptedFilenames' in req.body && typeof req.body.encryptedFilenames !== 'boolean') return next(new HttpError(400, 'encryptedFilenames must be a boolean')); - if ('syncConcurrency' in req.body) { - if (typeof req.body.syncConcurrency !== 'number') return next(new HttpError(400, 'syncConcurrency must be a positive integer')); - if (req.body.syncConcurrency < 1) return next(new HttpError(400, 'syncConcurrency must be a positive integer')); - } - if ('copyConcurrency' in req.body) { - if (typeof req.body.copyConcurrency !== 'number') return next(new HttpError(400, 'copyConcurrency must be a positive integer')); - if (req.body.copyConcurrency < 1) return next(new HttpError(400, 'copyConcurrency must be a positive integer')); - } - if ('downloadConcurrency' in req.body) { - if (typeof req.body.downloadConcurrency !== 'number') return next(new HttpError(400, 'downloadConcurrency must be a positive integer')); - if (req.body.downloadConcurrency < 1) return next(new HttpError(400, 'downloadConcurrency must be a positive integer')); - } - if ('deleteConcurrency' in req.body) { - if (typeof req.body.deleteConcurrency !== 'number') return next(new HttpError(400, 'deleteConcurrency must be a positive integer')); - if (req.body.deleteConcurrency < 1) return next(new HttpError(400, 'deleteConcurrency must be a positive integer')); - } - if ('uploadPartSize' in req.body) { - if (typeof req.body.uploadPartSize !== 'number') return next(new HttpError(400, 'uploadPartSize must be a positive integer')); - if (req.body.uploadPartSize < 1) return next(new HttpError(400, 'uploadPartSize must be a positive integer')); + if (req.body.limits) { + if (typeof req.body.limits !== 'object') return next(new HttpError(400, 'limits must be an object')); + const limits = req.body.limits; + + if ('syncConcurrency' in limits) { + if
(typeof limits.syncConcurrency !== 'number') return next(new HttpError(400, 'syncConcurrency must be a positive integer')); + if (limits.syncConcurrency < 1) return next(new HttpError(400, 'syncConcurrency must be a positive integer')); + } + if ('copyConcurrency' in limits) { + if (typeof limits.copyConcurrency !== 'number') return next(new HttpError(400, 'copyConcurrency must be a positive integer')); + if (limits.copyConcurrency < 1) return next(new HttpError(400, 'copyConcurrency must be a positive integer')); + } + if ('downloadConcurrency' in limits) { + if (typeof limits.downloadConcurrency !== 'number') return next(new HttpError(400, 'downloadConcurrency must be a positive integer')); + if (limits.downloadConcurrency < 1) return next(new HttpError(400, 'downloadConcurrency must be a positive integer')); + } + if ('deleteConcurrency' in limits) { + if (typeof limits.deleteConcurrency !== 'number') return next(new HttpError(400, 'deleteConcurrency must be a positive integer')); + if (limits.deleteConcurrency < 1) return next(new HttpError(400, 'deleteConcurrency must be a positive integer')); + } + if ('uploadPartSize' in limits) { + if (typeof limits.uploadPartSize !== 'number') return next(new HttpError(400, 'uploadPartSize must be a positive integer')); + if (limits.uploadPartSize < 1) return next(new HttpError(400, 'uploadPartSize must be a positive integer')); + } + + if ('memoryLimit' in limits && typeof limits.memoryLimit !== 'number') return next(new HttpError(400, 'memoryLimit must be a positive integer')); } - if ('memoryLimit' in req.body && typeof req.body.memoryLimit !== 'number') return next(new HttpError(400, 'memoryLimit must be a positive integer')); if (typeof req.body.format !== 'string') return next(new HttpError(400, 'format must be a string')); if ('acceptSelfSignedCerts' in req.body && typeof req.body.acceptSelfSignedCerts !== 'boolean') return next(new HttpError(400, 'format must be a boolean')); diff --git 
a/src/routes/test/settings-test.js b/src/routes/test/settings-test.js index e860477e9..d674036eb 100644 --- a/src/routes/test/settings-test.js +++ b/src/routes/test/settings-test.js @@ -339,7 +339,7 @@ describe('Settings API', function () { it('cannot set backup_config with invalid syncConcurrency', async function () { let tmp = JSON.parse(JSON.stringify(defaultConfig)); - tmp.syncConcurrency = 'not a number'; + tmp.limits = { syncConcurrency: 'not a number' }; const response = await superagent.post(`${serverUrl}/api/v1/settings/backup_config`) .query({ access_token: owner.token }) @@ -351,7 +351,7 @@ describe('Settings API', function () { it('cannot set backup_config with invalid syncConcurrency', async function () { let tmp = JSON.parse(JSON.stringify(defaultConfig)); - tmp.syncConcurrency = 0; + tmp.limits = { syncConcurrency: 0 }; const response = await superagent.post(`${serverUrl}/api/v1/settings/backup_config`) .query({ access_token: owner.token }) @@ -377,6 +377,7 @@ describe('Settings API', function () { let tmp = JSON.parse(JSON.stringify(defaultConfig)); tmp.format = 'rsync'; tmp.backupFolder = BACKUP_FOLDER; + tmp.limits = { copyConcurrency: 34 }; const response = await superagent.post(`${serverUrl}/api/v1/settings/backup_config`) .query({ access_token: owner.token }) diff --git a/src/settings.js b/src/settings.js index 5649c1fe7..74a9696f7 100644 --- a/src/settings.js +++ b/src/settings.js @@ -29,7 +29,6 @@ exports = module.exports = { getBackupConfig, setBackupConfig, - setBackupCredentials, getServicesConfig, setServicesConfig, @@ -485,23 +484,6 @@ async function setBackupConfig(backupConfig) { notifyChange(exports.BACKUP_CONFIG_KEY, backupConfig); } -async function setBackupCredentials(credentials) { - assert.strictEqual(typeof credentials, 'object'); - - const currentConfig = await getBackupConfig(); - - // preserve these fields - const extra = _.pick(currentConfig, 'copyConcurrency', 'syncConcurrency', 'memoryLimit', 'downloadConcurrency', 
'deleteConcurrency', 'uploadPartSize'); - - const backupConfig = Object.assign({}, credentials, extra); - - backups.cleanupCacheFilesSync(); - - await set(exports.BACKUP_CONFIG_KEY, JSON.stringify(backupConfig)); - - notifyChange(exports.BACKUP_CONFIG_KEY, backupConfig); -} - async function getServicesConfig() { const value = await get(exports.SERVICES_CONFIG_KEY); if (value === null) return gDefaults[exports.SERVICES_CONFIG_KEY]; diff --git a/src/storage/gcs.js b/src/storage/gcs.js index bbed640db..85c756b54 100644 --- a/src/storage/gcs.js +++ b/src/storage/gcs.js @@ -210,7 +210,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) { } const batchSize = 1000; - const concurrency = apiConfig.copyConcurrency || 10; + const concurrency = apiConfig.limits?.copyConcurrency || 10; let total = 0; const listDirAsync = util.promisify(listDir); @@ -239,7 +239,7 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) { assert.strictEqual(typeof pathPrefix, 'string'); assert.strictEqual(typeof progressCallback, 'function'); - const batchSize = 1000, concurrency = apiConfig.deleteConcurrency || 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles + const batchSize = 1000, concurrency = apiConfig.limits?.deleteConcurrency || 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles let total = 0; const listDirAsync = util.promisify(listDir); diff --git a/src/storage/s3.js b/src/storage/s3.js index 9d75e0a23..a88a5453c 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -133,7 +133,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) { // uploader will buffer at most queueSize * partSize bytes into memory at any given time. // scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/) // s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no size limit on the last part!) 
- const partSize = apiConfig.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024); + const partSize = apiConfig.limits?.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024); s3.upload(params, { partSize, queueSize: 3 }, function (error, data) { if (error) { @@ -443,7 +443,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) { } let total = 0; - const concurrency = apiConfig.copyConcurrency || (apiConfig.provider === 's3' ? 500 : 10); + const concurrency = apiConfig.limits?.copyConcurrency || (apiConfig.provider === 's3' ? 500 : 10); progressCallback({ message: `Copying with concurrency of ${concurrency}` }); const listDirAsync = util.promisify(listDir); @@ -510,7 +510,7 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) { await listDirAsync(apiConfig, pathPrefix, 1000, function listDirIterator(entries, done) { total += entries.length; - const chunkSize = apiConfig.deleteConcurrency || (apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request + const chunkSize = apiConfig.limits?.deleteConcurrency || (apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request const chunks = chunk(entries, chunkSize); async.eachSeries(chunks, async function deleteFiles(objects) { diff --git a/src/updater.js b/src/updater.js index db7348429..c80c14729 100644 --- a/src/updater.js +++ b/src/updater.js @@ -200,7 +200,7 @@ async function updateToLatest(options, auditSource) { const [getError, backupConfig] = await safe(settings.getBackupConfig()); if (getError) throw getError; - const memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400; + const memoryLimit = backupConfig.limits?.memoryLimit ? 
Math.max(backupConfig.limits.memoryLimit/1024/1024, 400) : 400; const [taskError, taskId] = await safe(tasks.add(tasks.TASK_UPDATE, [ boxUpdateInfo, options ])); if (taskError) throw taskError;