backups: move limits into a sub object

fixes #817
This commit is contained in:
Girish Ramakrishnan
2023-07-13 11:50:57 +05:30
parent 1a5cbfb2a1
commit c86059e070
13 changed files with 66 additions and 68 deletions

View File

@@ -39,7 +39,7 @@ function sync(backupConfig, remotePath, dataLayout, progressCallback, callback)
assert.strictEqual(typeof callback, 'function');
// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
-    const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
+    const concurrency = backupConfig.limits?.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
const removeDir = util.callbackify(storage.api(backupConfig.provider).removeDir);
const remove = util.callbackify(storage.api(backupConfig.provider).remove);
@@ -211,7 +211,7 @@ function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback,
storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
-        const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
+        const concurrency = backupConfig.limits?.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
}, callback);