backups: move limits into a sub-object

fixes #817
This commit is contained in:
Girish Ramakrishnan
2023-07-13 11:50:57 +05:30
parent 1a5cbfb2a1
commit c86059e070
13 changed files with 66 additions and 68 deletions

View File

@@ -133,7 +133,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
// uploader will buffer at most queueSize * partSize bytes into memory at any given time.
// scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/)
// s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no size limit on the last part!)
const partSize = apiConfig.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024);
const partSize = apiConfig.limits?.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024);
s3.upload(params, { partSize, queueSize: 3 }, function (error, data) {
if (error) {
@@ -443,7 +443,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
}
let total = 0;
const concurrency = apiConfig.copyConcurrency || (apiConfig.provider === 's3' ? 500 : 10);
const concurrency = apiConfig.limits?.copyConcurrency || (apiConfig.provider === 's3' ? 500 : 10);
progressCallback({ message: `Copying with concurrency of ${concurrency}` });
const listDirAsync = util.promisify(listDir);
@@ -510,7 +510,7 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) {
await listDirAsync(apiConfig, pathPrefix, 1000, function listDirIterator(entries, done) {
total += entries.length;
const chunkSize = apiConfig.deleteConcurrency || (apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
const chunkSize = apiConfig.limits?.deleteConcurrency || (apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
const chunks = chunk(entries, chunkSize);
async.eachSeries(chunks, async function deleteFiles(objects) {