diff --git a/src/storage/s3.js b/src/storage/s3.js index d80cf17a5..f4a18781c 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -121,6 +121,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) { // s3.upload automatically does a multi-part upload. we set queueSize to 3 to reduce memory usage // uploader will buffer at most queueSize * partSize bytes into memory at any given time. // scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/) + // s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no minimum size limit on the last part!) const partSize = apiConfig.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024); s3.upload(params, { partSize, queueSize: 3 }, function (error, data) { @@ -129,7 +130,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) { return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`)); } - debug(`Uploaded ${backupFilePath}: ${JSON.stringify(data)}`); + debug(`Uploaded ${backupFilePath} with partSize ${partSize}: ${JSON.stringify(data)}`); callback(null); });