diff --git a/src/storage/s3.js b/src/storage/s3.js index 2a0a3ffb1..a1728821d 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -346,8 +346,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) { }; // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy - // Exoscale and B2 take too long to copy 5GB - const largeFileLimit = (apiConfig.provider === 'exoscale-sos' || apiConfig.provider === 'backblaze-b2' || apiConfig.provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 5 * 1024 * 1024 * 1024; + const largeFileLimit = (apiConfig.provider === 'vultr-objectstorage' || apiConfig.provider === 'exoscale-sos' || apiConfig.provider === 'backblaze-b2' || apiConfig.provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 3 * 1024 * 1024 * 1024; if (entry.size < largeFileLimit) { progressCallback({ message: `Copying ${relativePath || oldFilePath}` }); @@ -367,7 +366,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) { s3.createMultipartUpload(copyParams, function (error, multipart) { if (error) return done(error); - // Exoscale (96M) was suggested by exoscale. 1GB - rather random size for others + // The 96M chunk size was suggested by Exoscale; the 1GB used for other providers is an arbitrary choice const chunkSize = apiConfig.provider === 'exoscale-sos' ? 96 * 1024 * 1024 : 1024 * 1024 * 1024; const uploadId = multipart.UploadId; let uploadedParts = [], ranges = [];