vultr: fix copy of large objects
https://forum.cloudron.io/topic/10266/backups-are-failing
This commit is contained in:
@@ -346,8 +346,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
     };
 
     // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy
-    // Exoscale and B2 take too long to copy 5GB
-    const largeFileLimit = (apiConfig.provider === 'exoscale-sos' || apiConfig.provider === 'backblaze-b2' || apiConfig.provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 5 * 1024 * 1024 * 1024;
+    const largeFileLimit = (apiConfig.provider === 'vultr-objectstorage' || apiConfig.provider === 'exoscale-sos' || apiConfig.provider === 'backblaze-b2' || apiConfig.provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 3 * 1024 * 1024 * 1024;
 
     if (entry.size < largeFileLimit) {
         progressCallback({ message: `Copying ${relativePath || oldFilePath}` });
@@ -367,7 +366,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
         s3.createMultipartUpload(copyParams, function (error, multipart) {
             if (error) return done(error);
 
-            // Exoscale (96M) was suggested by exoscale. 1GB - rather random size for others
+            // Exoscale (96M) was suggested by exoscale. 1GB for others is arbitrary size
             const chunkSize = apiConfig.provider === 'exoscale-sos' ? 96 * 1024 * 1024 : 1024 * 1024 * 1024;
             const uploadId = multipart.UploadId;
             let uploadedParts = [], ranges = [];
Reference in New Issue
Block a user