diff --git a/src/backups.js b/src/backups.js
index 17e41a1df..ff24d49dc 100644
--- a/src/backups.js
+++ b/src/backups.js
@@ -459,7 +459,7 @@ function uploadBoxSnapshot(backupConfig, callback) {
 
     // for the moment, box backups are always tarball based. this is because it makes it easy to restore
     // in the future, if required, we can move out the mailboxes to a separate virtual app backup
-    const format = 'tgz';
+    const format = backupConfig.provider === 'exoscale-sos' ? 'flat-file' : 'tgz'; // see also rotateBoxBackup
 
     runBackupTask('snapshot/box', format, paths.BOX_DATA_DIR, function (error) {
         if (error) return callback(error);
@@ -482,7 +482,7 @@ function rotateBoxBackup(backupConfig, timestamp, appBackupIds, callback) {
 
     var snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,'');
     var backupId = util.format('%s/box_%s_v%s', timestamp, snapshotTime, config.version());
 
-    const format = 'tgz';
+    const format = backupConfig.provider === 'exoscale-sos' ? 'flat-file' : 'tgz'; // see also uploadBoxSnapshot
 
     debug('rotateBoxBackup: rotating to id:%s', backupId);
diff --git a/src/storage/s3.js b/src/storage/s3.js
index da18c5123..6b4561904 100644
--- a/src/storage/s3.js
+++ b/src/storage/s3.js
@@ -32,6 +32,7 @@ var assert = require('assert'),
     PassThrough = require('stream').PassThrough,
     path = require('path'),
     S3BlockReadStream = require('s3-block-read-stream'),
+    safe = require('safetydance'),
     superagent = require('superagent');
 
 // test only
@@ -114,6 +115,15 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
     assert.strictEqual(typeof sourceStream, 'object');
     assert.strictEqual(typeof callback, 'function');
 
+    function done(error) {
+        if (error) {
+            debug('[%s] upload: s3 upload error.', backupFilePath, error);
+            return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
+        }
+
+        callback(null);
+    }
+
     getBackupCredentials(apiConfig, function (error, credentials) {
         if (error) return callback(error);
 
@@ -124,15 +134,18 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
         };
 
         var s3 = new AWS.S3(credentials);
 
-        // s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
-        s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
-            if (error) {
-                debug('[%s] upload: s3 upload error.', backupFilePath, error);
-                return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
-            }
-            callback(null);
-        });
+        // exoscale does not like multi-part uploads. so avoid them for filesystem streams < 5GB
+        if (apiConfig.provider === 'exoscale-sos' && typeof sourceStream.path === 'string') {
+            var stat = safe.fs.statSync(sourceStream.path);
+            if (!stat) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error detecting size ${sourceStream.path}. Message: ${safe.error.message}`));
+
+            if (stat.size <= 5 * 1024 * 1024 * 1024) return s3.putObject(params, done);
+        }
+
+        // s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
+        // uploader will buffer at most queueSize * partSize bytes into memory at any given time.
+        return s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, done);
     });
 }
 