Revert "merge the done callback into the main code"

This reverts commit c39bec8cc1.

This commit accidentally included unrelated changes.
This commit is contained in:
Girish Ramakrishnan
2018-02-22 10:58:56 -08:00
parent 0e7e672dd2
commit e022dbf8a6
3 changed files with 31 additions and 32 deletions

View File

@@ -68,7 +68,7 @@ function remove(apiConfig, filename, callback) {
assert.strictEqual(typeof filename, 'string');
assert.strictEqual(typeof callback, 'function');
// Result: none. Should not error if file is not found
// Result: none
callback(new Error('not implemented'));
}
@@ -77,7 +77,7 @@ function removeDir(apiConfig, pathPrefix) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof pathPrefix, 'string');
// Result: none. Should not error if dir is not found
// Result: none
var events = new EventEmitter();
process.nextTick(function () { events.emit('done', new Error('not implemented')); });
return events;

View File

@@ -122,6 +122,15 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof sourceStream, 'object');
assert.strictEqual(typeof callback, 'function');
function done(error) {
if (error) {
debug('[%s] upload: s3 upload error.', backupFilePath, error);
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
}
callback(null);
}
getS3Config(apiConfig, function (error, credentials) {
if (error) return callback(error);
@@ -135,14 +144,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
// s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
// uploader will buffer at most queueSize * partSize bytes into memory at any given time.
s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
if (error) {
debug('[%s] upload: s3 upload error.', backupFilePath, error);
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
}
callback(null);
});
return s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, done);
});
}
@@ -412,11 +414,10 @@ function remove(apiConfig, filename, callback) {
}
};
// deleteObjects does not return error if key is not found
s3.deleteObjects(deleteParams, function (error) {
if (error) debug(`remove: Unable to remove ${filename}. ${error.message}`);
if (error) debug('remove: Unable to remove %s. Not fatal.', deleteParams.Key, error);
callback(error);
callback(null);
});
});
}
@@ -438,10 +439,9 @@ function removeDir(apiConfig, pathPrefix) {
events.emit('progress', `Removing ${contents.length} files from ${contents[0].Key} to ${contents[contents.length-1].Key}`);
// deleteObjects does not return error if key is not found
s3.deleteObjects(deleteParams, function (error /*, deleteData */) {
if (error) {
events.emit('progress', `Unable to remove ${contents.length} files from ${contents[0].Key} to ${contents[contents.length-1].Key}: ${error.message}`);
events.emit('progress', `Unable to remove ${deleteParams.Key} ${error.message}`);
return iteratorCallback(error);
}
@@ -452,9 +452,8 @@ function removeDir(apiConfig, pathPrefix) {
listDir(apiConfig, pathPrefix, function (s3, objects, done) {
total += objects.length;
// digitalocean spaces takes too long to delete 1000 objects at a time
const chunkSize = apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100;
var chunks = chunk(objects, chunkSize);
const batchSize = apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100; // throttle objects in each request
var chunks = batchSize === 1 ? objects : chunk(objects, batchSize);
async.eachSeries(chunks, deleteFiles.bind(null, s3), done);
}, function (error) {