// cloudron-box/src/storage/s3.js
// S3 (and S3-compatible) object storage backend for Cloudron backups.
'use strict';

// Backup storage backend for S3 and S3-compatible providers (minio, Scaleway,
// Exoscale, Backblaze B2, DigitalOcean Spaces, ...).
exports = module.exports = {
    getBackupRootPath,
    getProviderStatus,
    getAvailableSize,

    upload,
    exists,
    download,
    copy,
    listDir,

    remove,
    removeDir,
    remount,

    testConfig,

    removePrivateFields,
    injectPrivateFields,

    // Used to mock AWS
    _mockInject: mockInject,
    _mockRestore: mockRestore,
    _chunk: chunk
};

// https://github.com/aws/aws-sdk-js/issues/4354
require('aws-sdk/lib/maintenance_mode_message').suppress = true;
const assert = require('assert'),
    async = require('async'),
    AwsSdk = require('aws-sdk'),
    BoxError = require('../boxerror.js'),
    constants = require('../constants.js'),
    debug = require('debug')('box:storage/s3'),
    https = require('https'),
    path = require('path'),
    Readable = require('stream').Readable,
    safe = require('safetydance'),
    util = require('util'),
    _ = require('underscore');

let aws = AwsSdk; // indirection so tests can swap the SDK via mockInject()

// test only
let originalAWS;
// test only: replace the AWS SDK with a mock implementation (restore with mockRestore)
function mockInject(mock) {
    originalAWS = aws;
    aws = mock;
}
// test only: undo a previous mockInject()
function mockRestore() {
    aws = originalAWS;
}
// true when an SDK/filesystem error means the object simply does not exist
function S3_NOT_FOUND(error) {
    const notFoundCodes = ['NoSuchKey', 'NotFound', 'ENOENT'];
    return notFoundCodes.includes(error.code);
}
// Builds the AWS.S3 constructor options from a backup apiConfig.
// Applies defaults (v4 signature, us-east-1, vhost addressing), retry/timeout
// tuning and TLS workarounds for dotted bucket names / self-signed endpoints.
function getS3Config(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    const credentials = {
        signatureVersion: apiConfig.signatureVersion || 'v4',
        s3ForcePathStyle: false, // Use vhost style instead of path style - https://forums.aws.amazon.com/ann.jspa?annID=6776
        accessKeyId: apiConfig.accessKeyId,
        secretAccessKey: apiConfig.secretAccessKey,
        region: apiConfig.region || 'us-east-1',
        maxRetries: 10,
        retryDelayOptions: {
            customBackoff: (/* retryCount, error */) => 20000 // constant backoff - https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/Config.html#retryDelayOptions-property
        },
        httpOptions: {
            connectTimeout: 60000, // https://github.com/aws/aws-sdk-js/pull/1446
            timeout: 0 // https://github.com/aws/aws-sdk-js/issues/1704 (allow unlimited time for chunk upload)
        }
    };

    if (apiConfig.endpoint) credentials.endpoint = apiConfig.endpoint;
    if (apiConfig.s3ForcePathStyle === true) credentials.s3ForcePathStyle = true;

    // s3 endpoint names come from the SDK
    const isHttps = (credentials.endpoint && credentials.endpoint.startsWith('https://')) || apiConfig.provider === 's3';
    if (isHttps) { // only set agent for https calls. otherwise, it crashes
        // dots in a bucket name break wildcard cert validation with vhost-style addressing
        if (apiConfig.acceptSelfSignedCerts || apiConfig.bucket.includes('.')) {
            credentials.httpOptions.agent = new https.Agent({ rejectUnauthorized: false });
        }
    }

    return credentials;
}
// storage api
// Root path under which all backups of this config live (the configured key prefix).
function getBackupRootPath(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    const { prefix } = apiConfig;
    return prefix;
}
// Object storage needs no mount/health checks; it is always considered active.
async function getProviderStatus(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    const status = { state: 'active' };
    return status;
}
// Object storage is effectively unbounded, so report infinite free space.
async function getAvailableSize(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    return Infinity; // same value as Number.POSITIVE_INFINITY
}
// Streams sourceStream into the bucket at backupFilePath using the SDK's managed
// multipart uploader. callback(error) - error is a BoxError.EXTERNAL_ERROR on failure.
function upload(apiConfig, backupFilePath, sourceStream, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof sourceStream, 'object');
    assert.strictEqual(typeof callback, 'function');

    const credentials = getS3Config(apiConfig);

    const params = {
        Bucket: apiConfig.bucket,
        Key: backupFilePath,
        Body: sourceStream
    };

    const s3 = new aws.S3(credentials);

    // s3.upload automatically does a multi-part upload. we set queueSize to 3 to reduce memory usage
    // uploader will buffer at most queueSize * partSize bytes into memory at any given time.
    // scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/)
    // s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no size limit on the last part!)
    const partSize = apiConfig.limits?.uploadPartSize || (apiConfig.provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024);

    s3.upload(params, { partSize, queueSize: 3 }, function (error, data) {
        if (error) {
            debug(`upload: [${backupFilePath}] s3 upload error. %o`, error);
            return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
        }

        debug(`Uploaded ${backupFilePath} with partSize ${partSize}: ${JSON.stringify(data)}`);

        callback(null);
    });
}
// Returns true when backupFilePath exists: headObject for files, a 1-key listObjects
// probe for "directories" (paths ending in '/'). Throws BoxError on API errors.
async function exists(apiConfig, backupFilePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');

    const credentials = getS3Config(apiConfig);

    // probing calls should fail fast - drop the retry tuning
    const s3 = new aws.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));

    if (!backupFilePath.endsWith('/')) { // check for file
        const params = {
            Bucket: apiConfig.bucket,
            Key: backupFilePath
        };

        // keep a handle on the AWS.Request so we can inspect the raw HTTP response below
        const request = s3.headObject(params);
        const [error] = await safe(request.promise());

        // a real S3(-compatible) endpoint responds with at least one x-amz-* header.
        // BUGFIX: this used to read `this.httpResponse`, but `this` is undefined inside
        // an async module function, so the check threw a TypeError on every call.
        const headers = request.response?.httpResponse?.headers || {};
        if (!Object.keys(headers).some(h => h.startsWith('x-amz'))) throw new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint');

        if (error && S3_NOT_FOUND(error)) return false;
        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`);

        return true;
    } else { // list dir contents
        const listParams = {
            Bucket: apiConfig.bucket,
            Prefix: backupFilePath,
            MaxKeys: 1
        };

        const [error, listData] = await safe(s3.listObjects(listParams).promise());
        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`);

        return listData.Contents.length !== 0;
    }
}
// Download the object in small parts. By downloading small parts, we reduce the chance of sporadic network errors when downloading large objects
// We can retry each part individually, but we haven't had the need for this yet
class S3MultipartDownloadStream extends Readable {
    // s3: AWS.S3 client, params: { Bucket, Key }, options.blockSize: bytes per ranged GET
    constructor (s3, params, options) {
        super(options);
        this._s3 = s3;
        this._params = params;
        this._readSize = 0; // bytes pushed so far; doubles as the offset of the next range request
        this._fileSize = -1; // total object size; fetched lazily on the first _read() via headObject
        this._path = params.Bucket + '/' + params.Key; // used in error messages only
        this._blockSize = options.blockSize || 64 * 1048576; // default 64 MiB per ranged request
    }
    // signal EOF to the consumer
    _done() {
        this._readSize = 0;
        this.push(null); // EOF
    }
    // map SDK errors to BoxError and destroy the stream (emits 'error' downstream)
    _handleError(error) {
        if (S3_NOT_FOUND(error)) {
            this.destroy(new BoxError(BoxError.NOT_FOUND, `Backup not found: ${this._path}`));
        } else {
            debug(`download: ${this._path} s3 stream error. %o`, error);
            this.destroy(new BoxError(BoxError.EXTERNAL_ERROR, `Error multipartDownload ${this._path}. Message: ${error.message} HTTP Code: ${error.code}`));
        }
    }
    // fetch bytes [offset, offset+length-1] with a ranged GET and push them downstream
    _downloadRange(offset, length) {
        const params = Object.assign({}, this._params); // copy so Range never leaks into this._params
        const lastPos = offset + length - 1;
        const range = `bytes=${offset}-${lastPos}`;
        params['Range'] = range;
        this._s3.getObject(params, (error, data) => {
            if (error) return this._handleError(error);
            const length = parseInt(data.ContentLength, 10); // shadows outer length intentionally; actual bytes returned
            if (length > 0) {
                this._readSize += length;
                this.push(data.Body);
            } else {
                this._done(); // empty response: nothing left to read
            }
        });
    }
    // download the next full block, or the (possibly smaller) final remainder
    _nextDownload() {
        let len = 0;
        if (this._readSize + this._blockSize < this._fileSize) {
            len = this._blockSize;
        } else {
            len = this._fileSize - this._readSize;
        }
        this._downloadRange(this._readSize, len);
    }
    // determine the object size via headObject, then kick off the first range download
    _fetchSize() {
        this._s3.headObject(this._params, (error, data) => {
            if (error) return this._handleError(error);
            const length = parseInt(data.ContentLength, 10);
            if (length > 0) {
                this._fileSize = length;
                this._nextDownload();
            } else {
                this._done(); // zero-byte object
            }
        });
    }
    // Readable hook: invoked whenever the consumer wants more data
    _read() {
        if (this._readSize === this._fileSize) return this._done();
        if (this._readSize === 0) return this._fetchSize(); // first call: size unknown yet
        this._nextDownload();
    }
}
// Provides (via callback) a Readable stream over the object at backupFilePath.
// Errors (including NOT_FOUND) surface as 'error' events on the returned stream.
function download(apiConfig, backupFilePath, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    const credentials = getS3Config(apiConfig);

    const params = {
        Bucket: apiConfig.bucket,
        Key: backupFilePath
    };

    const s3 = new aws.S3(credentials);

    const multipartDownloadStream = new S3MultipartDownloadStream(s3, params, { blockSize: 64 * 1024 * 1024 });

    return callback(null, multipartDownloadStream);
}
// Lists all objects under the prefix `dir`, batchSize keys at a time.
// iteratorCallback(entries, done) is called per batch with entries of
// { fullPath, size }; callback(error) fires once the listing is exhausted.
function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof dir, 'string');
    assert.strictEqual(typeof batchSize, 'number');
    assert.strictEqual(typeof iteratorCallback, 'function');
    assert.strictEqual(typeof callback, 'function');

    const credentials = getS3Config(apiConfig);

    const s3 = new aws.S3(credentials);

    const listParams = {
        Bucket: apiConfig.bucket,
        Prefix: dir,
        MaxKeys: batchSize
    };

    let done = false;

    async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
        s3.listObjects(listParams, function (error, listData) {
            if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${dir}. Message: ${error.message} HTTP Code: ${error.code}`));

            if (listData.Contents.length === 0) { done = true; return whilstCallback(); }

            const entries = listData.Contents.map(function (c) { return { fullPath: c.Key, size: c.Size }; });

            iteratorCallback(entries, function (error) {
                if (error) return whilstCallback(error);

                if (!listData.IsTruncated) { done = true; return whilstCallback(); }

                // paginate manually from the last seen key
                listParams.Marker = listData.Contents[listData.Contents.length - 1].Key; // NextMarker is returned only with delimiter

                whilstCallback();
            });
        });
    }, callback);
}
// https://github.com/aws/aws-sdk-js/blob/2b6bcbdec1f274fe931640c1b61ece999aae7a19/lib/util.js#L41
// https://github.com/GeorgePhillips/node-s3-url-encode/blob/master/index.js
// See aws-sdk-js/issues/1302
// Builds the CopySource header value ("/bucket/encoded-key") for copyObject/uploadPartCopy.
// Note: `path` here is the object key, shadowing the `path` module inside this function.
function encodeCopySource(bucket, path) {
    // AWS percent-encodes some extra non-standard characters in a URI
    // (characters that encodeURI leaves alone, like '+', ':' and '=')
    const output = encodeURI(path).replace(/[+!"#$@&'()*+,:;=?@]/g, function(ch) {
        return '%' + ch.charCodeAt(0).toString(16).toUpperCase();
    });

    // the slash at the beginning is optional
    return `/${bucket}/${output}`;
}
// Server-side copies everything under oldFilePath to newFilePath.
// Small objects use copyObject; objects above the provider's limit use a multipart
// uploadPartCopy. progressCallback({ message }) reports progress; throws on failure.
async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldFilePath, 'string');
    assert.strictEqual(typeof newFilePath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    // copies one listed entry; iteratorCallback(error) on completion
    function copyFile(entry, iteratorCallback) {
        const credentials = getS3Config(apiConfig);

        const s3 = new aws.S3(credentials); // fresh client per file keeps connections/agents isolated
        const relativePath = path.relative(oldFilePath, entry.fullPath);

        // shared completion handler for both the simple and multipart paths
        function done(error) {
            if (error) debug(`copy: s3 copy error when copying ${entry.fullPath}: ${error}`);

            if (error && S3_NOT_FOUND(error)) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `Old backup not found: ${entry.fullPath}`));
            if (error) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Error copying ${entry.fullPath} (${entry.size} bytes): ${error.code || ''} ${error}`));

            iteratorCallback(null);
        }

        const copyParams = {
            Bucket: apiConfig.bucket,
            Key: path.join(newFilePath, relativePath)
        };

        // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy
        // Exoscale and B2 take too long to copy 5GB
        const largeFileLimit = (apiConfig.provider === 'exoscale-sos' || apiConfig.provider === 'backblaze-b2' || apiConfig.provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 5 * 1024 * 1024 * 1024;

        if (entry.size < largeFileLimit) {
            progressCallback({ message: `Copying ${relativePath || oldFilePath}` });

            copyParams.CopySource = encodeCopySource(apiConfig.bucket, entry.fullPath);
            s3.copyObject(copyParams, done).on('retry', function (response) {
                progressCallback({ message: `Retrying (${response.retryCount+1}) copy of ${relativePath || oldFilePath}. Error: ${response.error} ${response.httpResponse.statusCode}` });

                // on DO, we get a random 408. these are not retried by the SDK
                if (response.error) response.error.retryable = true; // https://github.com/aws/aws-sdk-js/issues/412
            });

            return;
        }

        progressCallback({ message: `Copying (multipart) ${relativePath || oldFilePath}` });

        s3.createMultipartUpload(copyParams, function (error, multipart) {
            if (error) return done(error);

            // Exoscale (96M) was suggested by exoscale. 1GB - rather random size for others
            const chunkSize = apiConfig.provider === 'exoscale-sos' ? 96 * 1024 * 1024 : 1024 * 1024 * 1024;
            const uploadId = multipart.UploadId;
            let uploadedParts = [], ranges = [];

            // split [0, entry.size) into chunkSize-d byte ranges (last range takes the remainder)
            let cur = 0;
            while (cur + chunkSize < entry.size) {
                ranges.push({ startBytes: cur, endBytes: cur + chunkSize - 1 });
                cur += chunkSize;
            }
            ranges.push({ startBytes: cur, endBytes: entry.size-1 });

            async.eachOfLimit(ranges, 3, function copyChunk(range, index, iteratorDone) {
                const partCopyParams = {
                    Bucket: apiConfig.bucket,
                    Key: path.join(newFilePath, relativePath),
                    CopySource: encodeCopySource(apiConfig.bucket, entry.fullPath), // See aws-sdk-js/issues/1302
                    CopySourceRange: 'bytes=' + range.startBytes + '-' + range.endBytes,
                    PartNumber: index+1,
                    UploadId: uploadId
                };

                progressCallback({ message: `Copying part ${partCopyParams.PartNumber} - ${partCopyParams.CopySource} ${partCopyParams.CopySourceRange}` });

                s3.uploadPartCopy(partCopyParams, function (error, part) {
                    if (error) return iteratorDone(error);

                    progressCallback({ message: `Copying part ${partCopyParams.PartNumber} - Etag: ${part.CopyPartResult.ETag}` });

                    if (!part.CopyPartResult.ETag) return iteratorDone(new Error('Multi-part copy is broken or not implemented by the S3 storage provider'));

                    uploadedParts[index] = { ETag: part.CopyPartResult.ETag, PartNumber: partCopyParams.PartNumber };

                    iteratorDone();
                }).on('retry', function (response) {
                    progressCallback({ message: `Retrying (${response.retryCount+1}) multipart copy of ${relativePath || oldFilePath}. Error: ${response.error} ${response.httpResponse.statusCode}` });
                });
            }, function chunksCopied(error) {
                if (error) { // we must still recommend the user to set a AbortIncompleteMultipartUpload lifecycle rule
                    const abortParams = {
                        Bucket: apiConfig.bucket,
                        Key: path.join(newFilePath, relativePath),
                        UploadId: uploadId
                    };

                    progressCallback({ message: `Aborting multipart copy of ${relativePath || oldFilePath}` });

                    return s3.abortMultipartUpload(abortParams, () => done(error)); // ignore any abort errors
                }

                const completeMultipartParams = {
                    Bucket: apiConfig.bucket,
                    Key: path.join(newFilePath, relativePath),
                    MultipartUpload: { Parts: uploadedParts },
                    UploadId: uploadId
                };

                progressCallback({ message: `Finishing multipart copy - ${completeMultipartParams.Key}` });

                s3.completeMultipartUpload(completeMultipartParams, done);
            });
        });
    }

    let total = 0;
    const concurrency = apiConfig.limits?.copyConcurrency || (apiConfig.provider === 's3' ? 500 : 10);

    progressCallback({ message: `Copying with concurrency of ${concurrency}` });
    const listDirAsync = util.promisify(listDir);

    const [copyError] = await safe(listDirAsync(apiConfig, oldFilePath, 1000, function listDirIterator(entries, done) {
        total += entries.length;

        progressCallback({ message: `Copying files from ${total-entries.length}-${total}` });

        async.eachLimit(entries, concurrency, copyFile, done);
    }));

    progressCallback({ message: `Copied ${total} files with error: ${copyError}` });

    if (copyError) throw copyError;
}
// Removes a single object; succeeds even when the object does not exist.
async function remove(apiConfig, filename) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');

    const credentials = getS3Config(apiConfig);

    const s3 = new aws.S3(credentials);

    const deleteParams = {
        Bucket: apiConfig.bucket,
        Delete: {
            Objects: [{ Key: filename }]
        }
    };

    // deleteObjects does not return error if key is not found
    const [error] = await safe(s3.deleteObjects(deleteParams).promise());
    // BUGFIX: the message used `deleteParams.Key` which does not exist (always 'undefined')
    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${filename}. error: ${error.message}`);
}
// Splits array into consecutive slices of `size` elements (the last slice may be shorter).
function chunk(array, size) {
    assert(Array.isArray(array));
    assert.strictEqual(typeof size, 'number');

    if (array.length === 0) return [];

    const result = [];
    for (let start = 0; start < array.length; start += size) {
        result.push(array.slice(start, start + size));
    }
    return result;
}
// Removes every object under pathPrefix using batched deleteObjects calls.
// progressCallback({ message }) reports progress; throws BoxError on failure.
async function removeDir(apiConfig, pathPrefix, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof pathPrefix, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const credentials = getS3Config(apiConfig);
    const s3 = new aws.S3(credentials);
    const listDirAsync = util.promisify(listDir);
    let total = 0; // number of objects removed so far

    await listDirAsync(apiConfig, pathPrefix, 1000, function listDirIterator(entries, done) {
        total += entries.length;

        const chunkSize = apiConfig.limits?.deleteConcurrency || (apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
        const chunks = chunk(entries, chunkSize);

        async.eachSeries(chunks, async function deleteFiles(objects) {
            const deleteParams = {
                Bucket: apiConfig.bucket,
                Delete: {
                    Objects: objects.map(function (o) { return { Key: o.fullPath }; })
                }
            };

            const firstKey = objects[0].fullPath, lastKey = objects[objects.length-1].fullPath;

            progressCallback({ message: `Removing ${objects.length} files from ${firstKey} to ${lastKey}` });

            // deleteObjects does not return error if key is not found
            const [error] = await safe(s3.deleteObjects(deleteParams).promise());
            if (error) {
                // BUGFIX: messages used `deleteParams.Key` which does not exist (always 'undefined')
                progressCallback({ message: `Unable to remove ${objects.length} files (${firstKey} to ${lastKey}): ${error.message || error.code}` });
                throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove files from ${firstKey} to ${lastKey}. error: ${error.message}`);
            }
        }, done);
    });

    progressCallback({ message: `Removed ${total} files` });
}
// No-op for object storage; exists to satisfy the common storage backend interface.
async function remount(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');
}
// Validates apiConfig fields, then verifies the credentials actually work by doing
// a put/list/delete roundtrip on a test object under the prefix.
// Throws BoxError.BAD_FIELD for invalid fields, BoxError.EXTERNAL_ERROR on API failures.
async function testConfig(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    if (typeof apiConfig.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a string');
    if (typeof apiConfig.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a string');

    if (typeof apiConfig.bucket !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'bucket must be a string');
    // the node module seems to incorrectly accept bucket name with '/'
    if (apiConfig.bucket.includes('/')) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "/"');
    // names must be lowercase and start with a letter or number. can contain dashes
    if (apiConfig.bucket.includes('_') || apiConfig.bucket.match(/[A-Z]/)) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "_" or capitals');

    if (typeof apiConfig.prefix !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string');
    if ('signatureVersion' in apiConfig && typeof apiConfig.signatureVersion !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'signatureVersion must be a string');
    if ('endpoint' in apiConfig && typeof apiConfig.endpoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'endpoint must be a string');
    if ('acceptSelfSignedCerts' in apiConfig && typeof apiConfig.acceptSelfSignedCerts !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'acceptSelfSignedCerts must be a boolean');
    if ('s3ForcePathStyle' in apiConfig && typeof apiConfig.s3ForcePathStyle !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 's3ForcePathStyle must be a boolean');

    // attempt to upload and delete a file with new credentials
    const credentials = getS3Config(apiConfig);

    const putParams = {
        Bucket: apiConfig.bucket,
        Key: path.join(apiConfig.prefix, 'snapshot/cloudron-testfile'),
        Body: 'testcontent'
    };

    // probing calls should fail fast - drop the retry tuning
    const s3 = new aws.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));

    const [putError] = await safe(s3.putObject(putParams).promise());
    if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. Message: ${putError.message} HTTP Code: ${putError.code}`);

    const listParams = {
        Bucket: apiConfig.bucket,
        Prefix: path.join(apiConfig.prefix, 'snapshot'),
        MaxKeys: 1
    };

    const [listError] = await safe(s3.listObjects(listParams).promise());
    if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. Message: ${listError.message} HTTP Code: ${listError.code}`);

    const delParams = {
        Bucket: apiConfig.bucket,
        Key: path.join(apiConfig.prefix, 'snapshot/cloudron-testfile')
    };

    const [delError] = await safe(s3.deleteObject(delParams).promise());
    if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. Message: ${delError.message} HTTP Code: ${delError.code}`);
}
// Masks the secret in-place so the config can safely be shown to clients; returns the same object.
function removePrivateFields(apiConfig) {
    return Object.assign(apiConfig, { secretAccessKey: constants.SECRET_PLACEHOLDER });
}
// Restores the real secret into newConfig when the client echoed back the placeholder.
function injectPrivateFields(newConfig, currentConfig) {
    const isPlaceholder = newConfig.secretAccessKey === constants.SECRET_PLACEHOLDER;
    if (isPlaceholder) newConfig.secretAccessKey = currentConfig.secretAccessKey;
}