reorder functions for no-use-before-define
+176
-177
@@ -19,9 +19,6 @@ import _ from '../underscore.js';
const debug = debugModule('box:storage/s3');

const _chunk = chunk;

function S3_NOT_FOUND(error) {
    return error instanceof NoSuchKey || error instanceof NoSuchBucket;
}
@@ -353,6 +350,181 @@ function encodeCopySource(bucket, path) {
    return `${bucket}/${output}`;
}

async function remove(config, remotePath) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });

    const fullRemotePath = path.join(config.prefix, remotePath);

    const deleteParams = {
        Bucket: config.bucket,
        Key: fullRemotePath
    };

    // deleteObject does not return error if key is not found
    const [error] = await safe(s3.deleteObject(deleteParams));
    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${fullRemotePath}. ${formatError(error)}`);
}
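
// Illustrative usage sketch (not part of this change; the path below is a made-up example):
//   await remove(config, 'snapshot/box.tar.gz');
// The key is joined with config.prefix before the DeleteObject call, and a missing
// key is not treated as an error.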

function chunk(array, size) {
    assert(Array.isArray(array));
    assert.strictEqual(typeof size, 'number');

    const length = array.length;
    if (!length) return [];
    let index = 0, resIndex = 0;
    const result = Array(Math.ceil(length / size));

    for (; index < length; index += size) {
        result[resIndex++] = array.slice(index, index + size);
    }

    return result;
}
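
// Illustrative sketch (not part of this change): chunk() splits an array into
// fixed-size batches; the last batch may be shorter, e.g.
//   chunk(['a', 'b', 'c', 'd', 'e'], 2);   // => [['a', 'b'], ['c', 'd'], ['e']]
//   chunk([], 100);                        // => []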

async function removeDir(config, limits, remotePathPrefix, progressCallback) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof limits, 'object');
    assert.strictEqual(typeof remotePathPrefix, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    // only use this client for the DeleteObjects call; it forces an MD5 checksum and may crash for anything else
    const deleteObjectsS3Client = createS3Client(config, { retryStrategy: RETRY_STRATEGY, deleteObjects: true });

    let total = 0;
    let marker = null;
    while (true) {
        const batch = await listDir(config, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix

        const entries = batch.entries;
        total += entries.length;

        const chunkSize = limits.deleteConcurrency || (config._provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
        const chunks = chunk(entries, chunkSize);

        await async.eachSeries(chunks, async function deleteFiles(objects) {
            const deleteParams = {
                Bucket: config.bucket,
                Delete: {
                    Objects: objects.map(function (o) { return { Key: path.join(config.prefix, o.path) }; })
                }
            };

            const fullFirstPath = path.join(config.prefix, objects[0].path), fullLastPath = path.join(config.prefix, objects[objects.length - 1].path);
            progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` });

            // deleteObjects does not return error if key is not found
            const [error] = await safe(deleteObjectsS3Client.deleteObjects(deleteParams));
            if (error) {
                progressCallback({ message: `Unable to remove from ${fullFirstPath} to ${fullLastPath} ${error.message || error.Code}` });
                throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove from ${fullFirstPath} to ${fullLastPath}. error: ${error.message}`);
            }
        });

        if (!batch.marker) break;
        marker = batch.marker;
    }

    progressCallback({ message: `Removed ${total} files` });
}
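
// Illustrative usage sketch (assumed call site, not part of this change): removeDir()
// pages through listDir() 1000 entries at a time and issues batched DeleteObjects
// requests, reporting progress along the way. The prefix and deleteConcurrency value
// below are made-up examples:
//   await removeDir(config, { deleteConcurrency: 500 }, 'backup/2023-01-01', function (progress) {
//       debug(progress.message);
//   });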

// often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket, resulting in large bucket sizes over time
async function cleanup(config, progressCallback) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });

    const uploads = await s3.listMultipartUploads({ Bucket: config.bucket, Prefix: config.prefix });
    progressCallback({ message: `Cleaning up any aborted multi-part uploads. count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` });
    if (!uploads.Uploads) return;

    for (const upload of uploads.Uploads) {
        if (Date.now() - new Date(upload.Initiated) < 3 * 24 * 60 * 60 * 1000) continue; // skip uploads initiated less than 3 days ago
        progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` });
        await safe(s3.abortMultipartUpload({ Bucket: config.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
    }
}

async function verifyConfig({ id, provider, config }) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof provider, 'string');
    assert.strictEqual(typeof config, 'object');

    if (typeof config.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a string');
    if (typeof config.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a string');

    if (typeof config.bucket !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'bucket must be a string');
    // the node module seems to incorrectly accept bucket names containing '/'
    if (config.bucket.includes('/')) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "/"');

    // bucket names must be lowercase and start with a letter or number; they can contain dashes
    if (config.bucket.includes('_') || config.bucket.match(/[A-Z]/)) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "_" or capitals');

    if (typeof config.prefix !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string');
    if ('signatureVersion' in config && typeof config.signatureVersion !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'signatureVersion must be a string');
    if ('endpoint' in config) {
        if (typeof config.endpoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'endpoint must be a string');
        if (!config.endpoint.startsWith('http://') && !config.endpoint.startsWith('https://')) throw new BoxError(BoxError.BAD_FIELD, 'endpoint must start with http:// or https://');
    }

    if ('region' in config && typeof config.region !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'region must be a string');

    if ('acceptSelfSignedCerts' in config && typeof config.acceptSelfSignedCerts !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'acceptSelfSignedCerts must be a boolean');
    if ('s3ForcePathStyle' in config && typeof config.s3ForcePathStyle !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 's3ForcePathStyle must be a boolean');

    const putParams = {
        Bucket: config.bucket,
        Key: path.join(config.prefix, 'snapshot/cloudron-testfile'),
        Body: 'testcontent'
    };

    const s3 = createS3Client(config, {});
    const [putError] = await safe(s3.putObject(putParams));
    if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. ${formatError(putError)}`);

    const listParams = {
        Bucket: config.bucket,
        Prefix: path.join(config.prefix, 'snapshot'),
        MaxKeys: 1
    };

    const [listError] = await safe(s3.listObjectsV2(listParams));
    if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. ${formatError(listError)}`);

    const delParams = {
        Bucket: config.bucket,
        Key: path.join(config.prefix, 'snapshot/cloudron-testfile')
    };

    const [delError] = await safe(s3.deleteObject(delParams));
    if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`);

    const newConfig = _.pick(config, ['accessKeyId', 'secretAccessKey', 'bucket', 'prefix', 'signatureVersion', 'endpoint', 'region', 'acceptSelfSignedCerts', 's3ForcePathStyle']);
    return { _provider: provider, ...newConfig };
}
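
// Illustrative sketch (assumed values, not part of this change): a config object that
// would pass the field checks above. All identifiers and values below are placeholders:
//   await verifyConfig({
//       id: 'backup-s3',
//       provider: 's3',
//       config: {
//           accessKeyId: 'AKIA...',
//           secretAccessKey: '...',
//           bucket: 'my-backup-bucket',          // lowercase, no '/', '_' or capitals
//           prefix: 'cloudron',
//           endpoint: 'https://s3.example.com',  // optional, must start with http:// or https://
//           region: 'us-east-1'                  // optional
//       }
//   });
// Beyond the field checks, verifyConfig() also puts, lists and deletes
// snapshot/cloudron-testfile to verify the credentials actually work against the bucket.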

async function setup(config) {
    assert.strictEqual(typeof config, 'object');
}

async function teardown(config) {
    assert.strictEqual(typeof config, 'object');
}

function removePrivateFields(config) {
    delete config.secretAccessKey;
    delete config._provider;
    return config;
}

function injectPrivateFields(newConfig, currentConfig) {
    if (!Object.hasOwn(newConfig, 'secretAccessKey')) newConfig.secretAccessKey = currentConfig.secretAccessKey;
    newConfig._provider = currentConfig._provider;
}
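
// Illustrative sketch (not part of this change): removePrivateFields() strips the secret
// and internal provider marker before a config is exposed, and injectPrivateFields()
// copies them back from the currently stored config when a caller omits them, e.g.
//   const publicConfig = removePrivateFields({ ...currentConfig }); // no secretAccessKey/_provider
//   injectPrivateFields(publicConfig, currentConfig);               // secretAccessKey/_provider restored
// Note that removePrivateFields() mutates and returns the object passed to it.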

async function copyInternal(config, fullFromPath, fullToPath, fileSize, progressCallback) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof fullFromPath, 'string');
@@ -497,180 +669,7 @@ async function copyDir(config, limits, fromPath, toPath, progressCallback) {
    progressCallback({ message: `Copied ${total} files` });
}

async function remove(config, remotePath) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });

    const fullRemotePath = path.join(config.prefix, remotePath);

    const deleteParams = {
        Bucket: config.bucket,
        Key: fullRemotePath
    };

    // deleteObject does not return error if key is not found
    const [error] = await safe(s3.deleteObject(deleteParams));
    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${fullRemotePath}. ${formatError(error)}`);
}

function chunk(array, size) {
    assert(Array.isArray(array));
    assert.strictEqual(typeof size, 'number');

    const length = array.length;
    if (!length) return [];
    let index = 0, resIndex = 0;
    const result = Array(Math.ceil(length / size));

    for (; index < length; index += size) {
        result[resIndex++] = array.slice(index, index + size);
    }

    return result;
}

async function removeDir(config, limits, remotePathPrefix, progressCallback) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof limits, 'object');
    assert.strictEqual(typeof remotePathPrefix, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    // only use this client for DeleteObjects call. It forces md5 checksum and for anything else, it might crash
    const deleteObjectsS3Client = createS3Client(config, { retryStrategy: RETRY_STRATEGY, deleteObjects: true });

    let total = 0;
    let marker = null;
    while (true) {
        const batch = await listDir(config, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix

        const entries = batch.entries;
        total += entries.length;

        const chunkSize = limits.deleteConcurrency || (config._provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
        const chunks = chunk(entries, chunkSize);

        await async.eachSeries(chunks, async function deleteFiles(objects) {
            const deleteParams = {
                Bucket: config.bucket,
                Delete: {
                    Objects: objects.map(function (o) { return { Key: path.join(config.prefix, o.path) }; })
                }
            };

            const fullFirstPath = path.join(config.prefix, objects[0].path), fullLastPath = path.join(config.prefix, objects[objects.length - 1].path);
            progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` });

            // deleteObjects does not return error if key is not found
            const [error] = await safe(deleteObjectsS3Client.deleteObjects(deleteParams));
            if (error) {
                progressCallback({ message: `Unable to remove from ${fullFirstPath} to ${fullLastPath} ${error.message || error.Code}` });
                throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove from ${fullFirstPath} to ${fullLastPath}. error: ${error.message}`);
            }
        });

        if (!batch.marker) break;
        marker = batch.marker;
    }

    progressCallback({ message: `Removed ${total} files` });
}

// often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket resulting in large bucket sizes over time
async function cleanup(config, progressCallback) {
    assert.strictEqual(typeof config, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });

    const uploads = await s3.listMultipartUploads({ Bucket: config.bucket, Prefix: config.prefix });
    progressCallback({ message: `Cleaning up any aborted multi-part uploads. count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` });
    if (!uploads.Uploads) return;

    for (const upload of uploads.Uploads) {
        if (Date.now() - new Date(upload.Initiated) < 3 * 24 * 60 * 60 * 1000) continue; // 3 days ago
        progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` });
        await safe(s3.abortMultipartUpload({ Bucket: config.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
    }
}

async function verifyConfig({ id, provider, config }) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof provider, 'string');
    assert.strictEqual(typeof config, 'object');

    if (typeof config.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a string');
    if (typeof config.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a string');

    if (typeof config.bucket !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'bucket must be a string');
    // the node module seems to incorrectly accept bucket name with '/'
    if (config.bucket.includes('/')) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "/"');

    // names must be lowercase and start with a letter or number. can contain dashes
    if (config.bucket.includes('_') || config.bucket.match(/[A-Z]/)) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "_" or capitals');

    if (typeof config.prefix !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string');
    if ('signatureVersion' in config && typeof config.signatureVersion !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'signatureVersion must be a string');
    if ('endpoint' in config) {
        if (typeof config.endpoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'endpoint must be a string');
        if (!config.endpoint.startsWith('http://') && !config.endpoint.startsWith('https://')) throw new BoxError(BoxError.BAD_FIELD, 'endpoint must start with http:// or https://');
    }

    if ('region' in config && typeof config.region !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'region must be a string');

    if ('acceptSelfSignedCerts' in config && typeof config.acceptSelfSignedCerts !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'acceptSelfSignedCerts must be a boolean');
    if ('s3ForcePathStyle' in config && typeof config.s3ForcePathStyle !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 's3ForcePathStyle must be a boolean');

    const putParams = {
        Bucket: config.bucket,
        Key: path.join(config.prefix, 'snapshot/cloudron-testfile'),
        Body: 'testcontent'
    };

    const s3 = createS3Client(config, {});
    const [putError] = await safe(s3.putObject(putParams));
    if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. ${formatError(putError)}`);

    const listParams = {
        Bucket: config.bucket,
        Prefix: path.join(config.prefix, 'snapshot'),
        MaxKeys: 1
    };

    const [listError] = await safe(s3.listObjectsV2(listParams));
    if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. ${formatError(listError)}`);

    const delParams = {
        Bucket: config.bucket,
        Key: path.join(config.prefix, 'snapshot/cloudron-testfile')
    };

    const [delError] = await safe(s3.deleteObject(delParams));
    if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`);

    const newConfig = _.pick(config, ['accessKeyId', 'secretAccessKey', 'bucket', 'prefix', 'signatureVersion', 'endpoint', 'region', 'acceptSelfSignedCerts', 's3ForcePathStyle']);
    return { _provider: provider, ...newConfig };
}

async function setup(config) {
    assert.strictEqual(typeof config, 'object');
}

async function teardown(config) {
    assert.strictEqual(typeof config, 'object');
}

function removePrivateFields(config) {
    delete config.secretAccessKey;
    delete config._provider;
    return config;
}

function injectPrivateFields(newConfig, currentConfig) {
    if (!Object.hasOwn(newConfig, 'secretAccessKey')) newConfig.secretAccessKey = currentConfig.secretAccessKey;
    newConfig._provider = currentConfig._provider;
}

const _chunk = chunk;

export default {
    setup,