diff --git a/src/storage/gcs.js b/src/storage/gcs.js
index 0fb884510..b3ad8a6db 100644
--- a/src/storage/gcs.js
+++ b/src/storage/gcs.js
@@ -35,47 +35,47 @@ const assert = require('node:assert'),
     safe = require('safetydance'),
     _ = require('../underscore.js');
-function getBucket(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+function getBucket(config) {
+    assert.strictEqual(typeof config, 'object');
     const gcsConfig = {
-        projectId: apiConfig.projectId,
+        projectId: config.projectId,
         credentials: {
-            client_email: apiConfig.credentials.client_email,
-            private_key: apiConfig.credentials.private_key
+            client_email: config.credentials.client_email,
+            private_key: config.credentials.private_key
         }
     };
     const gcs = constants.TEST ? new globalThis.GCSMock(gcsConfig) : new GCS(gcsConfig);
-    return gcs.bucket(apiConfig.bucket);
+    return gcs.bucket(config.bucket);
 }
-async function getAvailableSize(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getAvailableSize(config) {
+    assert.strictEqual(typeof config, 'object');
     return Number.POSITIVE_INFINITY;
 }
-async function getStatus(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getStatus(config) {
+    assert.strictEqual(typeof config, 'object');
-    const bucket = getBucket(apiConfig);
-    const query = { prefix: path.join(apiConfig.prefix, 'snapshot'), autoPaginate: false, maxResults: 1 };
+    const bucket = getBucket(config);
+    const query = { prefix: path.join(config.prefix, 'snapshot'), autoPaginate: false, maxResults: 1 };
     const [listError] = await safe(bucket.getFiles(query));
     if (listError) return { state: 'inactive', message: `Failed to get files: ${listError.message}` };
     return { state: 'active', message: '' };
 }
-async function upload(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function upload(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     debug(`Uploading to ${fullRemotePath}`);
-    const uploadStream = getBucket(apiConfig)
+    const uploadStream = getBucket(config)
         .file(fullRemotePath)
         .createWriteStream({ resumable: false });
@@ -85,12 +85,12 @@ async function upload(apiConfig, remotePath) {
     };
 }
-async function exists(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function exists(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const bucket = getBucket(apiConfig);
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const bucket = getBucket(config);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     if (!fullRemotePath.endsWith('/')) {
         const file = bucket.file(fullRemotePath);
@@ -113,25 +113,25 @@ async function exists(apiConfig, remotePath) {
     }
 }
-async function download(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function download(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     debug(`Download ${fullRemotePath} starting`);
-    const file = getBucket(apiConfig).file(fullRemotePath);
+    const file = getBucket(config).file(fullRemotePath);
     return file.createReadStream();
 }
-async function listDir(apiConfig, remotePath, batchSize, marker) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function listDir(config, remotePath, batchSize, marker) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
     assert.strictEqual(typeof batchSize, 'number');
     assert(typeof marker !== 'undefined');
-    const bucket = getBucket(apiConfig);
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const bucket = getBucket(config);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     const query = marker || { prefix: fullRemotePath + '/', autoPaginate: false, maxResults: batchSize };
@@ -140,42 +140,42 @@ async function listDir(apiConfig, remotePath, batchSize, marker) {
     const [files, nextQuery] = result;
     if (files.length === 0) return { entries: [], marker: null }; // no more
-    const entries = files.map(function (f) { return { path: path.relative(apiConfig.prefix, f.name) }; });
+    const entries = files.map(function (f) { return { path: path.relative(config.prefix, f.name) }; });
     return { entries, marker: nextQuery || null };
 }
-async function copy(apiConfig, fullFromPath, fullToPath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copy(config, fullFromPath, fullToPath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof fullFromPath, 'string');
     assert.strictEqual(typeof fullToPath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
-    const [copyError] = await safe(getBucket(apiConfig).file(fullFromPath).copy(fullToPath));
+    const [copyError] = await safe(getBucket(config).file(fullFromPath).copy(fullToPath));
     if (copyError) debug('copyBackup: gcs copy error. %o', copyError);
     if (copyError && copyError.code === 404) throw new BoxError(BoxError.NOT_FOUND, 'Old backup not found');
     if (copyError) throw new BoxError(BoxError.EXTERNAL_ERROR, copyError.message);
 }
-async function copyDir(apiConfig, fromPath, toPath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copyDir(config, fromPath, toPath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof fromPath, 'string');
     assert.strictEqual(typeof toPath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
     const batchSize = 1000;
-    const concurrency = apiConfig.limits?.copyConcurrency || 10;
+    const concurrency = config.limits?.copyConcurrency || 10;
     let total = 0;
     let marker = null;
     while (true) {
-        const batch = await listDir(apiConfig, fromPath, batchSize, marker); // returns entries relative to fromPath
+        const batch = await listDir(config, fromPath, batchSize, marker); // returns entries relative to fromPath
         if (batch.entries.length === 0) break;
         total += batch.entries.length;
         progressCallback({ message: `Copying ${batch.entries.length} files from ${batch.entries[0].path} to ${batch.entries[batch.entries.length-1].path}. total: ${total}` });
         await async.eachLimit(batch.entries, concurrency, async (entry) => {
-            const fullFromPath = path.join(apiConfig.prefix, entry.path);
-            const fullToPath = path.join(apiConfig.prefix, toPath, path.relative(fromPath, entry.path));
-            await copy(apiConfig, fullFromPath, fullToPath, progressCallback);
+            const fullFromPath = path.join(config.prefix, entry.path);
+            const fullToPath = path.join(config.prefix, toPath, path.relative(fromPath, entry.path));
+            await copy(config, fullFromPath, fullToPath, progressCallback);
         });
         if (!batch.marker) break;
         marker = batch.marker;
@@ -183,39 +183,39 @@ async function copyDir(apiConfig, fromPath, toPath, progressCallback) {
     progressCallback({ message: `Copied ${total} files` });
 }
-async function remove(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function remove(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
-    const [error] = await safe(getBucket(apiConfig).file(fullRemotePath).delete());
+    const fullRemotePath = path.join(config.prefix, remotePath);
+    const [error] = await safe(getBucket(config).file(fullRemotePath).delete());
     if (error) debug('removeBackups: Unable to remove %s (%s). Not fatal.', fullRemotePath, error.message);
 }
-async function removeDir(apiConfig, remotePathPrefix, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function removeDir(config, remotePathPrefix, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePathPrefix, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
-    const batchSize = 1000, concurrency = apiConfig.limits?.deleteConcurrency || 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles
+    const batchSize = 1000, concurrency = config.limits?.deleteConcurrency || 10; // https://googleapis.dev/nodejs/storage/latest/Bucket.html#deleteFiles
     let total = 0;
     let marker = null;
     while (true) {
-        const batch = await listDir(apiConfig, remotePathPrefix, batchSize, marker); // adds 'prefix'
+        const batch = await listDir(config, remotePathPrefix, batchSize, marker); // adds 'prefix'
        if (batch.entries.length === 0) break;
         const entries = batch.entries;
         total += entries.length;
         progressCallback({ message: `Removing ${entries.length} files from ${entries[0].path} to ${entries[entries.length-1].path}. total: ${total}` });
-        await async.eachLimit(entries, concurrency, async (entry) => await remove(apiConfig, entry.path)); // remove will add 'prefix'
+        await async.eachLimit(entries, concurrency, async (entry) => await remove(config, entry.path)); // remove will add 'prefix'
         if (!batch.marker) break;
         marker = batch.marker;
     }
     progressCallback({ progress: `Deleted ${total} files` });
 }
-async function cleanup(apiConfig, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function cleanup(config, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof progressCallback, 'function');
 }
@@ -266,17 +266,17 @@ async function verifyConfig({ id, provider, config }) {
     return _.pick(config, ['projectId', 'credentials', 'bucket', 'prefix']);
 }
-async function setup(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function setup(config) {
+    assert.strictEqual(typeof config, 'object');
 }
-async function teardown(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function teardown(config) {
+    assert.strictEqual(typeof config, 'object');
 }
-function removePrivateFields(apiConfig) {
-    delete apiConfig.credentials.private_key;
-    return apiConfig;
+function removePrivateFields(config) {
+    delete config.credentials.private_key;
+    return config;
 }
 function injectPrivateFields(newConfig, currentConfig) {
diff --git a/src/storage/interface.js b/src/storage/interface.js
index 934874b7d..9e00fc286 100644
--- a/src/storage/interface.js
+++ b/src/storage/interface.js
@@ -39,9 +39,9 @@ exports = module.exports = {
 const assert = require('node:assert'),
     BoxError = require('../boxerror.js');
-function removePrivateFields(apiConfig) {
+function removePrivateFields(config) {
     // in-place removal of tokens and api keys
-    return apiConfig;
+    return config;
 }
 // eslint-disable-next-line no-unused-vars
@@ -49,45 +49,45 @@ function injectPrivateFields(newConfig, currentConfig) {
     // in-place injection of tokens and api keys
 }
-async function getAvailableSize(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getAvailableSize(config) {
+    assert.strictEqual(typeof config, 'object');
     return Number.POSITIVE_INFINITY;
 }
-async function getStatus(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getStatus(config) {
+    assert.strictEqual(typeof config, 'object');
     // Result: { state, message } . state is 'active' or 'inactive'
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'getStatus is not implemented');
 }
-async function upload(apiConfig, backupFilePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function upload(config, backupFilePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof backupFilePath, 'string');
     // Result: { stream, finish() callback }
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'upload is not implemented');
 }
-async function exists(apiConfig, backupFilePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function exists(config, backupFilePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof backupFilePath, 'string');
     // Result: boolean if exists or not
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'exists is not implemented');
 }
-async function download(apiConfig, backupFilePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function download(config, backupFilePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof backupFilePath, 'string');
     // Result: download stream
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'download is not implemented');
 }
-async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copy(config, oldFilePath, newFilePath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof oldFilePath, 'string');
     assert.strictEqual(typeof newFilePath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
@@ -95,8 +95,8 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'copy is not implemented');
 }
-async function copyDir(apiConfig, oldFilePath, newFilePath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copyDir(config, oldFilePath, newFilePath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof oldFilePath, 'string');
     assert.strictEqual(typeof newFilePath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
@@ -104,8 +104,8 @@ async function copyDir(apiConfig, oldFilePath, newFilePath, progressCallback) {
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'copy is not implemented');
 }
-async function listDir(apiConfig, dir, batchSize, marker) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function listDir(config, dir, batchSize, marker) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof dir, 'string');
     assert.strictEqual(typeof batchSize, 'number');
     assert(typeof marker !== 'undefined');
@@ -115,16 +115,16 @@ async function listDir(apiConfig, dir, batchSize, marker) {
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'listDir is not implemented');
 }
-async function remove(apiConfig, filename) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function remove(config, filename) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof filename, 'string');
     // Result: none
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'remove is not implemented');
 }
-async function removeDir(apiConfig, pathPrefix, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function removeDir(config, pathPrefix, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof pathPrefix, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
@@ -132,8 +132,8 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) {
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'removeDir is not implemented');
 }
-async function cleanup(apiConfig, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function cleanup(config, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof progressCallback, 'function');
     // Result: none
@@ -150,16 +150,16 @@ async function verifyConfig({ id, provider, config }) {
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'testConfig is not implemented');
 }
-async function setup(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function setup(config) {
+    assert.strictEqual(typeof config, 'object');
     // Result: none - first callback argument error if config does not pass the test
     throw new BoxError(BoxError.NOT_IMPLEMENTED, 'setup is not implemented');
 }
-async function teardown(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function teardown(config) {
+    assert.strictEqual(typeof config, 'object');
     // Result: none - first callback argument error if config does not pass the test
diff --git a/src/storage/s3.js b/src/storage/s3.js
index 2c50857c6..b5a37b2ff 100644
--- a/src/storage/s3.js
+++ b/src/storage/s3.js
@@ -87,17 +87,17 @@ const md5Middleware = (next, context) => async (args) => {
     return await next(args);
 };
-function createS3Client(apiConfig, options) {
-    assert.strictEqual(typeof apiConfig, 'object');
+function createS3Client(config, options) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof options, 'object');
     const credentials = {
-        accessKeyId: apiConfig.accessKeyId,
-        secretAccessKey: apiConfig.secretAccessKey
+        accessKeyId: config.accessKeyId,
+        secretAccessKey: config.secretAccessKey
     };
-    const isHttps = apiConfig.endpoint?.startsWith('https://') || apiConfig._provider === 's3';
-    const needsSelfSignedAgent = isHttps && (apiConfig.acceptSelfSignedCerts || apiConfig.bucket.includes('.'));
+    const isHttps = config.endpoint?.startsWith('https://') || config._provider === 's3';
+    const needsSelfSignedAgent = isHttps && (config.acceptSelfSignedCerts || config.bucket.includes('.'));
     const requestHandler = new NodeHttpHandler({
         connectionTimeout: 60000,
@@ -108,12 +108,12 @@ function createS3Client(apiConfig, options) {
     // sdk v3 only has signature support v4
     const clientConfig = {
-        forcePathStyle: apiConfig.s3ForcePathStyle === true ? true : false, // Use vhost style instead of path style - https://forums.aws.amazon.com/ann.jspa?annID=6776
-        region: apiConfig.region || 'us-east-1',
+        forcePathStyle: config.s3ForcePathStyle === true ? true : false, // Use vhost style instead of path style - https://forums.aws.amazon.com/ann.jspa?annID=6776
+        region: config.region || 'us-east-1',
         credentials,
         requestHandler,
         ...(options.retryStrategy && { retryStrategy: options.retryStrategy }),
-        ...(apiConfig.endpoint && { endpoint: apiConfig.endpoint }),
+        ...(config.endpoint && { endpoint: config.endpoint }),
         // logger: console
     };
@@ -133,7 +133,7 @@ function createS3Client(apiConfig, options) {
     // });
     // This ensures it runs after default checksums might be added, but before signing
-    if (options.deleteObjects && apiConfig._provider !== 's3') {
+    if (options.deleteObjects && config._provider !== 's3') {
         // flexibleChecksumsMiddleware is only present when the request has a body. Only use this for DeleteObjects call. Other requests without a body will crash
         client.middlewareStack.addRelativeTo(md5Middleware, {
             relation: 'after',
@@ -146,20 +146,20 @@ function createS3Client(apiConfig, options) {
     return client;
 }
-async function getAvailableSize(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getAvailableSize(config) {
+    assert.strictEqual(typeof config, 'object');
     return Number.POSITIVE_INFINITY;
 }
-async function getStatus(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function getStatus(config) {
+    assert.strictEqual(typeof config, 'object');
-    const s3 = createS3Client(apiConfig, { retryStrategy: null });
+    const s3 = createS3Client(config, { retryStrategy: null });
     const listParams = {
-        Bucket: apiConfig.bucket,
-        Prefix: path.join(apiConfig.prefix, 'snapshot'),
+        Bucket: config.bucket,
+        Prefix: path.join(config.prefix, 'snapshot'),
         MaxKeys: 1
     };
@@ -169,25 +169,25 @@ async function getStatus(apiConfig) {
     return { state: 'active', message: '' };
 }
-async function upload(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function upload(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });
     // s3.upload automatically does a multi-part upload. we set queueSize to 3 to reduce memory usage
     // uploader will buffer at most queueSize * partSize bytes into memory at any given time.
     // scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/)
     // s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no size limit on the last part!)
-    const partSize = apiConfig.limits?.uploadPartSize || (apiConfig._provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024);
+    const partSize = config.limits?.uploadPartSize || (config._provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024);
     const passThrough = new PassThrough();
     const options = {
         client: s3,
         params: {
-            Bucket: apiConfig.bucket,
-            Key: path.join(apiConfig.prefix, remotePath),
+            Bucket: config.bucket,
+            Key: path.join(config.prefix, remotePath),
             Body: passThrough
         },
         partSize,
@@ -209,17 +209,17 @@ async function upload(apiConfig, remotePath) {
     };
 }
-async function exists(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function exists(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const s3 = createS3Client(apiConfig, { retryStrategy: null });
+    const s3 = createS3Client(config, { retryStrategy: null });
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     if (!fullRemotePath.endsWith('/')) { // check for file
         const params = {
-            Bucket: apiConfig.bucket,
+            Bucket: config.bucket,
             Key: fullRemotePath
         };
@@ -231,7 +231,7 @@ async function exists(apiConfig, remotePath) {
         return true;
     } else { // list dir contents
         const listParams = {
-            Bucket: apiConfig.bucket,
+            Bucket: config.bucket,
             Prefix: fullRemotePath,
             MaxKeys: 1
         };
@@ -323,29 +323,29 @@ class S3MultipartDownloadStream extends Readable {
     }
 }
-async function download(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function download(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
     const params = {
-        Bucket: apiConfig.bucket,
-        Key: path.join(apiConfig.prefix, remotePath)
+        Bucket: config.bucket,
+        Key: path.join(config.prefix, remotePath)
     };
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });
     return new S3MultipartDownloadStream(s3, params, { blockSize: 64 * 1024 * 1024 });
 }
-async function listDir(apiConfig, remotePath, batchSize, marker) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function listDir(config, remotePath, batchSize, marker) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
     assert.strictEqual(typeof batchSize, 'number');
     assert(typeof marker !== 'undefined');
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });
+    const fullRemotePath = path.join(config.prefix, remotePath);
     const listParams = {
-        Bucket: apiConfig.bucket,
+        Bucket: config.bucket,
         Prefix: fullRemotePath + '/', // ensure we list contents of the directory and not match other filenames with prefix
         MaxKeys: batchSize
     };
@@ -354,7 +354,7 @@ async function listDir(apiConfig, remotePath, batchSize, marker) {
     const [error, listData] = await safe(s3.listObjectsV2(listParams));
     if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${fullRemotePath}. ${formatError(error)}`);
     if (listData.KeyCount === 0 || listData.Contents.length === 0) return { entries: [], marker: null }; // no more
-    const entries = listData.Contents.map(function (c) { return { path: path.relative(apiConfig.prefix, c.Key), size: c.Size }; });
+    const entries = listData.Contents.map(function (c) { return { path: path.relative(config.prefix, c.Key), size: c.Size }; });
     return { entries, marker: !listData.IsTruncated ? null : listData.NextContinuationToken };
 }
@@ -371,14 +371,14 @@ function encodeCopySource(bucket, path) {
     return `/${bucket}/${output}`;
 }
-async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copyInternal(config, fullFromPath, fullToPath, fileSize, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof fullFromPath, 'string');
     assert.strictEqual(typeof fullToPath, 'string');
     assert.strictEqual(typeof fileSize, 'number');
     assert.strictEqual(typeof progressCallback, 'function');
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
     function throwError(error) {
         if (error) debug(`copy: s3 copy error when copying ${fullFromPath}: ${error}`);
@@ -388,18 +388,18 @@ async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progr
     }
     const copyParams = {
-        Bucket: apiConfig.bucket,
+        Bucket: config.bucket,
         Key: fullToPath
     };
     // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy
-    const provider = apiConfig._provider;
+    const provider = config._provider;
     const largeFileLimit = (provider === 'vultr-objectstorage' || provider === 'exoscale-sos' || provider === 'backblaze-b2' || provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 3 * 1024 * 1024 * 1024;
     if (fileSize < largeFileLimit) {
         progressCallback({ message: `Copying ${fullFromPath}` });
-        copyParams.CopySource = encodeCopySource(apiConfig.bucket, fullFromPath);
+        copyParams.CopySource = encodeCopySource(config.bucket, fullFromPath);
         const [copyError] = await safe(s3.copyObject(copyParams));
         if (copyError) return throwError(copyError);
         return;
@@ -424,9 +424,9 @@ async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progr
     const [copyError] = await safe(async.eachOfLimit(ranges, 3, async function copyChunk(range, index) {
         const partCopyParams = {
-            Bucket: apiConfig.bucket,
+            Bucket: config.bucket,
             Key: fullToPath,
-            CopySource: encodeCopySource(apiConfig.bucket, fullFromPath), // See aws-sdk-js/issues/1302
+            CopySource: encodeCopySource(config.bucket, fullFromPath), // See aws-sdk-js/issues/1302
             CopySourceRange: 'bytes=' + range.startBytes + '-' + range.endBytes,
             PartNumber: index+1,
             UploadId: uploadId
@@ -444,7 +444,7 @@ async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progr
     if (copyError) {
         const abortParams = {
-            Bucket: apiConfig.bucket,
+            Bucket: config.bucket,
             Key: fullToPath,
             UploadId: uploadId
         };
@@ -454,7 +454,7 @@ async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progr
     }
     const completeMultipartParams = {
-        Bucket: apiConfig.bucket,
+        Bucket: config.bucket,
         Key: fullToPath,
         MultipartUpload: { Parts: uploadedParts },
         UploadId: uploadId
@@ -466,46 +466,46 @@ async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progr
     if (completeMultipartError) return throwError(completeMultipartError);
 }
-async function copy(apiConfig, fromPath, toPath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copy(config, fromPath, toPath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof fromPath, 'string');
     assert.strictEqual(typeof toPath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
-    const fullFromPath = path.join(apiConfig.prefix, fromPath);
-    const fullToPath = path.join(apiConfig.prefix, toPath);
+    const fullFromPath = path.join(config.prefix, fromPath);
+    const fullToPath = path.join(config.prefix, toPath);
     const params = {
-        Bucket: apiConfig.bucket,
+        Bucket: config.bucket,
         Key: fullFromPath
     };
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
     const [error, data] = await safe(s3.headObject(params));
     if (error && S3_NOT_FOUND(error)) throw new BoxError(BoxError.NOT_FOUND, `Path ${fromPath} not found`);
     if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${fromPath}. ${formatError(error)}`);
-    return await copyInternal(apiConfig, fullFromPath, fullToPath, data.ContentLength, progressCallback);
+    return await copyInternal(config, fullFromPath, fullToPath, data.ContentLength, progressCallback);
 }
-async function copyDir(apiConfig, fromPath, toPath, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function copyDir(config, fromPath, toPath, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof fromPath, 'string');
     assert.strictEqual(typeof toPath, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
     let total = 0;
-    const concurrency = apiConfig.limits?.copyConcurrency || (apiConfig._provider === 's3' ? 500 : 10);
+    const concurrency = config.limits?.copyConcurrency || (config._provider === 's3' ? 500 : 10);
     progressCallback({ message: `Copying ${fromPath} to ${toPath} with concurrency of ${concurrency}` });
     let marker = null;
     while (true) {
-        const batch = await listDir(apiConfig, fromPath, 1000, marker); // returned entries are relative to prefix
+        const batch = await listDir(config, fromPath, 1000, marker); // returned entries are relative to prefix
         total += batch.entries.length;
         progressCallback({ message: `Copying files from ${total-batch.entries.length}-${total}` });
         await async.eachLimit(batch.entries, concurrency, async (entry) => {
-            const fullFromPath = path.join(apiConfig.prefix, entry.path);
-            const fullToPath = path.join(apiConfig.prefix, toPath, path.relative(fromPath, entry.path));
-            await copyInternal(apiConfig, fullFromPath, fullToPath, entry.size, progressCallback);
+            const fullFromPath = path.join(config.prefix, entry.path);
+            const fullToPath = path.join(config.prefix, toPath, path.relative(fromPath, entry.path));
+            await copyInternal(config, fullFromPath, fullToPath, entry.size, progressCallback);
         });
         if (!batch.marker) break;
         marker = batch.marker;
@@ -514,16 +514,16 @@ async function copyDir(apiConfig, fromPath, toPath, progressCallback) {
     progressCallback({ message: `Copied ${total} files` });
 }
-async function remove(apiConfig, remotePath) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function remove(config, remotePath) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePath, 'string');
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });
-    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
+    const fullRemotePath = path.join(config.prefix, remotePath);
     const deleteParams = {
-        Bucket: apiConfig.bucket,
+        Bucket: config.bucket,
         Key: fullRemotePath
     };
@@ -548,34 +548,34 @@ function chunk(array, size) {
     return result;
 }
-async function removeDir(apiConfig, remotePathPrefix, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function removeDir(config, remotePathPrefix, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof remotePathPrefix, 'string');
     assert.strictEqual(typeof progressCallback, 'function');
     // only use this client for DeleteObjects call. It forces md5 checksum and for anything else, it might crash
-    const deleteObjectsS3Client = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY, deleteObjects: true });
+    const deleteObjectsS3Client = createS3Client(config, { retryStrategy: RETRY_STRATEGY, deleteObjects: true });
     let total = 0;
     let marker = null;
     while (true) {
-        const batch = await listDir(apiConfig, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix
+        const batch = await listDir(config, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix
         const entries = batch.entries;
         total += entries.length;
-        const chunkSize = apiConfig.limits?.deleteConcurrency || (apiConfig._provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
+        const chunkSize = config.limits?.deleteConcurrency || (config._provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request
         const chunks = chunk(entries, chunkSize);
         await async.eachSeries(chunks, async function deleteFiles(objects) {
             const deleteParams = {
-                Bucket: apiConfig.bucket,
+                Bucket: config.bucket,
                 Delete: {
-                    Objects: objects.map(function (o) { return { Key: path.join(apiConfig.prefix, o.path) }; })
+                    Objects: objects.map(function (o) { return { Key: path.join(config.prefix, o.path) }; })
                 }
             };
-            const fullFirstPath = path.join(apiConfig.prefix, objects[0].path), fullLastPath = path.join(apiConfig.prefix, objects[objects.length-1].path);
+            const fullFirstPath = path.join(config.prefix, objects[0].path), fullLastPath = path.join(config.prefix, objects[objects.length-1].path);
             progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` });
             // deleteObjects does not return error if key is not found
@@ -594,20 +594,20 @@ async function removeDir(apiConfig, remotePathPrefix, progressCallback) {
 }
 // often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket resulting in large bucket sizes over time
-async function cleanup(apiConfig, progressCallback) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function cleanup(config, progressCallback) {
+    assert.strictEqual(typeof config, 'object');
     assert.strictEqual(typeof progressCallback, 'function');
-    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
+    const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY });
-    const uploads = await s3.listMultipartUploads({ Bucket: apiConfig.bucket, Prefix: apiConfig.prefix });
+    const uploads = await s3.listMultipartUploads({ Bucket: config.bucket, Prefix: config.prefix });
     progressCallback({ message: `Cleaning up any aborted multi-part uploads. count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` });
     if (!uploads.Uploads) return;
     for (const upload of uploads.Uploads) {
         if (Date.now() - new Date(upload.Initiated) < 3 * 24 * 60 * 60 * 1000) continue; // 3 days ago
         progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` });
-        await safe(s3.abortMultipartUpload({ Bucket: apiConfig.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
+        await safe(s3.abortMultipartUpload({ Bucket: config.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
     }
 }
@@ -665,18 +665,18 @@ async function verifyConfig({ id, provider, config }) {
     return { _provider: provider, ...newConfig };
 }
-async function setup(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function setup(config) {
+    assert.strictEqual(typeof config, 'object');
 }
-async function teardown(apiConfig) {
-    assert.strictEqual(typeof apiConfig, 'object');
+async function teardown(config) {
+    assert.strictEqual(typeof config, 'object');
 }
-function removePrivateFields(apiConfig) {
-    delete apiConfig.secretAccessKey;
-    delete apiConfig._provider;
-    return apiConfig;
+function removePrivateFields(config) {
+    delete config.secretAccessKey;
+    delete config._provider;
+    return config;
 }
 function injectPrivateFields(newConfig, currentConfig) {
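
For reviewers, a minimal sketch (not part of the patch) of how a caller pages through a backend using the marker-based listDir() contract that copyDir() and removeDir() above rely on after the rename. The require path, the assumption that gcs.js exports listDir() as described in interface.js, and the config field values are illustrative only; any module implementing src/storage/interface.js should behave the same way.

    'use strict';

    // Hypothetical consumer of the storage interface; not part of the diff.
    const storage = require('./src/storage/gcs.js'); // assumed to export listDir() per interface.js

    // config is the (renamed) first argument every backend function now takes; field values are placeholders.
    const config = { projectId: 'my-project', credentials: { client_email: '...', private_key: '...' }, bucket: 'backups', prefix: 'box' };

    async function listAllEntries(dir) {
        const entries = [];
        let marker = null; // listDir() asserts marker is not undefined, so start from null
        while (true) {
            const batch = await storage.listDir(config, dir, 1000, marker); // -> { entries, marker }
            entries.push(...batch.entries); // each entry has a path relative to config.prefix (s3 also adds size)
            if (!batch.marker) break; // a null marker means there are no more batches
            marker = batch.marker;
        }
        return entries;
    }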