Files
cloudron-box/src/storage/s3.js
2025-11-05 16:52:32 +01:00

693 lines
28 KiB
JavaScript

'use strict';

// S3-compatible storage backend for Cloudron backups. Every exported function
// takes an apiConfig object (accessKeyId, secretAccessKey, bucket, prefix and
// optional endpoint/region/limits fields validated in verifyConfig()).
exports = module.exports = {
    // lifecycle
    setup,
    teardown,
    cleanup,
    verifyConfig,
    removePrivateFields,
    injectPrivateFields,
    getAvailableSize,
    getStatus,

    // object operations
    upload,
    exists,
    download,
    copy,
    copyDir,
    listDir,
    remove,
    removeDir,

    // Used to mock AWS
    _chunk: chunk
};
const assert = require('node:assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
{ ConfiguredRetryStrategy } = require('@smithy/util-retry'),
constants = require('../constants.js'),
consumers = require('node:stream/consumers'),
crypto = require('node:crypto'),
debug = require('debug')('box:storage/s3'),
http = require('node:http'),
https = require('node:https'),
{ NodeHttpHandler } = require('@smithy/node-http-handler'),
{ PassThrough } = require('node:stream'),
path = require('node:path'),
{ Readable } = require('node:stream'),
{ S3, NoSuchKey, NoSuchBucket } = require('@aws-sdk/client-s3'),
safe = require('safetydance'),
{ Upload } = require('@aws-sdk/lib-storage'),
_ = require('../underscore.js');
// Returns true when the s3 error indicates a missing key or bucket.
function S3_NOT_FOUND(error) {
    const notFoundErrors = [NoSuchKey, NoSuchBucket];
    return notFoundErrors.some((errorType) => error instanceof errorType);
}
// Renders an s3 error into a single human-readable line for error messages.
function formatError(error) {
    const { Code, message } = error;
    // $metadata can be undefined if HTTP request was never sent
    const httpStatusCode = error.$metadata?.httpStatusCode;
    return `code: ${Code} message: ${message} HTTP: ${httpStatusCode}`;
}
const RETRY_STRATEGY = new ConfiguredRetryStrategy(10 /* max attempts */, (/* attempt */) => 20000 /* constant backoff */);
// AWS decided to use CRC32 for checksums. The Client SDK has then been changed to set this for requests as default.
// requestChecksumCalculation: "WHEN_REQUIRED", responseChecksumValidation: "WHEN_REQUIRED", "checksumAlgorithm": "md5" all don't work
// see also: https://github.com/aws/aws-sdk-js-v3/issues/6810 https://github.com/aws/aws-sdk-js-v3/issues/6819 https://github.com/aws/aws-sdk-js-v3/issues/6761
// this implements https://github.com/aws/aws-sdk-js-v3/blob/main/supplemental-docs/MD5_FALLBACK.md
const md5Middleware = (next, context) => async (args) => {
    // only DeleteObjects needs the Content-MD5 fallback; pass every other command through untouched
    if (context.commandName !== 'DeleteObjectsCommand') return next(args);

    const { headers } = args.request;

    // Remove any checksum headers added by default middleware. This ensures our Content-MD5 is the primary integrity check
    Object.keys(headers)
        .filter((name) => {
            const lowerName = name.toLowerCase();
            return lowerName.startsWith('x-amz-checksum-') || lowerName.startsWith('x-amz-sdk-checksum-');
        })
        .forEach((name) => delete headers[name]);

    if (args.request.body) {
        headers['Content-MD5'] = crypto.createHash('md5').update(Buffer.from(args.request.body)).digest('base64');
    }

    // DO spaces won't respond to 100-continue. not sure why. 76f365f7e8233efb335ba386a9f558b09238b08a has another way to delete this header
    delete headers.Expect;

    return await next(args);
};
// Builds an S3 client for the given backup config.
// options: { retryStrategy: ConfiguredRetryStrategy|null, deleteObjects: boolean }
function createS3Client(apiConfig, options) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof options, 'object');

    const requestHandler = new NodeHttpHandler({
        connectionTimeout: 60000,
        socketTimeout: 20 * 60 * 1000
    });

    // sdk v3 only has signature support v4
    const clientConfig = {
        // Use vhost style instead of path style - https://forums.aws.amazon.com/ann.jspa?annID=6776
        forcePathStyle: apiConfig.s3ForcePathStyle === true,
        region: apiConfig.region || 'us-east-1',
        credentials: {
            accessKeyId: apiConfig.accessKeyId,
            secretAccessKey: apiConfig.secretAccessKey
        },
        requestHandler,
        // logger: console
    };
    if (options.retryStrategy) clientConfig.retryStrategy = options.retryStrategy;
    if (apiConfig.endpoint) clientConfig.endpoint = apiConfig.endpoint;

    // s3 endpoint names come from the SDK
    const usesHttps = clientConfig.endpoint?.startsWith('https://') || apiConfig._provider === 's3';
    if (!usesHttps) {
        requestHandler.agent = new http.Agent({}); // http agent is required for http endpoints
    } else if (apiConfig.acceptSelfSignedCerts || apiConfig.bucket.includes('.')) {
        // dotted bucket names break TLS wildcard verification in vhost style, so skip cert validation
        requestHandler.agent = new https.Agent({ rejectUnauthorized: false });
    }

    const client = constants.TEST ? new globalThis.S3Mock(clientConfig) : new S3(clientConfig);

    // https://github.com/aws/aws-sdk-js-v3/issues/6761#issuecomment-2574480834
    // flexibleChecksumsMiddleware is only present when the request has a body. Only use this for DeleteObjects call. Other requests without a body will crash
    if (options.deleteObjects && apiConfig._provider !== 's3') {
        client.middlewareStack.addRelativeTo(md5Middleware, {
            relation: 'after', // ensures it runs after default checksums might be added, but before signing
            toMiddleware: 'flexibleChecksumsMiddleware',
            name: 'addMD5ChecksumForDeleteObjects',
            tags: ['MD5_FALLBACK'],
        });
    }

    return client;
}
// S3 buckets have no practical size limit, so always report unlimited space.
async function getAvailableSize(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');
    return Infinity;
}
// Probes the bucket with a cheap one-key list call to report whether the storage is reachable.
async function getStatus(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');

    const s3 = createS3Client(apiConfig, { retryStrategy: null });
    const [listError] = await safe(s3.listObjectsV2({
        Bucket: apiConfig.bucket,
        Prefix: path.join(apiConfig.prefix, 'snapshot'),
        MaxKeys: 1
    }));

    // NOTE(review): the failure path returns the key 'status' while the success path
    // returns 'state' - looks inconsistent; confirm which key callers actually read
    if (listError) return { status: 'inactive', message: `Error listing objects. ${formatError(listError)}` };
    return { state: 'active', message: '' };
}
// Starts a streaming multi-part upload to remotePath. Returns { stream, finish }:
// callers write data into `stream` and then await finish() which throws
// BoxError.EXTERNAL_ERROR if the upload failed.
async function upload(apiConfig, remotePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });

    // Upload automatically does a multi-part upload. we set queueSize to 3 to reduce memory usage
    // uploader will buffer at most queueSize * partSize bytes into memory at any given time.
    // scaleway only supports 1000 parts per object (https://www.scaleway.com/en/docs/s3-multipart-upload/)
    // s3: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html (max 10k parts and no size limit on the last part!)
    const defaultPartSize = apiConfig._provider === 'scaleway-objectstorage' ? 100 * 1024 * 1024 : 10 * 1024 * 1024;
    const partSize = apiConfig.limits?.uploadPartSize || defaultPartSize;

    const passThrough = new PassThrough();
    const uploadOptions = {
        client: s3,
        params: {
            Bucket: apiConfig.bucket,
            Key: path.join(apiConfig.prefix, remotePath),
            Body: passThrough
        },
        partSize,
        queueSize: 3,
        leavePartsOnError: false
    };

    const managedUpload = constants.TEST ? new globalThis.S3MockUpload(uploadOptions) : new Upload(uploadOptions);
    managedUpload.on('httpUploadProgress', (progress) => debug(`Upload progress: ${JSON.stringify(progress)}`));
    const uploadPromise = managedUpload.done();

    return {
        stream: passThrough,
        async finish() {
            const [error, data] = await safe(uploadPromise);
            if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Upload error: code: ${error.Code} message: ${error.message}`); // sometimes message is null
            debug(`Upload finished. ${JSON.stringify(data)}`);
        }
    };
}
// Checks whether a file exists (or, when remotePath ends with '/', whether the
// directory has at least one entry). Returns a boolean.
// Throws BoxError.EXTERNAL_ERROR on s3 errors or non-s3 endpoints.
async function exists(apiConfig, remotePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    const s3 = createS3Client(apiConfig, { retryStrategy: null });
    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
    if (!fullRemotePath.endsWith('/')) { // check for file
        const params = {
            Bucket: apiConfig.bucket,
            Key: fullRemotePath
        };
        const [error, response] = await safe(s3.headObject(params));
        if (error && S3_NOT_FOUND(error)) return false;
        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${fullRemotePath}. ${formatError(error)}`);
        if (!response || typeof response.Metadata !== 'object') throw new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint');
        return true;
    } else { // list dir contents
        const listParams = {
            Bucket: apiConfig.bucket,
            Prefix: fullRemotePath,
            MaxKeys: 1
        };
        const [error, listData] = await safe(s3.listObjectsV2(listParams));
        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects ${fullRemotePath}. ${formatError(error)}`);
        // Contents is absent from the response when the listing is empty, so guard the
        // length access (the old short-circuit crashed when KeyCount === 0); a missing
        // KeyCount now falls back to counting Contents instead of counting as "exists"
        return (listData.KeyCount ?? 0) !== 0 || (listData.Contents?.length ?? 0) !== 0;
    }
}
// Download the object in small parts. By downloading small parts, we reduce the chance of sporadic network errors when downloading large objects
// We can retry each part individually, but we haven't had the need for this yet
class S3MultipartDownloadStream extends Readable {
    // s3: client instance, params: { Bucket, Key }, options: Readable options plus optional blockSize (bytes)
    constructor (s3, params, options) {
        super(options);
        this._s3 = s3;
        this._params = params;
        this._readSize = 0; // bytes pushed downstream so far; doubles as the offset of the next range request
        this._fileSize = -1; // unknown until the initial headObject in _fetchSize()
        this._path = params.Bucket + '/' + params.Key; // only used for error/debug messages
        this._blockSize = options.blockSize || 64 * 1048576; // MB
    }
    // signal EOF to the consumer
    _done() {
        this._readSize = 0; // NOTE(review): resets progress at EOF; presumably defensive since _read() should not run again after push(null) - confirm
        this.push(null); // EOF
    }
    // destroy the stream with a BoxError derived from the s3 error
    _handleError(error) {
        if (S3_NOT_FOUND(error)) {
            this.destroy(new BoxError(BoxError.NOT_FOUND, `Backup not found: ${this._path}`));
        } else {
            debug(`download: ${this._path} s3 stream error. %o`, error);
            this.destroy(new BoxError(BoxError.EXTERNAL_ERROR, `Error multipartDownload ${this._path}. ${formatError(error)}`));
        }
    }
    // fetch bytes [offset, offset+length-1] with a ranged GET and push them downstream
    async _downloadRange(offset, length) {
        const params = Object.assign({}, this._params);
        const lastPos = offset + length - 1;
        const range = `bytes=${offset}-${lastPos}`;
        params['Range'] = range;
        const [error, data] = await safe(this._s3.getObject(params));
        if (error) return this._handleError(error);
        const contentLength = parseInt(data.ContentLength, 10); // should be same as length
        if (contentLength > 0) {
            this._readSize += contentLength;
            const body = await consumers.buffer(data.Body); // data.Body.transformToString('binary') also works
            this.push(body);
        } else {
            this._done(); // empty response body: treat as EOF
        }
    }
    // kick off the next ranged download; the final block may be shorter than _blockSize
    _nextDownload() {
        let len = 0;
        if (this._readSize + this._blockSize < this._fileSize) {
            len = this._blockSize;
        } else {
            len = this._fileSize - this._readSize;
        }
        this._downloadRange(this._readSize, len); // intentionally not awaited; errors destroy the stream via _handleError
    }
    // determine the object size via headObject, then start the first range download
    async _fetchSize() {
        const [error, data] = await safe(this._s3.headObject(this._params));
        if (error) return this._handleError(error);
        const length = parseInt(data.ContentLength, 10);
        if (length > 0) {
            this._fileSize = length;
            this._nextDownload();
        } else {
            this._done(); // zero-byte object
        }
    }
    _read() { // reimp
        if (this._readSize === this._fileSize) return this._done();
        if (this._readSize === 0) return this._fetchSize(); // first call: size is still unknown
        this._nextDownload();
    }
}
// Returns a Readable over the remote object, fetched in 64MB ranged chunks.
async function download(apiConfig, remotePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
    const objectParams = {
        Bucket: apiConfig.bucket,
        Key: path.join(apiConfig.prefix, remotePath)
    };
    return new S3MultipartDownloadStream(s3, objectParams, { blockSize: 64 * 1024 * 1024 });
}
// Lists up to batchSize objects under remotePath. Returns { entries, marker }:
// entries are { path, size } relative to the configured prefix, marker is the
// continuation token for the next call or null when the listing is complete.
async function listDir(apiConfig, remotePath, batchSize, marker) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePath, 'string');
    assert.strictEqual(typeof batchSize, 'number');
    assert(typeof marker !== 'undefined');

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
    const listParams = {
        Bucket: apiConfig.bucket,
        Prefix: fullRemotePath + '/', // ensure we list contents of the directory and not match other filenames with prefix
        MaxKeys: batchSize
    };
    if (marker) listParams.ContinuationToken = marker;

    const [error, listData] = await safe(s3.listObjectsV2(listParams));
    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${fullRemotePath}. ${formatError(error)}`);
    if (listData.KeyCount === 0 || listData.Contents.length === 0) return { entries: [], marker: null }; // no more

    const entries = listData.Contents.map((content) => ({ path: path.relative(apiConfig.prefix, content.Key), size: content.Size }));
    return { entries, marker: listData.IsTruncated ? listData.NextContinuationToken : null };
}
// https://github.com/aws/aws-sdk-js/blob/2b6bcbdec1f274fe931640c1b61ece999aae7a19/lib/util.js#L41
// https://github.com/GeorgePhillips/node-s3-url-encode/blob/master/index.js
// See aws-sdk-js/issues/1302
function encodeCopySource(bucket, path) {
    // AWS percent-encodes some extra non-standard characters in a URI
    const percentEncode = (ch) => '%' + ch.charCodeAt(0).toString(16).toUpperCase();
    const encodedPath = encodeURI(path).replace(/[+!"#$@&'()*+,:;=?@]/g, percentEncode);
    // the slash at the beginning is optional
    return `/${bucket}/${encodedPath}`;
}
// Copies a single object from fullFromPath to fullToPath (both absolute keys).
// Small objects use a single copyObject; large ones use a multipart copy with
// per-range uploadPartCopy calls, aborting the upload on any failure.
async function copyInternal(apiConfig, fullFromPath, fullToPath, fileSize, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof fullFromPath, 'string');
    assert.strictEqual(typeof fullToPath, 'string');
    assert.strictEqual(typeof fileSize, 'number');
    assert.strictEqual(typeof progressCallback, 'function');
    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
    // maps an s3 error to the appropriate BoxError and throws; no-op when error is falsy
    function throwError(error) {
        if (error) debug(`copy: s3 copy error when copying ${fullFromPath}: ${error}`);
        if (error && S3_NOT_FOUND(error)) throw new BoxError(BoxError.NOT_FOUND, `Old backup not found: ${fullFromPath}`);
        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error copying ${fullFromPath} (${fileSize} bytes): ${error.Code || ''} ${error}`);
    }
    const copyParams = {
        Bucket: apiConfig.bucket,
        Key: fullToPath
    };
    // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy
    const provider = apiConfig._provider;
    // some providers use a lower (1GB) threshold before multipart copy is required
    const largeFileLimit = (provider === 'vultr-objectstorage' || provider === 'exoscale-sos' || provider === 'backblaze-b2' || provider === 'digitalocean-spaces') ? 1024 * 1024 * 1024 : 3 * 1024 * 1024 * 1024;
    if (fileSize < largeFileLimit) {
        // single-request copy
        progressCallback({ message: `Copying ${fullFromPath}` });
        copyParams.CopySource = encodeCopySource(apiConfig.bucket, fullFromPath);
        const [copyError] = await safe(s3.copyObject(copyParams));
        if (copyError) return throwError(copyError);
        return;
    }
    // multipart copy: create the upload, copy each byte range as a part, then complete
    progressCallback({ message: `Copying (multipart) ${fullFromPath}` });
    const [createMultipartError, multipart] = await safe(s3.createMultipartUpload(copyParams));
    if (createMultipartError) return throwError(createMultipartError);
    // Exoscale (96M) was suggested by exoscale. 1GB for others is arbitrary size
    const chunkSize = provider === 'exoscale-sos' ? 96 * 1024 * 1024 : 1024 * 1024 * 1024;
    const uploadId = multipart.UploadId;
    const uploadedParts = [], ranges = [];
    // split [0, fileSize) into chunkSize ranges; the final range takes the remainder
    let cur = 0;
    while (cur + chunkSize < fileSize) {
        ranges.push({ startBytes: cur, endBytes: cur + chunkSize - 1 });
        cur += chunkSize;
    }
    ranges.push({ startBytes: cur, endBytes: fileSize-1 });
    // copy up to 3 ranges in parallel; uploadedParts is filled by index to keep part order
    const [copyError] = await safe(async.eachOfLimit(ranges, 3, async function copyChunk(range, index) {
        const partCopyParams = {
            Bucket: apiConfig.bucket,
            Key: fullToPath,
            CopySource: encodeCopySource(apiConfig.bucket, fullFromPath), // See aws-sdk-js/issues/1302
            CopySourceRange: 'bytes=' + range.startBytes + '-' + range.endBytes,
            PartNumber: index+1, // part numbers are 1-based
            UploadId: uploadId
        };
        progressCallback({ message: `Copying part ${partCopyParams.PartNumber} - ${partCopyParams.CopySource} ${partCopyParams.CopySourceRange}` });
        const part = await s3.uploadPartCopy(partCopyParams);
        progressCallback({ message: `Copied part ${partCopyParams.PartNumber} - Etag: ${part.CopyPartResult.ETag}` });
        if (!part.CopyPartResult.ETag) throw new Error('Multi-part copy is broken or not implemented by the S3 storage provider');
        uploadedParts[index] = { ETag: part.CopyPartResult.ETag, PartNumber: partCopyParams.PartNumber };
    }));
    if (copyError) {
        // clean up the half-finished upload before reporting the error
        const abortParams = {
            Bucket: apiConfig.bucket,
            Key: fullToPath,
            UploadId: uploadId
        };
        progressCallback({ message: `Aborting multipart copy of ${fullFromPath}` });
        await safe(s3.abortMultipartUpload(abortParams), { debug }); // ignore any abort errors
        return throwError(copyError);
    }
    const completeMultipartParams = {
        Bucket: apiConfig.bucket,
        Key: fullToPath,
        MultipartUpload: { Parts: uploadedParts },
        UploadId: uploadId
    };
    progressCallback({ message: `Finishing multipart copy - ${completeMultipartParams.Key}` });
    const [completeMultipartError] = await safe(s3.completeMultipartUpload(completeMultipartParams));
    if (completeMultipartError) return throwError(completeMultipartError);
}
// Copies a single object within the bucket. Resolves the object size via
// headObject first so copyInternal can choose plain vs multipart copy.
// Throws BoxError.NOT_FOUND when the source object is missing.
async function copy(apiConfig, fromPath, toPath, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof fromPath, 'string');
    assert.strictEqual(typeof toPath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const fullFromPath = path.join(apiConfig.prefix, fromPath);
    const fullToPath = path.join(apiConfig.prefix, toPath);

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY }); // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html
    const [error, data] = await safe(s3.headObject({ Bucket: apiConfig.bucket, Key: fullFromPath }));
    if (error) {
        if (S3_NOT_FOUND(error)) throw new BoxError(BoxError.NOT_FOUND, `Path ${fromPath} not found`);
        throw new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${fromPath}. ${formatError(error)}`);
    }

    return await copyInternal(apiConfig, fullFromPath, fullToPath, data.ContentLength, progressCallback);
}
// Copies every object under fromPath to toPath, listing in batches of 1000 and
// copying with per-provider concurrency.
async function copyDir(apiConfig, fromPath, toPath, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof fromPath, 'string');
    assert.strictEqual(typeof toPath, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    const concurrency = apiConfig.limits?.copyConcurrency || (apiConfig._provider === 's3' ? 500 : 10);
    progressCallback({ message: `Copying ${fromPath} to ${toPath} with concurrency of ${concurrency}` });

    let total = 0;
    let marker = null;
    for (;;) {
        const batch = await listDir(apiConfig, fromPath, 1000, marker); // returned entries are relative to prefix
        total += batch.entries.length;
        progressCallback({ message: `Copying files from ${total - batch.entries.length}-${total}` });

        await async.eachLimit(batch.entries, concurrency, async (entry) => {
            const fullFromPath = path.join(apiConfig.prefix, entry.path);
            const fullToPath = path.join(apiConfig.prefix, toPath, path.relative(fromPath, entry.path));
            await copyInternal(apiConfig, fullFromPath, fullToPath, entry.size, progressCallback);
        });

        if (!batch.marker) break;
        marker = batch.marker;
    }
    progressCallback({ message: `Copied ${total} files` });
}
// Deletes a single object. Succeeds even when the key does not exist.
async function remove(apiConfig, remotePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePath, 'string');

    const fullRemotePath = path.join(apiConfig.prefix, remotePath);
    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });

    // deleteObject does not return error if key is not found
    const [error] = await safe(s3.deleteObject({ Bucket: apiConfig.bucket, Key: fullRemotePath }));
    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${fullRemotePath}. ${formatError(error)}`);
}
// Splits array into consecutive slices of at most `size` elements each.
function chunk(array, size) {
    assert(Array.isArray(array));
    assert.strictEqual(typeof size, 'number');

    if (array.length === 0) return [];

    const result = [];
    for (let start = 0; start < array.length; start += size) {
        result.push(array.slice(start, start + size));
    }
    return result;
}
// Removes every object under remotePathPrefix using batched DeleteObjects calls.
async function removeDir(apiConfig, remotePathPrefix, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof remotePathPrefix, 'string');
    assert.strictEqual(typeof progressCallback, 'function');

    // only use this client for DeleteObjects call. It forces md5 checksum and for anything else, it might crash
    const deleteObjectsS3Client = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY, deleteObjects: true });
    const chunkSize = apiConfig.limits?.deleteConcurrency || (apiConfig._provider !== 'digitalocean-spaces' ? 1000 : 100); // throttle objects in each request

    let total = 0;
    let marker = null;
    for (;;) {
        const batch = await listDir(apiConfig, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix
        total += batch.entries.length;

        await async.eachSeries(chunk(batch.entries, chunkSize), async function deleteFiles(objects) {
            const fullFirstPath = path.join(apiConfig.prefix, objects[0].path);
            const fullLastPath = path.join(apiConfig.prefix, objects[objects.length-1].path);
            progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` });

            const deleteParams = {
                Bucket: apiConfig.bucket,
                Delete: {
                    Objects: objects.map((o) => ({ Key: path.join(apiConfig.prefix, o.path) }))
                }
            };
            // deleteObjects does not return error if key is not found
            const [error] = await safe(deleteObjectsS3Client.deleteObjects(deleteParams));
            if (error) {
                progressCallback({ message: `Unable to remove from ${fullFirstPath} to ${fullLastPath} ${error.message || error.Code}` });
                throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove from ${fullFirstPath} to ${fullLastPath}. error: ${error.message}`);
            }
        });

        if (!batch.marker) break;
        marker = batch.marker;
    }
    progressCallback({ message: `Removed ${total} files` });
}
// often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket resulting in large bucket sizes over time
// so abort any multi-part uploads that were started more than 3 days ago
async function cleanup(apiConfig, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });
    const uploads = await s3.listMultipartUploads({ Bucket: apiConfig.bucket, Prefix: apiConfig.prefix });
    progressCallback({ message: `Cleaning up any aborted multi-part uploads. count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` });
    if (!uploads.Uploads) return;

    const maxAge = 3 * 24 * 60 * 60 * 1000; // 3 days
    for (const upload of uploads.Uploads) {
        if (Date.now() - new Date(upload.Initiated) < maxAge) continue; // too recent; may still be in progress
        progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` });
        await safe(s3.abortMultipartUpload({ Bucket: apiConfig.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
    }
}
// Validates the backup configuration fields, then exercises put/list/delete on
// the bucket to prove the credentials work. Returns a sanitized config object
// tagged with the provider. Throws BoxError.BAD_FIELD or BoxError.EXTERNAL_ERROR.
async function verifyConfig({ id, provider, config }) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof provider, 'string');
    assert.strictEqual(typeof config, 'object');

    const badField = (message) => new BoxError(BoxError.BAD_FIELD, message);
    const requireString = (field) => {
        if (typeof config[field] !== 'string') throw badField(`${field} must be a string`);
    };
    const requireOptional = (field, type) => {
        if (field in config && typeof config[field] !== type) throw badField(`${field} must be a ${type}`);
    };

    requireString('accessKeyId');
    requireString('secretAccessKey');
    requireString('bucket');
    // the node module seems to incorrectly accept bucket name with '/'
    if (config.bucket.includes('/')) throw badField('bucket name cannot contain "/"');
    // names must be lowercase and start with a letter or number. can contain dashes
    if (config.bucket.includes('_') || config.bucket.match(/[A-Z]/)) throw badField('bucket name cannot contain "_" or capitals');
    requireString('prefix');
    requireOptional('signatureVersion', 'string');
    requireOptional('endpoint', 'string');
    requireOptional('region', 'string');
    requireOptional('acceptSelfSignedCerts', 'boolean');
    requireOptional('s3ForcePathStyle', 'boolean');

    const s3 = createS3Client(config, {});

    // round-trip a test file to verify credentials and bucket access
    const testKey = path.join(config.prefix, 'snapshot/cloudron-testfile');
    const [putError] = await safe(s3.putObject({ Bucket: config.bucket, Key: testKey, Body: 'testcontent' }));
    if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. ${formatError(putError)}`);

    const [listError] = await safe(s3.listObjectsV2({ Bucket: config.bucket, Prefix: path.join(config.prefix, 'snapshot'), MaxKeys: 1 }));
    if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. ${formatError(listError)}`);

    const [delError] = await safe(s3.deleteObject({ Bucket: config.bucket, Key: testKey }));
    if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`);

    const newConfig = _.pick(config, ['accessKeyId', 'secretAccessKey', 'bucket', 'prefix', 'signatureVersion', 'acceptSelfSignedCerts', 'endpoint', 's3ForcePathStyle' ]);
    return { _provider: provider, ...newConfig };
}
// Called when this backend is activated; S3 needs no preparation beyond config validation.
async function setup(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');
}
// Called when this backend is deactivated; S3 needs no cleanup here.
async function teardown(apiConfig) {
    assert.strictEqual(typeof apiConfig, 'object');
}
// Strips secrets and internal fields before the config is shown to clients.
// Mutates and returns the same object.
function removePrivateFields(apiConfig) {
    for (const field of ['secretAccessKey', '_provider']) delete apiConfig[field];
    return apiConfig;
}
// Restores fields that removePrivateFields() stripped: carries over the secret
// (unless the client supplied a new one) and the internal provider tag.
function injectPrivateFields(newConfig, currentConfig) {
    const hasNewSecret = Object.hasOwn(newConfig, 'secretAccessKey');
    if (!hasNewSecret) newConfig.secretAccessKey = currentConfig.secretAccessKey;
    newConfig._provider = currentConfig._provider;
}