storage: automatically abort old multipart uploads in s3

This commit is contained in:
Girish Ramakrishnan
2025-02-13 11:08:00 +01:00
parent e7b11a7ceb
commit cb6d531300
7 changed files with 56 additions and 2 deletions

View File

@@ -13,6 +13,8 @@ exports = module.exports = {
remove,
removeDir,
cleanup,
testConfig,
removePrivateFields,
injectPrivateFields,
@@ -356,7 +358,7 @@ async function copyFile(apiConfig, oldFilePath, newFilePath, entry, progressCall
uploadedParts[index] = { ETag: part.CopyPartResult.ETag, PartNumber: partCopyParams.PartNumber };
}));
if (copyError) { // we must still recommend the user to set an AbortIncompleteMultipartUpload lifecycle rule
if (copyError) {
const abortParams = {
Bucket: apiConfig.bucket,
Key: path.join(newFilePath, relativePath),
@@ -480,6 +482,24 @@ async function removeDir(apiConfig, pathPrefix, progressCallback) {
progressCallback({ message: `Removed ${total} files` });
}
// often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket resulting in large bucket sizes over time
// Aborts multipart uploads older than 3 days under apiConfig.prefix. Best-effort: abort errors are logged and ignored.
// apiConfig: { bucket, prefix, ... } (credentials consumed by createS3Client); progressCallback: ({ message }) => void
async function cleanup(apiConfig, progressCallback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    const s3 = createS3Client(apiConfig, { retryStrategy: RETRY_STRATEGY });

    const STALE_AGE = 3 * 24 * 60 * 60 * 1000; // only abort uploads initiated more than 3 days ago

    // listMultipartUploads returns at most 1000 entries per call; paginate with the
    // markers, otherwise stale uploads past the first page are never cleaned up
    let keyMarker, uploadIdMarker, truncated = true;
    while (truncated) {
        const uploads = await s3.listMultipartUploads({ Bucket: apiConfig.bucket, Prefix: apiConfig.prefix, KeyMarker: keyMarker, UploadIdMarker: uploadIdMarker });
        progressCallback({ message: `Cleaning up any aborted multi-part uploads. count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` });
        if (!uploads.Uploads) return;
        for (const upload of uploads.Uploads) {
            if (Date.now() - new Date(upload.Initiated) < STALE_AGE) continue; // 3 days ago
            progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` });
            await safe(s3.abortMultipartUpload({ Bucket: apiConfig.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error
        }
        truncated = !!uploads.IsTruncated;
        keyMarker = uploads.NextKeyMarker;
        uploadIdMarker = uploads.NextUploadIdMarker;
    }
}
async function testConfig(apiConfig) {
assert.strictEqual(typeof apiConfig, 'object');