backups: add setup/teardown

1. Add setup and teardown hooks.
2. Move the managed mount setup and teardown into the filesystem backend.
3. Remove the vaguely-scoped storage.js.

We should convert storageApi into a real object so that we don't have to
keep passing apiConfig around.
This commit is contained in:
Girish Ramakrishnan
2025-08-01 14:54:32 +02:00
parent a1a683ec56
commit ea419509f1
14 changed files with 228 additions and 208 deletions

View File

@@ -11,6 +11,7 @@ exports = module.exports = {
const assert = require('assert'),
async = require('async'),
backupTargets = require('../backuptargets.js'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
{ DecryptStream } = require('../hush.js'),
@@ -24,7 +25,6 @@ const assert = require('assert'),
promiseRetry = require('../promise-retry.js'),
safe = require('safetydance'),
shell = require('../shell.js')('backupformat/rsync'),
storage = require('../storage.js'),
stream = require('stream/promises'),
syncer = require('../syncer.js');
@@ -86,16 +86,16 @@ async function processSyncerChange(change, backupTarget, remotePath, dataLayout,
if (change.operation === 'removedir') {
debug(`Removing directory ${backupFilePath}`);
await storage.api(backupTarget.provider).removeDir(backupTarget.config, backupFilePath, progressCallback);
await backupTargets.storageApi(backupTarget).removeDir(backupTarget.config, backupFilePath, progressCallback);
} else if (change.operation === 'remove') {
debug(`Removing ${backupFilePath}`);
await storage.api(backupTarget.provider).remove(backupTarget.config, backupFilePath);
await backupTargets.storageApi(backupTarget).remove(backupTarget.config, backupFilePath);
} else if (change.operation === 'add') {
await promiseRetry({ times: 5, interval: 20000, debug }, async (retryCount) => {
progressCallback({ message: `Adding ${change.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
debug(`Adding ${change.path} position ${change.position} try ${retryCount}`);
const uploader = await storage.api(backupTarget.provider).upload(backupTarget.config, backupFilePath);
const uploader = await backupTargets.storageApi(backupTarget).upload(backupTarget.config, backupFilePath);
await addFile(dataLayout.toLocalPath('./' + change.path), backupTarget.encryption, uploader, progressCallback);
});
}
@@ -216,7 +216,7 @@ async function downloadDir(backupTarget, backupFilePath, dataLayout, progressCal
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, mkdirError.message);
await promiseRetry({ times: 3, interval: 20000 }, async function () {
const [downloadError, sourceStream] = await safe(storage.api(backupTarget.provider).download(backupTarget.config, entry.fullPath));
const [downloadError, sourceStream] = await safe(backupTargets.storageApi(backupTarget).download(backupTarget.config, entry.fullPath));
if (downloadError) {
progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${downloadError.message}` });
throw downloadError;
@@ -255,7 +255,7 @@ async function downloadDir(backupTarget, backupFilePath, dataLayout, progressCal
const concurrency = backupTarget.limits?.downloadConcurrency || (backupTarget.provider === 's3' ? 30 : 10);
let marker = null;
while (true) {
const batch = await storage.api(backupTarget.provider).listDir(backupTarget.config, backupFilePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
const batch = await backupTargets.storageApi(backupTarget).listDir(backupTarget.config, backupFilePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
await async.eachLimit(batch.entries, concurrency, downloadFile);
if (!batch.marker) break;
marker = batch.marker;