rename backupTargets to backupSites

This commit is contained in:
Girish Ramakrishnan
2025-09-12 09:48:37 +02:00
parent f8015c156e
commit c5b7264f1a
38 changed files with 751 additions and 751 deletions

View File

@@ -13,7 +13,7 @@ exports = module.exports = {
const assert = require('node:assert'),
async = require('async'),
backupTargets = require('../backuptargets.js'),
backupSites = require('../backupsites.js'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
{ DecryptStream } = require('../hush.js'),
@@ -75,15 +75,15 @@ async function addFile(sourceFile, encryption, uploader, progressCallback) {
};
}
async function sync(backupTarget, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function sync(backupSite, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
const concurrency = backupTarget.limits?.syncConcurrency || (backupTarget.provider === 's3' ? 20 : 10);
const cacheFile = path.join(paths.BACKUP_INFO_DIR, backupTarget.id, `${dataLayout.getBasename()}.sync.cache`);
const concurrency = backupSite.limits?.syncConcurrency || (backupSite.provider === 's3' ? 20 : 10);
const cacheFile = path.join(paths.BACKUP_INFO_DIR, backupSite.id, `${dataLayout.getBasename()}.sync.cache`);
const { delQueue, addQueue, integrityMap } = await syncer.sync(dataLayout, cacheFile);
debug(`sync: processing ${delQueue.length} deletes and ${addQueue.length} additions`);
const aggregatedStats = {
@@ -96,33 +96,33 @@ async function sync(backupTarget, remotePath, dataLayout, progressCallback) {
async function processSyncerChange(change) {
debug('sync: processing task: %j', change);
// the empty task.path is special to signify the directory
const destPath = change.path && backupTarget.encryption?.encryptedFilenames ? hush.encryptFilePath(change.path, backupTarget.encryption) : change.path;
const destPath = change.path && backupSite.encryption?.encryptedFilenames ? hush.encryptFilePath(change.path, backupSite.encryption) : change.path;
const fullPath = path.join(remotePath, destPath);
if (change.operation === 'removedir') {
debug(`Removing directory ${fullPath}`);
await backupTargets.storageApi(backupTarget).removeDir(backupTarget.config, fullPath, progressCallback);
await backupSites.storageApi(backupSite).removeDir(backupSite.config, fullPath, progressCallback);
} else if (change.operation === 'remove') {
debug(`Removing ${fullPath}`);
await backupTargets.storageApi(backupTarget).remove(backupTarget.config, fullPath);
await backupSites.storageApi(backupSite).remove(backupSite.config, fullPath);
} else if (change.operation === 'add') {
await promiseRetry({ times: 5, interval: 20000, debug }, async (retryCount) => {
progressCallback({ message: `Adding ${change.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
debug(`Adding ${change.path} position ${change.position} try ${retryCount}`);
const uploader = await backupTargets.storageApi(backupTarget).upload(backupTarget.config, fullPath);
const { integrity } = await addFile(dataLayout.toLocalPath('./' + change.path), backupTarget.encryption, uploader, progressCallback);
const uploader = await backupSites.storageApi(backupSite).upload(backupSite.config, fullPath);
const { integrity } = await addFile(dataLayout.toLocalPath('./' + change.path), backupSite.encryption, uploader, progressCallback);
integrityMap.set(destPath, integrity);
aggregatedStats.size += integrity.size;
});
}
}
const [delError] = await safe(async.eachLimit(delQueue, concurrency, async (change) => await processSyncerChange(change, backupTarget, remotePath, dataLayout, progressCallback)));
const [delError] = await safe(async.eachLimit(delQueue, concurrency, async (change) => await processSyncerChange(change, backupSite, remotePath, dataLayout, progressCallback)));
debug('sync: done processing deletes. error: %o', delError);
if (delError) throw delError;
const [addError] = await safe(async.eachLimit(addQueue, concurrency, async (change) => await processSyncerChange(change, backupTarget, remotePath, dataLayout, progressCallback)));
const [addError] = await safe(async.eachLimit(addQueue, concurrency, async (change) => await processSyncerChange(change, backupSite, remotePath, dataLayout, progressCallback)));
debug('sync: done processing adds. error: %o', addError);
if (addError) throw addError;
@@ -163,8 +163,8 @@ async function saveFsMetadata(dataLayout, metadataFile) {
if (symlinkFilesError) throw symlinkFilesError;
if (symlinkFiles.length) metadata.symlinks = metadata.symlinks.concat(symlinkFiles.trim().split('\n').map((sl) => {
const target = safe.fs.readlinkSync(sl);
return { path: dataLayout.toRemotePath(sl), target };
const site = safe.fs.readlinkSync(sl);
return { path: dataLayout.toRemotePath(sl), site };
}));
}
@@ -193,29 +193,29 @@ async function restoreFsMetadata(dataLayout, metadataFile) {
}
for (const symlink of (metadata.symlinks || [])) {
if (!symlink.target) continue;
if (!symlink.site) continue;
// the path may not exist if we had a directory full of symlinks
const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
const [symlinkError] = await safe(fs.promises.symlink(symlink.site, dataLayout.toLocalPath(symlink.path), 'file'));
if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
}
}
async function downloadDir(backupTarget, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function downloadDir(backupSite, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
const encryptedFilenames = backupTarget.encryption?.encryptedFilenames || false;
const encryptedFilenames = backupSite.encryption?.encryptedFilenames || false;
debug(`downloadDir: ${remotePath} to ${dataLayout.toString()}. encryption filenames: ${encryptedFilenames}. encrypted files: ${!!backupTarget.encryption}`);
debug(`downloadDir: ${remotePath} to ${dataLayout.toString()}. encryption filenames: ${encryptedFilenames}. encrypted files: ${!!backupSite.encryption}`);
async function downloadFile(entry) {
let relativePath = path.relative(remotePath, entry.path);
if (encryptedFilenames) {
const { error, result } = hush.decryptFilePath(relativePath, backupTarget.encryption);
const { error, result } = hush.decryptFilePath(relativePath, backupSite.encryption);
if (error) throw new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file');
relativePath = result;
}
@@ -227,7 +227,7 @@ async function downloadDir(backupTarget, remotePath, dataLayout, progressCallbac
await promiseRetry({ times: 3, interval: 20000 }, async function () {
progressCallback({ message: `Downloading ${entry.path} to ${destFilePath}` });
const [downloadError, sourceStream] = await safe(backupTargets.storageApi(backupTarget).download(backupTarget.config, entry.path));
const [downloadError, sourceStream] = await safe(backupSites.storageApi(backupSite).download(backupSite.config, entry.path));
if (downloadError) {
progressCallback({ message: `Download ${entry.path} to ${destFilePath} errored: ${downloadError.message}` });
throw downloadError;
@@ -244,8 +244,8 @@ async function downloadDir(backupTarget, remotePath, dataLayout, progressCallbac
const streams = [ sourceStream, ps ];
if (backupTarget.encryption) {
const decryptStream = new DecryptStream(backupTarget.encryption);
if (backupSite.encryption) {
const decryptStream = new DecryptStream(backupSite.encryption);
streams.push(decryptStream);
}
@@ -261,45 +261,45 @@ async function downloadDir(backupTarget, remotePath, dataLayout, progressCallbac
}
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
const concurrency = backupTarget.limits?.downloadConcurrency || (backupTarget.provider === 's3' ? 30 : 10);
const concurrency = backupSite.limits?.downloadConcurrency || (backupSite.provider === 's3' ? 30 : 10);
let marker = null;
while (true) {
const batch = await backupTargets.storageApi(backupTarget).listDir(backupTarget.config, remotePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
const batch = await backupSites.storageApi(backupSite).listDir(backupSite.config, remotePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
await async.eachLimit(batch.entries, concurrency, downloadFile);
if (!batch.marker) break;
marker = batch.marker;
}
}
async function download(backupTarget, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function download(backupSite, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);
await downloadDir(backupTarget, remotePath, dataLayout, progressCallback);
await downloadDir(backupSite, remotePath, dataLayout, progressCallback);
await restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
}
async function upload(backupTarget, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function upload(backupSite, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof dataLayout, 'object');
assert.strictEqual(typeof progressCallback, 'function');
await saveFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
return await sync(backupTarget, remotePath, dataLayout, progressCallback); // { stats, integrityMap }
return await sync(backupSite, remotePath, dataLayout, progressCallback); // { stats, integrityMap }
}
async function copy(backupTarget, fromPath, toPath, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function copy(backupSite, fromPath, toPath, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof fromPath, 'string');
assert.strictEqual(typeof toPath, 'string');
assert.strictEqual(typeof progressCallback, 'function');
await backupTargets.storageApi(backupTarget).copyDir(backupTarget.config, fromPath, toPath, progressCallback);
await backupSites.storageApi(backupSite).copyDir(backupSite.config, fromPath, toPath, progressCallback);
}
function getFileExtension(encryption) {
@@ -308,27 +308,27 @@ function getFileExtension(encryption) {
return ''; // this also signals to backupcleaner that we are dealing with directories
}
async function verify(backupTarget, remotePath, integrityMap, progressCallback) {
assert.strictEqual(typeof backupTarget, 'object');
async function verify(backupSite, remotePath, integrityMap, progressCallback) {
assert.strictEqual(typeof backupSite, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(util.types.isMap(integrityMap), 'integrityMap should be a Map');
assert.strictEqual(typeof progressCallback, 'function');
debug(`verify: Verifying ${remotePath}`);
const encryptedFilenames = backupTarget.encryption?.encryptedFilenames || false;
const encryptedFilenames = backupSite.encryption?.encryptedFilenames || false;
let fileCount = 0;
async function validateFile(entry) {
let relativePath = path.relative(remotePath, entry.path);
if (encryptedFilenames) {
const { error, result } = hush.decryptFilePath(relativePath, backupTarget.encryption);
const { error, result } = hush.decryptFilePath(relativePath, backupSite.encryption);
if (error) throw new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file');
relativePath = result;
}
++fileCount;
const sourceStream = await backupTargets.storageApi(backupTarget).download(backupTarget.config, entry.path);
const sourceStream = await backupSites.storageApi(backupSite).download(backupSite.config, entry.path);
const ps = new ProgressStream({ interval: 10000 }); // display a progress every 10 seconds
ps.on('progress', function (progress) {
@@ -339,8 +339,8 @@ async function verify(backupTarget, remotePath, integrityMap, progressCallback)
const streams = [ sourceStream, ps ];
if (backupTarget.encryption) {
const decryptStream = new DecryptStream(backupTarget.encryption);
if (backupSite.encryption) {
const decryptStream = new DecryptStream(backupSite.encryption);
streams.push(decryptStream);
}
@@ -357,10 +357,10 @@ async function verify(backupTarget, remotePath, integrityMap, progressCallback)
debug(integrityMap.entries());
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
const concurrency = backupTarget.limits?.downloadConcurrency || (backupTarget.provider === 's3' ? 30 : 10);
const concurrency = backupSite.limits?.downloadConcurrency || (backupSite.provider === 's3' ? 30 : 10);
let marker = null;
while (true) {
const batch = await backupTargets.storageApi(backupTarget).listDir(backupTarget.config, remotePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
const batch = await backupSites.storageApi(backupSite).listDir(backupSite.config, remotePath, marker === null ? 1 : 1000, marker); // try with one file first. if that works out, we continue faster
await async.eachLimit(batch.entries, concurrency, validateFile);
if (!batch.marker) break;
marker = batch.marker;