replace debug() with our custom logger

Mostly we want trace() and log(). trace() can be enabled whenever
we want by flipping a flag and restarting box.
This commit is contained in:
Girish Ramakrishnan
2026-03-12 22:55:28 +05:30
parent d57554a48c
commit 01d0c738bc
104 changed files with 1187 additions and 1174 deletions

View File

@@ -8,7 +8,7 @@ import constants from './constants.js';
import crypto from 'node:crypto';
import DataLayout from './datalayout.js';
import database from './database.js';
import debugModule from 'debug';
import logger from './logger.js';
import df from './df.js';
import locks from './locks.js';
import path from 'node:path';
@@ -20,7 +20,7 @@ import shellModule from './shell.js';
import stream from 'stream/promises';
import util from 'util';
const debug = debugModule('box:backuptask');
const { log, trace } = logger('backuptask');
const shell = shellModule('backuptask');
@@ -40,22 +40,22 @@ async function checkPreconditions(backupSite, dataLayout) {
// check mount status before uploading
const status = await backupSites.ensureMounted(backupSite);
debug(`checkPreconditions: mount point status is ${JSON.stringify(status)}`);
log(`checkPreconditions: mount point status is ${JSON.stringify(status)}`);
if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not active: ${status.message}`);
// check available size. this requires root for df to work
const available = await backupSites.storageApi(backupSite).getAvailableSize(backupSite.config);
let used = 0;
for (const localPath of dataLayout.localPaths()) {
debug(`checkPreconditions: getting disk usage of ${localPath}`);
log(`checkPreconditions: getting disk usage of ${localPath}`);
// du can error when files go missing as it is computing the size. it still prints some size anyway
// to match df output in getAvailableSize() we must use disk usage size here and not apparent size
const [duError, result] = await safe(shell.spawn('du', [ '--dereference-args', '--summarize', '--block-size=1', '--exclude=*.lock', '--exclude=dovecot.list.index.log.*', localPath], { encoding: 'utf8' }));
if (duError) debug(`checkPreconditions: du error for ${localPath}. code: ${duError.code} stderror: ${duError.stderr}`);
if (duError) log(`checkPreconditions: du error for ${localPath}. code: ${duError.code} stderror: ${duError.stderr}`);
used += parseInt(duError ? duError.stdout : result, 10);
}
debug(`checkPreconditions: total required=${used} available=${available}`);
log(`checkPreconditions: total required=${used} available=${available}`);
const needed = 0.6 * used + (1024 * 1024 * 1024); // check if there is atleast 1GB left afterwards. aim for 60% because rsync/tgz won't need full 100%
if (available <= needed) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space for backup. Needed: ${df.prettyBytes(needed)} Available: ${df.prettyBytes(available)}`);
@@ -81,7 +81,7 @@ async function upload(remotePath, siteId, dataLayoutString, progressCallback) {
assert.strictEqual(typeof dataLayoutString, 'string');
assert.strictEqual(typeof progressCallback, 'function');
debug(`upload: path ${remotePath} site ${siteId} dataLayout ${dataLayoutString}`);
log(`upload: path ${remotePath} site ${siteId} dataLayout ${dataLayoutString}`);
const backupSite = await backupSites.get(siteId);
if (!backupSite) throw new BoxError(BoxError.NOT_FOUND, 'Backup site not found');
@@ -99,7 +99,7 @@ async function upload(remotePath, siteId, dataLayoutString, progressCallback) {
// - rsync: size (final backup size) will be different from what was transferred (only changed files)
// stats.fileCount and stats.size are stored in db and should match up what is written into .backupinfo
const { stats, integrityMap } = await backupFormats.api(backupSite.format).upload(backupSite, remotePath, dataLayout, progressCallback);
debug(`upload: path ${remotePath} site ${siteId} uploaded: ${JSON.stringify(stats)}`);
log(`upload: path ${remotePath} site ${siteId} uploaded: ${JSON.stringify(stats)}`);
progressCallback({ message: `Uploading integrity information to ${remotePath}.backupinfo` });
const signature = await uploadBackupInfo(backupSite, remotePath, integrityMap);
@@ -112,7 +112,7 @@ async function download(backupSite, remotePath, dataLayout, progressCallback) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
debug(`download: Downloading ${remotePath} of format ${backupSite.format} (encrypted: ${!!backupSite.encryption}) to ${dataLayout.toString()}`);
log(`download: Downloading ${remotePath} of format ${backupSite.format} (encrypted: ${!!backupSite.encryption}) to ${dataLayout.toString()}`);
await backupFormats.api(backupSite.format).download(backupSite, remotePath, dataLayout, progressCallback);
}
@@ -128,10 +128,10 @@ async function restore(backupSite, remotePath, progressCallback) {
await download(backupSite, remotePath, dataLayout, progressCallback);
debug('restore: download completed, importing database');
log('restore: download completed, importing database');
await database.importFromFile(`${dataLayout.localRoot()}/box.mysqldump`);
debug('restore: database imported');
log('restore: database imported');
await locks.releaseAll(); // clear the locks table in database
}
@@ -155,7 +155,7 @@ async function downloadApp(app, restoreConfig, progressCallback) {
}
await download(backupSite, remotePath, dataLayout, progressCallback);
debug('downloadApp: time: %s', (new Date() - startTime)/1000);
log('downloadApp: time: %s', (new Date() - startTime)/1000);
}
async function runBackupUpload(uploadConfig, progressCallback) {
@@ -172,21 +172,21 @@ async function runBackupUpload(uploadConfig, progressCallback) {
const envCopy = Object.assign({}, process.env);
if (backupSite.limits?.memoryLimit >= 2*1024*1024*1024) {
const heapSize = Math.min((backupSite.limits.memoryLimit/1024/1024) - 256, 8192);
debug(`runBackupUpload: adjusting heap size to ${heapSize}M`);
log(`runBackupUpload: adjusting heap size to ${heapSize}M`);
envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
}
let lastMessage = null; // the script communicates error result as a string
function onMessage(progress) { // this is { message } or { result }
if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });
debug(`runBackupUpload: result - ${JSON.stringify(progress)}`);
log(`runBackupUpload: result - ${JSON.stringify(progress)}`);
lastMessage = progress;
}
// do not use debug for logging child output because it already has timestamps via its own debug
const [error] = await safe(shell.sudo([ BACKUP_UPLOAD_CMD, remotePath, backupSite.id, dataLayout.toString() ], { env: envCopy, preserveEnv: true, onMessage, logger: process.stdout.write }));
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
debug(`runBackupUpload: backuptask crashed`, error);
log(`runBackupUpload: backuptask crashed`, error);
throw new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed');
} else if (error && error.code === 50) { // exited with error
throw new BoxError(BoxError.EXTERNAL_ERROR, lastMessage.errorMessage);
@@ -203,7 +203,7 @@ async function snapshotBox(progressCallback) {
const startTime = new Date();
await database.exportToFile(`${paths.BOX_DATA_DIR}/box.mysqldump`);
debug(`snapshotBox: took ${(new Date() - startTime)/1000} seconds`);
log(`snapshotBox: took ${(new Date() - startTime)/1000} seconds`);
}
async function uploadBoxSnapshot(backupSite, progressCallback) {
@@ -230,7 +230,7 @@ async function uploadBoxSnapshot(backupSite, progressCallback) {
const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);
debug(`uploadBoxSnapshot: took ${(new Date() - startTime)/1000} seconds`);
log(`uploadBoxSnapshot: took ${(new Date() - startTime)/1000} seconds`);
await backupSites.setSnapshotInfo(backupSite, 'box', { timestamp: new Date().toISOString() });
@@ -246,17 +246,17 @@ async function copy(backupSite, srcRemotePath, destRemotePath, progressCallback)
const startTime = new Date();
const [copyError] = await safe(backupFormats.api(backupSite.format).copy(backupSite, srcRemotePath, destRemotePath, progressCallback));
if (copyError) {
debug(`copy: copy to ${destRemotePath} errored. error: ${copyError.message}`);
log(`copy: copy to ${destRemotePath} errored. error: ${copyError.message}`);
throw copyError;
}
debug(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - startTime)/1000} seconds`);
log(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - startTime)/1000} seconds`);
const [copyChecksumError] = await safe(backupSites.storageApi(backupSite).copy(backupSite.config, `${srcRemotePath}.backupinfo`, `${destRemotePath}.backupinfo`, progressCallback));
if (copyChecksumError) {
debug(`copy: copy to ${destRemotePath} errored. error: ${copyChecksumError.message}`);
log(`copy: copy to ${destRemotePath} errored. error: ${copyChecksumError.message}`);
throw copyChecksumError;
}
debug(`copy: copied backupinfo successfully to ${destRemotePath}.backupinfo`);
log(`copy: copied backupinfo successfully to ${destRemotePath}.backupinfo`);
}
async function backupBox(backupSite, appBackupsMap, tag, options, progressCallback) {
@@ -281,7 +281,7 @@ async function backupBox(backupSite, appBackupsMap, tag, options, progressCallba
duration: acc.duration + cur.upload.duration,
}), stats.upload);
debug(`backupBox: rotating box snapshot of ${backupSite.id} to id ${remotePath}. ${JSON.stringify(stats)}`);
log(`backupBox: rotating box snapshot of ${backupSite.id} to id ${remotePath}. ${JSON.stringify(stats)}`);
const data = {
remotePath,
@@ -329,7 +329,7 @@ async function snapshotApp(app, progressCallback) {
await services.runBackupCommand(app);
await services.backupAddons(app, app.manifest.addons);
debug(`snapshotApp: ${app.fqdn} took ${(new Date() - startTime)/1000} seconds`);
log(`snapshotApp: ${app.fqdn} took ${(new Date() - startTime)/1000} seconds`);
}
async function uploadAppSnapshot(backupSite, app, progressCallback) {
@@ -358,7 +358,7 @@ async function uploadAppSnapshot(backupSite, app, progressCallback) {
const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);
debug(`uploadAppSnapshot: ${app.fqdn} uploaded to ${remotePath}. ${(new Date() - startTime)/1000} seconds`);
log(`uploadAppSnapshot: ${app.fqdn} uploaded to ${remotePath}. ${(new Date() - startTime)/1000} seconds`);
await backupSites.setSnapshotInfo(backupSite, app.id, { timestamp: new Date().toISOString(), manifest: app.manifest });
@@ -385,7 +385,7 @@ async function backupAppWithTag(app, backupSite, tag, options, progressCallback)
const manifest = app.manifest;
const remotePath = addFileExtension(backupSite, `${tag}/app_${app.fqdn}_v${manifest.version}`);
debug(`backupAppWithTag: rotating ${app.fqdn} snapshot of ${backupSite.id} to path ${remotePath}`);
log(`backupAppWithTag: rotating ${app.fqdn} snapshot of ${backupSite.id} to path ${remotePath}`);
const data = {
remotePath,
@@ -456,7 +456,7 @@ async function uploadMailSnapshot(backupSite, progressCallback) {
const { stats, integrity } = await runBackupUpload(uploadConfig, progressCallback);
debug(`uploadMailSnapshot: took ${(new Date() - startTime)/1000} seconds`);
log(`uploadMailSnapshot: took ${(new Date() - startTime)/1000} seconds`);
await backupSites.setSnapshotInfo(backupSite, 'mail', { timestamp: new Date().toISOString() });
@@ -469,7 +469,7 @@ async function backupMailWithTag(backupSite, tag, options, progressCallback) {
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof progressCallback, 'function');
debug(`backupMailWithTag: backing up mail with tag ${tag}`);
log(`backupMailWithTag: backing up mail with tag ${tag}`);
const uploadStartTime = Date.now();
const uploadResult = await uploadMailSnapshot(backupSite, progressCallback); // { stats, integrity }
@@ -477,7 +477,7 @@ async function backupMailWithTag(backupSite, tag, options, progressCallback) {
const remotePath = addFileExtension(backupSite, `${tag}/mail_v${constants.VERSION}`);
debug(`backupMailWithTag: rotating mail snapshot of ${backupSite.id} to ${remotePath}`);
log(`backupMailWithTag: rotating mail snapshot of ${backupSite.id} to ${remotePath}`);
const data = {
remotePath,
@@ -519,7 +519,7 @@ async function downloadMail(backupSite, remotePath, progressCallback) {
const startTime = new Date();
await download(backupSite, remotePath, dataLayout, progressCallback);
debug('downloadMail: time: %s', (new Date() - startTime)/1000);
log('downloadMail: time: %s', (new Date() - startTime)/1000);
}
// this function is called from external process. calling process is expected to have a lock
@@ -544,11 +544,11 @@ async function fullBackup(backupSiteId, options, progressCallback) {
percent += step;
if (!app.enableBackup) {
debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) since automatic backup disabled`);
log(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) since automatic backup disabled`);
continue; // nothing to backup
}
if (!backupSites.hasContent(backupSite, app.id)) {
debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) as it is not in site contents`);
log(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) as it is not in site contents`);
continue;
}
@@ -556,7 +556,7 @@ async function fullBackup(backupSiteId, options, progressCallback) {
await locks.wait(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
const startTime = new Date();
const [appBackupError, appBackupResult] = await safe(backupAppWithTag(app, backupSite, tag, options, (progress) => progressCallback({ percent, message: progress.message })));
debug(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);
log(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);
await locks.release(`${locks.TYPE_APP_BACKUP_PREFIX}${app.id}`);
if (appBackupError) throw appBackupError;
if (appBackupResult) appBackupsMap.set(appBackupResult.id, appBackupResult.stats); // backupId can be null if in BAD_STATE and never backed up