- Convert all require()/module.exports to import/export across 260+ files - Add "type": "module" to package.json to enable ESM by default - Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible - Convert eslint.config.js to ESM with sourceType: "module" - Replace __dirname/__filename with import.meta.dirname/import.meta.filename - Replace require.main === module with process.argv[1] === import.meta.filename - Remove 'use strict' directives (implicit in ESM) - Convert dynamic require() in switch statements to static import lookup maps (dns.js, domains.js, backupformats.js, backupsites.js, network.js) - Extract self-referencing exports.CONSTANT patterns into standalone const declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.) - Lazify SERVICES object in services.js to avoid circular dependency TDZ issues - Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests - Add _setMockApp() to ldapserver.js for ESM-safe test mocking - Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests - Convert backupupload.js to use dynamic imports so --check exits before loading the module graph (which requires BOX_ENV) - Update check-install to use ESM import for infra_version.js - Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations) - All 1315 tests passing Migration stats (AI-assisted using Cursor with Claude): - Wall clock time: ~3-4 hours - Assistant completions: ~80-100 - Estimated token usage: ~1-2M tokens Co-authored-by: Cursor <cursoragent@cursor.com>
310 lines
10 KiB
JavaScript
310 lines
10 KiB
JavaScript
import * as appHealthMonitor from './apphealthmonitor.js';
|
|
import assert from 'node:assert';
|
|
import * as appstore from './appstore.js';
|
|
import AuditSource from './auditsource.js';
|
|
import * as backupSites from './backupsites.js';
|
|
import * as cloudron from './cloudron.js';
|
|
import constants from './constants.js';
|
|
import { CronJob } from 'cron';
|
|
import debugModule from 'debug';
|
|
import * as domains from './domains.js';
|
|
import * as dyndns from './dyndns.js';
|
|
import * as externalLdap from './externalldap.js';
|
|
import eventlog from './eventlog.js';
|
|
import * as janitor from './janitor.js';
|
|
import * as mail from './mail.js';
|
|
import * as metrics from './metrics.js';
|
|
import * as network from './network.js';
|
|
import * as oidcServer from './oidcserver.js';
|
|
import paths from './paths.js';
|
|
import * as reverseProxy from './reverseproxy.js';
|
|
import safe from 'safetydance';
|
|
import * as scheduler from './scheduler.js';
|
|
import * as system from './system.js';
|
|
import * as updater from './updater.js';
|
|
import util from 'node:util';
|
|
|
|
const debug = debugModule('box:cron');
|
|
|
|
// IMPORTANT: These patterns are together because they spin tasks which acquire a lock
|
|
// If the patterns overlap all the time, then the task may not ever get a chance to run!
|
|
// If you change this change dashboard patterns in settings.html
|
|
// One declaration per const (these were previously comma-chained into a single statement).
const DEFAULT_CLEANUP_BACKUPS_PATTERN = '00 30 1,3,5,23 * * *';
const DEFAULT_AUTOUPDATE_PATTERN = '00 00 1,3,5,23 * * *';
|
|
|
|
// Public interface: job lifecycle (startJobs/stopJobs) plus change handlers that
// other subsystems invoke when their configuration changes, and the default
// autoupdate cron pattern used by the settings UI/defaults.
export {
    startJobs,
    stopJobs,

    handleBackupScheduleChanged,
    handleTimeZoneChanged,
    handleAutoupdatePatternChanged,
    handleDynamicDnsChanged,
    handleExternalLdapChanged,

    DEFAULT_AUTOUPDATE_PATTERN,
};
|
|
|
|
// Registry of handles to all scheduled cron jobs so they can be stopped and
// recreated later (see stopJobs/handle*Changed). Each value is either null
// (not currently scheduled) or a CronJob instance; `backups` is a Map of
// backup-site id -> CronJob because each backup site gets its own schedule.
const gJobs = {
    autoUpdater: null,
    backups: new Map(), // one job per backup site
    updateChecker: null,
    systemChecks: null,
    mailStatusCheck: null,
    diskSpaceChecker: null,
    certificateRenew: null,
    cleanupBackups: null,
    cleanupEventlog: null,
    cleanupTokens: null,
    cleanupOidc: null,
    dockerVolumeCleaner: null,
    dynamicDns: null,
    schedulerSync: null,
    appHealthMonitor: null,
    externalLdapSyncer: null,
    checkDomainConfigs: null,
    collectStats: null,
    subscriptionChecker: null,
};
|
|
|
|
// cron format
|
|
// Seconds: 0-59
|
|
// Minutes: 0-59
|
|
// Hours: 0-23
|
|
// Day of Month: 1-31
|
|
// Months: 0-11
|
|
// Day of Week: 0-6
|
|
|
|
// Returns a persistent { hour, minute } seed used to spread periodic jobs
// across Cloudron instances. The seed is read from CRON_SEED_FILE ("H:M");
// if missing or out of range, a new random seed is generated and persisted.
function getCronSeed() {
    let hour = null;
    let minute = null;

    const seedData = safe.fs.readFileSync(paths.CRON_SEED_FILE, 'utf8') || '';
    const parts = seedData.split(':');
    if (parts.length === 2) {
        // Do NOT use `parseInt(...) || null` here: 0 is a valid hour/minute and
        // `|| null` would discard it, regenerating the seed on every startup.
        const parsedHour = Number.parseInt(parts[0], 10);
        const parsedMinute = Number.parseInt(parts[1], 10);
        if (Number.isInteger(parsedHour)) hour = parsedHour;
        if (Number.isInteger(parsedMinute)) minute = parsedMinute;
    }

    // Valid cron ranges are hour 0-23 and minute 0-59 (the old check used
    // `minute > 60`, letting the invalid minute 60 through).
    if ((hour === null || hour < 0 || hour > 23) || (minute === null || minute < 0 || minute > 59)) {
        hour = Math.floor(24 * Math.random());
        minute = Math.floor(60 * Math.random());

        debug(`getCronSeed: writing new cron seed file with ${hour}:${minute} to ${paths.CRON_SEED_FILE}`);

        safe.fs.writeFileSync(paths.CRON_SEED_FILE, `${hour}:${minute}`);
    }

    return { hour, minute };
}
|
|
|
|
// Creates and starts all recurring cron jobs, then applies the currently
// configured backup schedules, autoupdate pattern, dynamic DNS and external
// LDAP settings. Uses the persisted cron seed to randomize per-Cloudron times.
async function startJobs() {
    const { hour, minute } = getCronSeed();

    debug(`startJobs: starting cron jobs with hour ${hour} and minute ${minute}`);

    gJobs.systemChecks = CronJob.from({
        cronTime: `00 ${minute} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
        onTick: async () => await safe(system.runSystemChecks(), { debug }),
        start: true
    });

    gJobs.mailStatusCheck = CronJob.from({
        cronTime: `00 ${minute} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
        onTick: async () => await safe(mail.checkStatus(), { debug }),
        start: true
    });

    gJobs.diskSpaceChecker = CronJob.from({
        cronTime: '00 30 * * * *', // every 30 minutes. if you change this interval, change the notification messages with correct duration
        onTick: async () => await safe(system.checkDiskSpace(), { debug }),
        start: true
    });

    // this is run separately from the update itself so that the user can disable automatic updates but can still get a notification
    // fix: assign the field declared in gJobs (this previously assigned an
    // undeclared gJobs.updateCheckerJob, leaving gJobs.updateChecker unused)
    gJobs.updateChecker = CronJob.from({
        cronTime: `00 ${minute} 1,5,9,13,17,21,23 * * *`,
        onTick: async () => await safe(updater.checkForUpdates({ stableOnly: true }), { debug }),
        start: true
    });

    gJobs.cleanupTokens = CronJob.from({
        cronTime: '00 */30 * * * *', // every 30 minutes
        onTick: async () => await safe(janitor.cleanupTokens(), { debug }),
        start: true
    });

    gJobs.cleanupOidc = CronJob.from({
        cronTime: '00 10 * * * *', // every hour ten minutes past
        onTick: async () => await safe(oidcServer.cleanupExpired(), { debug }),
        start: true
    });

    gJobs.cleanupBackups = CronJob.from({
        cronTime: DEFAULT_CLEANUP_BACKUPS_PATTERN,
        onTick: async () => {
            for (const backupSite of await backupSites.list()) {
                await safe(backupSites.startCleanupTask(backupSite, AuditSource.CRON), { debug });
            }
        },
        start: true
    });

    gJobs.cleanupEventlog = CronJob.from({
        cronTime: '00 */30 * * * *', // every 30 minutes
        onTick: async () => await safe(eventlog.cleanup({ creationTime: new Date(Date.now() - 90 * 24 * 60 * 60 * 1000) }), { debug }), // 90 days ago (days * hours * minutes * seconds * ms)
        start: true
    });

    gJobs.dockerVolumeCleaner = CronJob.from({
        cronTime: '00 00 */12 * * *', // every 12 hours
        onTick: async () => await safe(janitor.cleanupDockerVolumes(), { debug }),
        start: true
    });

    gJobs.schedulerSync = CronJob.from({
        cronTime: constants.TEST ? '*/10 * * * * *' : '00 */1 * * * *', // every minute
        onTick: async () => await safe(scheduler.sync(), { debug }),
        start: true
    });

    // randomized per Cloudron based on hourlySeed
    gJobs.certificateRenew = CronJob.from({
        cronTime: `00 10 ${hour} * * *`,
        onTick: async () => await safe(reverseProxy.startRenewCerts({}, AuditSource.CRON), { debug }),
        start: true
    });

    gJobs.checkDomainConfigs = CronJob.from({
        cronTime: `00 ${minute} 5 * * *`, // once a day
        onTick: async () => await safe(domains.checkConfigs(AuditSource.CRON), { debug }),
        start: true
    });

    gJobs.appHealthMonitor = CronJob.from({
        cronTime: '*/10 * * * * *', // every 10 seconds
        onTick: async () => await safe(appHealthMonitor.run(10), { debug }), // 10 is the max run time
        start: true
    });

    gJobs.collectStats = CronJob.from({
        cronTime: '*/20 * * * * *', // every 20 seconds. if you change this, change carbon config
        onTick: async () => await safe(metrics.sendToGraphite(), { debug }),
        start: true
    });

    gJobs.subscriptionChecker = CronJob.from({
        cronTime: `00 ${minute} ${hour} * * *`, // once a day based on seed to randomize
        onTick: async () => await safe(appstore.checkSubscription(), { debug }),
        start: true
    });

    // schedule the config-dependent jobs from current settings
    for (const backupSite of await backupSites.list()) {
        await handleBackupScheduleChanged(backupSite);
    }
    await handleAutoupdatePatternChanged(await updater.getAutoupdatePattern());
    await handleDynamicDnsChanged(await network.getDynamicDns());
    await handleExternalLdapChanged(await externalLdap.getConfig());
}
|
|
|
|
// (Re)schedules the backup job for a single backup site. Any existing job for
// the site is stopped first; CRON_PATTERN_NEVER leaves the site unscheduled.
async function handleBackupScheduleChanged(site) {
    assert.strictEqual(typeof site, 'object');

    const timeZone = await cloudron.getTimeZone();

    debug(`handleBackupScheduleChanged: schedule ${site.schedule} (${timeZone})`);

    // drop any previously scheduled job for this site
    const existingJob = gJobs.backups.get(site.id);
    if (existingJob) existingJob.stop();
    gJobs.backups.delete(site.id);

    if (site.schedule === constants.CRON_PATTERN_NEVER) return;

    gJobs.backups.set(site.id, CronJob.from({
        cronTime: site.schedule,
        onTick: async () => {
            // re-fetch in case the site was removed after scheduling
            const currentSite = await backupSites.get(site.id);
            if (!currentSite) return;
            await safe(backupSites.startBackupTask(currentSite, AuditSource.CRON), { debug });
        },
        start: true,
        timeZone
    }));
}
|
|
|
|
// Recreates all cron jobs after a time zone change. Jobs capture their time
// zone when constructed, so everything must be torn down and rebuilt.
// tz: the new time zone string (validated only; the handlers re-read the zone
// via cloudron.getTimeZone() when jobs are recreated).
async function handleTimeZoneChanged(tz) {
    assert.strictEqual(typeof tz, 'string');

    debug('handleTimeZoneChanged: recreating all jobs');
    await stopJobs();
    await scheduler.deleteJobs(); // have to re-create with new tz
    await startJobs();
}
|
|
|
|
// (Re)schedules the automatic-update job with the given cron pattern.
// CRON_PATTERN_NEVER disables automatic updates entirely.
async function handleAutoupdatePatternChanged(pattern) {
    assert.strictEqual(typeof pattern, 'string');

    const timeZone = await cloudron.getTimeZone();

    debug(`autoupdatePatternChanged: pattern - ${pattern} (${timeZone})`);

    // stop and clear any existing job before applying the new pattern
    gJobs.autoUpdater?.stop();
    gJobs.autoUpdater = null;

    if (pattern === constants.CRON_PATTERN_NEVER) return;

    gJobs.autoUpdater = CronJob.from({
        cronTime: pattern,
        onTick: async () => await safe(updater.autoUpdate(AuditSource.CRON), { debug }),
        start: true,
        timeZone
    });
}
|
|
|
|
// Enables or disables the periodic dynamic DNS refresh job.
function handleDynamicDnsChanged(enabled) {
    assert.strictEqual(typeof enabled, 'boolean');

    debug('Dynamic DNS setting changed to %s', enabled);

    // always clear the old job; re-create below only if enabled
    gJobs.dynamicDns?.stop();
    gJobs.dynamicDns = null;

    if (!enabled) return;

    gJobs.dynamicDns = CronJob.from({
        // until we can be smarter about actual IP changes, lets ensure it every 10 minutes
        cronTime: '00 */10 * * * *',
        onTick: async () => { await safe(dyndns.refreshDns(AuditSource.CRON), { debug }); },
        start: true
    });
}
|
|
|
|
// (Re)schedules the external LDAP sync job based on the current provider
// config. The 'noop' provider means external LDAP is disabled.
async function handleExternalLdapChanged(config) {
    assert.strictEqual(typeof config, 'object');

    // drop any previously scheduled syncer before reconfiguring
    gJobs.externalLdapSyncer?.stop();
    gJobs.externalLdapSyncer = null;

    if (config.provider === 'noop') return;

    gJobs.externalLdapSyncer = CronJob.from({
        cronTime: '00 00 */4 * * *', // every 4 hours
        onTick: async () => await safe(externalLdap.startSyncer(AuditSource.CRON), { debug }),
        start: true
    });
}
|
|
|
|
// Stops every running cron job and resets the registry entries (Maps become
// empty Maps, single jobs become null) so jobs can be cleanly recreated.
async function stopJobs() {
    for (const [jobName, job] of Object.entries(gJobs)) {
        if (!job) continue;

        if (util.types.isMap(job)) { // e.g. per-site backup jobs
            for (const entry of job.values()) entry.stop();
            gJobs[jobName] = new Map();
        } else {
            job.stop();
            gJobs[jobName] = null;
        }
    }
}
|