diff --git a/src/apps.js b/src/apps.js index b3cdf9ddd..b047701fe 100644 --- a/src/apps.js +++ b/src/apps.js @@ -161,8 +161,6 @@ const appstore = require('./appstore.js'), validator = require('validator'), _ = require('underscore'); -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; - const APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState', 'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuShares', 'apps.label', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson', @@ -1499,8 +1497,8 @@ async function setDataDir(app, dataDir, auditSource) { const task = { args: { newDataDir: dataDir }, values: { }, - onFinished: (error) => { - if (!error) services.rebuildService('sftp', NOOP_CALLBACK); + onFinished: async (error) => { + if (!error) await safe(services.rebuildService('sftp'), { debug }); } }; const taskId = await addTask(appId, exports.ISTATE_PENDING_DATA_DIR_MIGRATION, task); diff --git a/src/cloudron.js b/src/cloudron.js index 2830369b3..4b7fe3f6f 100644 --- a/src/cloudron.js +++ b/src/cloudron.js @@ -25,7 +25,6 @@ exports = module.exports = { const apps = require('./apps.js'), appstore = require('./appstore.js'), assert = require('assert'), - async = require('async'), auditSource = require('./auditsource.js'), backups = require('./backups.js'), BoxError = require('./boxerror.js'), @@ -52,15 +51,12 @@ const apps = require('./apps.js'), split = require('split'), sysinfo = require('./sysinfo.js'), tasks = require('./tasks.js'), - users = require('./users.js'), - util = require('util'); + users = require('./users.js'); const REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh'); -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; - async function initialize() { - runStartupTasks(); + safe(runStartupTasks(), { debug }); await notifyUpdate(); } @@ -70,25 +66,21 @@ 
async function uninitialize() { await platform.stopAllTasks(); } -function onActivated(options, callback) { +async function onActivated(options) { assert.strictEqual(typeof options, 'object'); - assert.strictEqual(typeof callback, 'function'); debug('onActivated: running post activation tasks'); // Starting the platform after a user is available means: // 1. mail bounces can now be sent to the cloudron owner // 2. the restore code path can run without sudo (since mail/ is non-root) - async.series([ - platform.start.bind(null, options), - cron.startJobs, - // disable responding to api calls via IP to not leak domain info. this is carefully placed as the last item, so it buys - // the UI some time to query the dashboard domain in the restore code path - async () => { - await delay(30000); - await reverseProxy.writeDefaultConfig({ activated :true }); - } - ], callback); + await platform.start(options); + await cron.startJobs(); + + // disable responding to api calls via IP to not leak domain info. this is carefully placed as the last item, so it buys + // the UI some time to query the dashboard domain in the restore code path + await delay(30000); + await reverseProxy.writeDefaultConfig({ activated :true }); } async function notifyUpdate() { @@ -104,46 +96,45 @@ async function notifyUpdate() { } // each of these tasks can fail. we will add some routes to fix/re-run them -function runStartupTasks() { - const tasks = [ - // stop all the systemd tasks - platform.stopAllTasks, +async function runStartupTasks() { + const tasks = []; - // this configures collectd to collect backup storage metrics if filesystem is used. 
This is also triggerd when the settings change with the rest api - async function () { - const backupConfig = await settings.getBackupConfig(); - await backups.configureCollectd(backupConfig); - }, + // stop all the systemd tasks + tasks.push(platform.stopAllTasks); - // always generate webadmin config since we have no versioning mechanism for the ejs - async function () { - if (!settings.dashboardDomain()) return; + // this configures collectd to collect backup storage metrics if filesystem is used. This is also triggered when the settings change with the rest api + tasks.push(async function () { + const backupConfig = await settings.getBackupConfig(); + await backups.configureCollectd(backupConfig); + }); - await reverseProxy.writeDashboardConfig(settings.dashboardDomain()); - }, + // always generate webadmin config since we have no versioning mechanism for the ejs + tasks.push(async function () { + if (!settings.dashboardDomain()) return; + await reverseProxy.writeDashboardConfig(settings.dashboardDomain()); + }); + + tasks.push(async function () { // check activation state and start the platform - async function () { - const activated = await users.isActivated(); + const activated = await users.isActivated(); - // configure nginx to be reachable by IP when not activated. for the moment, the IP based redirect exists even after domain is setup - // just in case user forgot or some network error happenned in the middle (then browser refresh takes you to activation page) - // we remove the config as a simple security measure to not expose IP <-> domain - if (!activated) { - debug('runStartupTasks: not activated. generating IP based redirection config'); - return await reverseProxy.writeDefaultConfig({ activated: false }); - } - - await util.promisify(onActivated)({}); + // configure nginx to be reachable by IP when not activated. 
for the moment, the IP based redirect exists even after domain is setup + // just in case user forgot or some network error happened in the middle (then browser refresh takes you to activation page) + // we remove the config as a simple security measure to not expose IP <-> domain + if (!activated) { + debug('runStartupTasks: not activated. generating IP based redirection config'); + return await reverseProxy.writeDefaultConfig({ activated: false }); } - ]; + + await onActivated({}); + }); // we used to run tasks in parallel but simultaneous nginx reloads was causing issues - async.series(async.reflectAll(tasks), function (error, results) { - results.forEach((result, idx) => { - if (result.error) debug(`Startup task at index ${idx} failed: ${result.error.message}`); - }); - }); + for (let i = 0; i < tasks.length; i++) { + const [error] = await safe(tasks[i]()); + if (error) debug(`Startup task at index ${i} failed: ${error.message}`); + } } async function getConfig() { @@ -183,17 +174,16 @@ async function isRebootRequired() { return fs.existsSync('/var/run/reboot-required'); } -// called from cron.js -function runSystemChecks(callback) { - assert.strictEqual(typeof callback, 'function'); - +async function runSystemChecks() { debug('runSystemChecks: checking status'); - async.parallel([ - checkMailStatus, - checkRebootRequired, - checkUbuntuVersion - ], callback); + const checks = [ + checkMailStatus(), + checkRebootRequired(), + checkUbuntuVersion() + ]; + + await Promise.allSettled(checks); } async function checkMailStatus() { @@ -276,7 +266,7 @@ async function prepareDashboardDomain(domain, auditSource) { const taskId = await tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_LOCATION, domain, auditSource ]); - tasks.startTask(taskId, {}, NOOP_CALLBACK); + tasks.startTask(taskId, {}); return taskId; } @@ -312,7 +302,7 @@ async function updateDashboardDomain(domain, auditSource) { await setDashboardDomain(domain, auditSource); - 
services.rebuildService('turn', NOOP_CALLBACK); // to update the realm variable + safe(services.rebuildService('turn'), { debug }); // to update the realm variable } async function renewCerts(options, auditSource) { @@ -320,7 +310,7 @@ async function renewCerts(options, auditSource) { assert.strictEqual(typeof auditSource, 'object'); const taskId = await tasks.add(tasks.TASK_CHECK_CERTS, [ options, auditSource ]); - tasks.startTask(taskId, {}, NOOP_CALLBACK); + tasks.startTask(taskId, {}); return taskId; } @@ -347,6 +337,6 @@ async function syncDnsRecords(options) { assert.strictEqual(typeof options, 'object'); const taskId = await tasks.add(tasks.TASK_SYNC_DNS_RECORDS, [ options ]); - tasks.startTask(taskId, {}, NOOP_CALLBACK); + tasks.startTask(taskId, {}); return taskId; } diff --git a/src/cron.js b/src/cron.js index ddb030cb5..90450b4f4 100644 --- a/src/cron.js +++ b/src/cron.js @@ -19,7 +19,6 @@ exports = module.exports = { const appHealthMonitor = require('./apphealthmonitor.js'), apps = require('./apps.js'), assert = require('assert'), - async = require('async'), auditSource = require('./auditsource.js'), backups = require('./backups.js'), cloudron = require('./cloudron.js'), @@ -53,8 +52,6 @@ const gJobs = { appHealthMonitor: null }; -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; - // cron format // Seconds: 0-59 // Minutes: 0-59 @@ -69,7 +66,7 @@ async function startJobs() { const randomTick = Math.floor(60*Math.random()); gJobs.systemChecks = new CronJob({ cronTime: '00 30 2 * * *', // once a day. 
if you change this interval, change the notification messages with correct duration - onTick: () => cloudron.runSystemChecks(NOOP_CALLBACK), + onTick: async () => await safe(cloudron.runSystemChecks(), { debug }), start: true }); @@ -88,25 +85,25 @@ async function startJobs() { gJobs.cleanupTokens = new CronJob({ cronTime: '00 */30 * * * *', // every 30 minutes - onTick: janitor.cleanupTokens, + onTick: async () => await safe(janitor.cleanupTokens(), { debug }), start: true }); gJobs.cleanupBackups = new CronJob({ cronTime: DEFAULT_CLEANUP_BACKUPS_PATTERN, - onTick: backups.startCleanupTask.bind(null, auditSource.CRON), + onTick: async () => await safe(backups.startCleanupTask(auditSource.CRON), { debug }), start: true }); gJobs.cleanupEventlog = new CronJob({ cronTime: '00 */30 * * * *', // every 30 minutes - onTick: eventlog.cleanup.bind(null, { creationTime: new Date(Date.now() - 60 * 60 * 24 * 10 * 1000) }), // 10 days ago + onTick: async () => await safe(eventlog.cleanup({ creationTime: new Date(Date.now() - 60 * 60 * 24 * 10 * 1000) }), { debug }), // 10 days ago start: true }); gJobs.dockerVolumeCleaner = new CronJob({ cronTime: '00 00 */12 * * *', // every 12 hours - onTick: janitor.cleanupDockerVolumes, + onTick: async () => await safe(janitor.cleanupDockerVolumes(), { debug }), start: true }); @@ -118,7 +115,7 @@ async function startJobs() { gJobs.certificateRenew = new CronJob({ cronTime: '00 00 */12 * * *', // every 12 hours - onTick: cloudron.renewCerts.bind(null, {}, auditSource.CRON, NOOP_CALLBACK), + onTick: async () => await safe(cloudron.renewCerts({}, auditSource.CRON), { debug }), start: true }); @@ -137,7 +134,7 @@ async function startJobs() { } // eslint-disable-next-line no-unused-vars -function handleSettingsChanged(key, value) { +async function handleSettingsChanged(key, value) { assert.strictEqual(typeof key, 'string'); // value is a variant @@ -147,10 +144,8 @@ function handleSettingsChanged(key, value) { case 
settings.AUTOUPDATE_PATTERN_KEY: case settings.DYNAMIC_DNS_KEY: debug('handleSettingsChanged: recreating all jobs'); - async.series([ - stopJobs, - startJobs - ], NOOP_CALLBACK); + await stopJobs(); + await startJobs(); break; default: break; diff --git a/src/janitor.js b/src/janitor.js index 6729e8dff..9849f415a 100644 --- a/src/janitor.js +++ b/src/janitor.js @@ -1,7 +1,6 @@ 'use strict'; const assert = require('assert'), - async = require('async'), BoxError = require('./boxerror.js'), debug = require('debug')('box:janitor'), Docker = require('dockerode'), @@ -13,8 +12,6 @@ exports = module.exports = { cleanupDockerVolumes }; -const NOOP_CALLBACK = function () { }; - const gConnection = new Docker({ socketPath: '/var/run/docker.sock' }); async function cleanupTokens() { @@ -26,44 +23,34 @@ async function cleanupTokens() { debug(`Cleaned up ${result} expired tokens`,); } -function cleanupTmpVolume(containerInfo, callback) { +async function cleanupTmpVolume(containerInfo) { assert.strictEqual(typeof containerInfo, 'object'); - assert.strictEqual(typeof callback, 'function'); - var cmd = 'find /tmp -type f -mtime +10 -exec rm -rf {} +'.split(' '); // 10 day old files + const cmd = 'find /tmp -type f -mtime +10 -exec rm -rf {} +'.split(' '); // 10 day old files debug('cleanupTmpVolume %j', containerInfo.Names); - gConnection.getContainer(containerInfo.Id).exec({ Cmd: cmd, AttachStdout: true, AttachStderr: true, Tty: false }, function (error, execContainer) { - if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, `Failed to exec container: ${error.message}`)); + const [error, execContainer] = await safe(gConnection.getContainer(containerInfo.Id).exec({ Cmd: cmd, AttachStdout: true, AttachStderr: true, Tty: false })); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Failed to exec container: ${error.message}`); - execContainer.start({ hijack: true }, function (error, stream) { - if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, `Failed to 
start exec container: ${error.message}`)); + const [startError, stream] = await safe(execContainer.start({ hijack: true })); + if (startError) throw new BoxError(BoxError.DOCKER_ERROR, `Failed to start exec container: ${startError.message}`); - stream.on('error', callback); - stream.on('end', callback); + gConnection.modem.demuxStream(stream, process.stdout, process.stderr); - gConnection.modem.demuxStream(stream, process.stdout, process.stderr); - }); + return new Promise((resolve, reject) => { + stream.on('error', (error) => reject(new BoxError(BoxError.DOCKER_ERROR, `Failed to cleanup in exec container: ${error.message}`))); + stream.on('end', resolve); }); } -function cleanupDockerVolumes(callback) { - assert(!callback || typeof callback === 'function'); // callback is null when called from cronjob - - callback = callback || NOOP_CALLBACK; - +async function cleanupDockerVolumes() { debug('Cleaning up docker volumes'); - gConnection.listContainers({ all: 0 }, function (error, containers) { - if (error) return callback(error); + const [error, containers] = await safe(gConnection.listContainers({ all: 0 })); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - async.eachSeries(containers, function (container, iteratorDone) { - cleanupTmpVolume(container, function (error) { - if (error) debug('Error cleaning tmp: %s', error); - - iteratorDone(); // intentionally ignore error - }); - }, callback); - }); + for (const container of containers) { + await safe(cleanupTmpVolume(container), { debug }); // intentionally ignore error + } } diff --git a/src/mail.js b/src/mail.js index 79a83730d..6831d4927 100644 --- a/src/mail.js +++ b/src/mail.js @@ -102,7 +102,6 @@ const assert = require('assert'), _ = require('underscore'); const DNS_OPTIONS = { timeout: 5000 }; -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; const REMOVE_MAILBOX_CMD = path.join(__dirname, 'scripts/rmmailbox.sh'); const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 
'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active' ].join(','); @@ -978,7 +977,7 @@ async function setLocation(subdomain, domain, auditSource) { await settings.setMailLocation(domain, fqdn); const taskId = await tasks.add(tasks.TASK_CHANGE_MAIL_LOCATION, [ auditSource ]); - tasks.startTask(taskId, {}, NOOP_CALLBACK); + tasks.startTask(taskId, {}); await eventlog.add(eventlog.ACTION_MAIL_LOCATION, auditSource, { subdomain, domain, taskId }); return taskId; diff --git a/src/provision.js b/src/provision.js index 9a1dbb29e..8e6248aef 100644 --- a/src/provision.js +++ b/src/provision.js @@ -30,10 +30,8 @@ const assert = require('assert'), util = require('util'), _ = require('underscore'); -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; - // we cannot use tasks since the tasks table gets overwritten when db is imported -let gProvisionStatus = { +const gProvisionStatus = { setup: { active: false, message: '', @@ -136,7 +134,7 @@ async function activate(username, password, email, displayName, ip, auditSource) eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, {}); - setImmediate(cloudron.onActivated.bind(null, {}, NOOP_CALLBACK)); + setImmediate(() => safe(cloudron.onActivated({}), { debug })); return { userId: ownerId, @@ -164,7 +162,7 @@ async function restoreTask(backupConfig, backupId, sysinfoConfig, options, audit await settings.setBackupCredentials(backupConfig); // update just the credentials and not the policy and flags await eventlog.add(eventlog.ACTION_RESTORE, auditSource, { backupId }); - setImmediate(cloudron.onActivated.bind(null, options, NOOP_CALLBACK)); + setImmediate(() => safe(cloudron.onActivated(options), { debug })); } catch (error) { gProvisionStatus.restore.errorMessage = error ? 
error.message : ''; } diff --git a/src/tasks.js b/src/tasks.js index c8de5c7e2..9f336a1e5 100644 --- a/src/tasks.js +++ b/src/tasks.js @@ -159,7 +159,7 @@ async function add(type, args) { function startTask(id, options, callback) { assert.strictEqual(typeof id, 'string'); assert.strictEqual(typeof options, 'object'); - assert.strictEqual(typeof callback, 'function'); + assert(typeof callback === 'undefined' || typeof callback === 'function'); const logFile = options.logFile || `${paths.TASKS_LOG_DIR}/${id}.log`; debug(`startTask - starting task ${id} with options ${JSON.stringify(options)}. logs at ${logFile}`); @@ -202,9 +202,9 @@ function startTask(id, options, callback) { delete gTasks[id]; - callback(taskError, task ? task.result : null); + if (callback) callback(taskError, task ? task.result : null); - debug(`startTask: ${id} done`); + debug(`startTask: ${id} done. error: `, taskError); }); if (options.timeout) { diff --git a/src/volumes.js b/src/volumes.js index c1b63fa01..01b9cc2d6 100644 --- a/src/volumes.js +++ b/src/volumes.js @@ -28,7 +28,6 @@ const assert = require('assert'), const VOLUMES_FIELDS = [ 'id', 'name', 'hostPath', 'creationTime', 'mountType', 'mountOptionsJson' ].join(','); const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/volume.ejs', { encoding: 'utf8' }); -const NOOP_CALLBACK = function (error) { if (error) debug(error); }; function postProcess(result) { assert.strictEqual(typeof result, 'object'); @@ -100,7 +99,7 @@ async function add(volume, auditSource) { eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath: volume.hostPath }); // in theory, we only need to do this mountpoint volumes. 
but for some reason a restart is required to detect new "mounts" - services.rebuildService('sftp', NOOP_CALLBACK); + safe(services.rebuildService('sftp'), { debug }); const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { volumeId: id, hostPath: volume.hostPath }); await collectd.addProfile(id, collectdConf); @@ -153,7 +152,7 @@ async function del(volume, auditSource) { eventlog.add(eventlog.ACTION_VOLUME_REMOVE, auditSource, { volume }); if (volume.mountType === 'mountpoint' || volume.mountType === 'filesystem') { - services.rebuildService('sftp', NOOP_CALLBACK); + safe(services.rebuildService('sftp'), { debug }); } else { await safe(mounts.removeMount(volume)); }