Files
cloudron-box/src/updater.js

375 lines
16 KiB
JavaScript
Raw Normal View History

2018-07-31 11:35:23 -07:00
'use strict';

// Public interface of the updater module: box/app update checking,
// box update task orchestration and cron-driven auto updates.
exports = module.exports = {
    setAutoupdatePattern,
    getAutoupdatePattern,
    startBoxUpdateTask,
    updateBox,
    autoUpdate,
    notifyBoxUpdate,
    checkForUpdates,
    checkAppUpdate,
    checkBoxUpdate,
    getBoxUpdate,
};
2021-08-31 11:16:58 -07:00
const apps = require('./apps.js'),
2025-06-26 13:41:09 +02:00
appstore = require('./appstore.js'),
assert = require('node:assert'),
2023-08-12 19:28:07 +05:30
AuditSource = require('./auditsource.js'),
2019-10-23 09:39:26 -07:00
BoxError = require('./boxerror.js'),
2025-09-12 09:48:37 +02:00
backupSites = require('./backupsites.js'),
2021-08-31 11:16:58 -07:00
backuptask = require('./backuptask.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
{ CronTime } = require('cron'),
crypto = require('node:crypto'),
2018-07-31 11:35:23 -07:00
debug = require('debug')('box:updater'),
df = require('./df.js'),
eventlog = require('./eventlog.js'),
fs = require('node:fs'),
remove global lock Currently, the update/apptask/fullbackup/platformstart take a global lock and cannot run in parallel. This causes situations where when a user tries to trigger an apptask, it says "waiting for backup to finish..." etc The solution is to let them run in parallel. We need a lock at the app level as app operations running in parallel would be bad (tm). In addition, the update task needs a lock just for the update part. We also need multi-process locks. Running tasks as processes is core to our "kill" strategy. Various inter process locks were explored: * node's IPC mechanism with process.send(). But this only works for direct node.js children. taskworker is run via sudo and the IPC does not work. * File lock using O_EXCL. Basic ideas to create lock files. While file creation can be done atomically, it becomes complicated to clean up lock files when the tasks crash. We need a way to know what locks were held by the crashing task. flock and friends are not built-into node.js * sqlite/redis were options but introduce additional deps * Settled on MySQL based locking. Initial plan was to have row locks or table locks. Each row is a kind of lock. While implementing, it was found that we need many types of locks (and not just update lock and app locks). For example, we need locks for each task type, so that only one task type is active at a time. * Instead of rows, we can just lock table and have a json blob in it. This hit a road block that LOCK TABLE is per session and our db layer cannot handle this easily! i.e when issing two db.query() it might use two different connections from the pool. We have to expose the connection, release connection etc. * Next idea was atomic blob update of the blob checking if old blob was same. This approach, was finally refined into a version field. Phew!
2024-12-07 14:35:45 +01:00
locks = require('./locks.js'),
notifications = require('./notifications.js'),
os = require('node:os'),
path = require('node:path'),
2018-08-01 15:38:40 -07:00
paths = require('./paths.js'),
2021-08-31 13:12:14 -07:00
promiseRetry = require('./promise-retry.js'),
2018-08-01 15:38:40 -07:00
safe = require('safetydance'),
semver = require('semver'),
2021-02-01 14:07:23 -08:00
settings = require('./settings.js'),
2024-10-14 19:10:31 +02:00
shell = require('./shell.js')('updater'),
tasks = require('./tasks.js');
2018-07-31 11:35:23 -07:00
2018-08-01 15:38:40 -07:00
// GPG public keyring used to verify signatures of release artifacts
const RELEASES_PUBLIC_KEY = path.join(__dirname, 'releases.gpg');
// script that performs the actual box code update (run via sudo)
const UPDATE_CMD = path.join(__dirname, 'scripts/update.sh');
// Persists the auto-update cron pattern after validating it and notifies the cron subsystem.
// Throws BoxError.BAD_FIELD when the pattern is not a valid cron expression.
async function setAutoupdatePattern(pattern) {
    assert.strictEqual(typeof pattern, 'string');

    if (pattern !== constants.CRON_PATTERN_NEVER) { // 'never' is a sentinel, not a cron expression
        const parsed = safe.safeCall(() => new CronTime(pattern));
        if (!parsed) throw new BoxError(BoxError.BAD_FIELD, 'Invalid pattern');
    }

    await settings.set(settings.AUTOUPDATE_PATTERN_KEY, pattern);
    await cron.handleAutoupdatePatternChanged(pattern);
}
// Returns the configured auto-update cron pattern, falling back to the default pattern.
async function getAutoupdatePattern() {
    const configured = await settings.get(settings.AUTOUPDATE_PATTERN_KEY);
    return configured || cron.DEFAULT_AUTOUPDATE_PATTERN;
}
2025-06-26 13:41:09 +02:00
// Downloads url to file using curl, retrying up to 10 times with a 5 second interval.
// Throws BoxError.EXTERNAL_ERROR for a bad url and BoxError.NETWORK_ERROR on download failure.
async function downloadBoxUrl(url, file) {
    assert.strictEqual(typeof file, 'string');

    // do not assert since it comes from the appstore
    if (typeof url !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, `url cannot be download to ${file} as it is not a string`);

    safe.fs.unlinkSync(file); // drop any stale artifact of a previous attempt

    await promiseRetry({ times: 10, interval: 5000, debug }, async () => {
        debug(`downloadBoxUrl: downloading ${url} to ${file}`);

        const [curlError] = await safe(shell.spawn('curl', ['-s', '--fail', url, '-o', file], { encoding: 'utf8' }));
        if (curlError) throw new BoxError(BoxError.NETWORK_ERROR, `Failed to download ${url}: ${curlError.message}`);

        debug('downloadBoxUrl: done');
    });
}
2025-06-26 13:41:09 +02:00
// Verifies the detached signature `sig` of `file` against the bundled releases keyring.
// Throws BoxError.NOT_SIGNED when gpg fails or the signature is not from the release key.
async function gpgVerifyBoxTarball(file, sig) {
    assert.strictEqual(typeof file, 'string');
    assert.strictEqual(typeof sig, 'string');

    const gpgArgs = ['--status-fd', '1', '--no-default-keyring', '--keyring', RELEASES_PUBLIC_KEY, '--verify', sig, file];
    const [error, stdout] = await safe(shell.spawn('/usr/bin/gpg', gpgArgs, { encoding: 'utf8' }));
    if (error) {
        debug(`gpgVerifyBoxTarball: command failed. error: ${error}\n stdout: ${error.stdout}\n stderr: ${error.stderr}`);
        throw new BoxError(BoxError.NOT_SIGNED, `The signature in ${path.basename(sig)} could not be verified (command failed)`);
    }

    // --status-fd 1 makes gpg print machine-readable status lines; require a VALIDSIG from the release key fingerprint
    if (stdout.includes('[GNUPG:] VALIDSIG 0EADB19CDDA23CD0FE71E3470A372F8703C493CC')) return; // success

    debug(`gpgVerifyBoxTarball: verification of ${sig} failed: ${stdout}\n`);

    throw new BoxError(BoxError.NOT_SIGNED, `The signature in ${path.basename(sig)} could not be verified (bad sig)`);
}
2025-06-26 13:41:09 +02:00
// Unpacks `tarball` into `dir` and removes the tarball afterwards.
// Throws BoxError.FS_ERROR when extraction fails.
async function extractBoxTarball(tarball, dir) {
    assert.strictEqual(typeof tarball, 'string');
    assert.strictEqual(typeof dir, 'string');

    debug(`extractBoxTarball: extracting ${tarball} to ${dir}`);

    const [tarError] = await safe(shell.spawn('tar', ['-zxf', tarball, '-C', dir], { encoding: 'utf8' }));
    if (tarError) throw new BoxError(BoxError.FS_ERROR, `Failed to extract release package: ${tarError.message}`);

    safe.fs.unlinkSync(tarball); // tarball is no longer needed once extracted

    debug('extractBoxTarball: extracted');
}
2025-06-26 13:41:09 +02:00
// Cross-checks updateInfo (which came from the appstore) against the signed versions file:
// the 'next' release of the currently running version must reference the same source tarball url.
// Throws BoxError.EXTERNAL_ERROR on any mismatch or missing version entry.
async function verifyBoxUpdateInfo(versionsFile, updateInfo) {
    assert.strictEqual(typeof versionsFile, 'string');
    assert.strictEqual(typeof updateInfo, 'object');

    const releases = safe.JSON.parse(safe.fs.readFileSync(versionsFile, 'utf8')) || {};

    const currentRelease = releases[constants.VERSION];
    if (!currentRelease) throw new BoxError(BoxError.EXTERNAL_ERROR, `No version info for ${constants.VERSION}`);
    if (!currentRelease.next) throw new BoxError(BoxError.EXTERNAL_ERROR, `No next version info for ${constants.VERSION}`);

    const nextRelease = releases[currentRelease.next];
    if (typeof nextRelease !== 'object' || !nextRelease) throw new BoxError(BoxError.EXTERNAL_ERROR, 'No next version info');
    if (nextRelease.sourceTarballUrl !== updateInfo.sourceTarballUrl) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Version info mismatch');
}
2025-06-26 13:41:09 +02:00
// Downloads the versions file and source tarball of the release described by updateInfo,
// verifies both signatures, cross-checks the tarball url against the signed versions file
// and extracts the sources into a fresh temp directory. Returns { file: <extracted dir> }.
async function downloadAndVerifyBoxRelease(updateInfo) {
    assert.strictEqual(typeof updateInfo, 'object');

    // clean up extracted sources left behind by previous update attempts
    const tmpEntries = await fs.promises.readdir(os.tmpdir());
    for (const entry of tmpEntries) {
        if (!entry.startsWith('box-')) continue;
        const artifactPath = path.join(os.tmpdir(), entry);
        debug(`downloadAndVerifyBoxRelease: removing old artifact ${artifactPath}`);
        await fs.promises.rm(artifactPath, { recursive: true, force: true });
    }

    // verify the signed versions file first; it pins the expected source tarball url
    await downloadBoxUrl(updateInfo.boxVersionsUrl, `${paths.UPDATE_DIR}/versions.json`);
    await downloadBoxUrl(updateInfo.boxVersionsSigUrl, `${paths.UPDATE_DIR}/versions.json.sig`);
    await gpgVerifyBoxTarball(`${paths.UPDATE_DIR}/versions.json`, `${paths.UPDATE_DIR}/versions.json.sig`);
    await verifyBoxUpdateInfo(`${paths.UPDATE_DIR}/versions.json`, updateInfo);

    await downloadBoxUrl(updateInfo.sourceTarballUrl, `${paths.UPDATE_DIR}/box.tar.gz`);
    await downloadBoxUrl(updateInfo.sourceTarballSigUrl, `${paths.UPDATE_DIR}/box.tar.gz.sig`);
    await gpgVerifyBoxTarball(`${paths.UPDATE_DIR}/box.tar.gz`, `${paths.UPDATE_DIR}/box.tar.gz.sig`);

    const newBoxSource = path.join(os.tmpdir(), 'box-' + crypto.randomBytes(4).readUInt32LE(0));
    const [mkdirError] = await safe(fs.promises.mkdir(newBoxSource, { recursive: true }));
    if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `Failed to create directory ${newBoxSource}: ${mkdirError.message}`);

    await extractBoxTarball(`${paths.UPDATE_DIR}/box.tar.gz`, newBoxSource);

    return { file: newBoxSource };
}
2021-08-31 13:12:14 -07:00
// Ensures the root filesystem has at least `neededSpace` bytes available.
// Throws BoxError.FS_ERROR when disk usage cannot be determined or space is insufficient.
async function checkFreeDiskSpace(neededSpace) {
    assert.strictEqual(typeof neededSpace, 'number');

    // can probably be a bit more aggressive here since a new update can bring in new docker images
    const [error, diskUsage] = await safe(df.file('/'));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    // report the actual requirement instead of a hardcoded '2GB' so callers with other thresholds get an accurate message
    if (diskUsage.available < neededSpace) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space. Updates require at least ${df.prettyBytes(neededSpace)} of free space. Available: ${df.prettyBytes(diskUsage.available)}`);
}
// Performs the box update: checks disk space, downloads and verifies the release,
// optionally takes a full backup, then hands over to the installer script via sudo.
// On success this process is stopped by the installer; on installer failure the
// update lock is released and the error is rethrown so the task is marked failed.
async function updateBox(boxUpdateInfo, options, progressCallback) {
    assert(boxUpdateInfo && typeof boxUpdateInfo === 'object');
    assert(options && typeof options === 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ percent: 1, message: 'Checking disk space' });

    await checkFreeDiskSpace(2*1024*1024*1024);

    progressCallback({ percent: 5, message: 'Downloading and verifying release' });
    const packageInfo = await downloadAndVerifyBoxRelease(boxUpdateInfo);

    if (!options.skipBackup) {
        progressCallback({ percent: 10, message: 'Backing up' });
        const sites = await backupSites.listByContentForUpdates('box');
        if (sites.length === 0) throw new BoxError(BoxError.BAD_STATE, 'no backup site for update');
        for (const site of sites) {
            await backuptask.fullBackup(site.id, { preserveSecs: 3*7*24*60*60 }, (progress) => progressCallback({ percent: 10+progress.percent*70/100, message: progress.message }));
        }
        await checkFreeDiskSpace(2*1024*1024*1024); // check again in case backup is in same disk
    }

    await locks.wait(locks.TYPE_BOX_UPDATE);

    debug(`Updating box with ${boxUpdateInfo.sourceTarballUrl}`);

    progressCallback({ percent: 70, message: 'Installing update...' });
    const [error] = await safe(shell.sudo([ UPDATE_CMD, packageInfo.file, process.stdout.logFile ], {})); // run installer.sh from new box code as a separate service
    if (error) {
        await locks.release(locks.TYPE_BOX_UPDATE);
        throw error; // previously swallowed; the task would be reported as successful despite the failed install
    }
    // Do not add any code here. The installer script will stop the box code any instant
}
2018-07-31 11:35:23 -07:00
2025-06-26 13:41:09 +02:00
// Rejects the box update when any installed app declares a maxBoxVersion
// below the target version. Throws BoxError.BAD_STATE in that case.
async function checkBoxUpdateRequirements(boxUpdateInfo) {
    assert.strictEqual(typeof boxUpdateInfo, 'object');

    const installedApps = await apps.list();
    for (const app of installedApps) {
        const { maxBoxVersion } = app.manifest;
        if (!semver.valid(maxBoxVersion)) continue; // app places no upper bound
        if (!semver.gt(boxUpdateInfo.version, maxBoxVersion)) continue;

        throw new BoxError(BoxError.BAD_STATE, `Cannot update to v${boxUpdateInfo.version} because ${app.fqdn} has a maxBoxVersion of ${maxBoxVersion}`);
    }
}
// Returns the cached box update info (written by checkBoxUpdate) or null when none exists.
async function getBoxUpdate() {
    const contents = safe.fs.readFileSync(paths.BOX_UPDATE_FILE, 'utf8');
    return safe.JSON.parse(contents) || null;
}
// Validates update preconditions, acquires the box update task lock and starts the
// update as a background task. Returns the task id immediately; when the background
// task fails, the locks are released and an UPDATE_FINISH event is logged.
async function startBoxUpdateTask(options, auditSource) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const boxUpdateInfo = await getBoxUpdate();
    if (!boxUpdateInfo) throw new BoxError(BoxError.NOT_FOUND, 'No update available');
    if (!boxUpdateInfo.sourceTarballUrl) throw new BoxError(BoxError.BAD_STATE, 'No automatic update available');
    if (semver.gte(constants.VERSION, boxUpdateInfo.version)) throw new BoxError(BoxError.NOT_FOUND, 'No update available'); // can happen after update completed or hotfix

    await checkBoxUpdateRequirements(boxUpdateInfo);

    const sites = await backupSites.listByContentForUpdates('box');
    if (sites.length === 0) throw new BoxError(BoxError.BAD_STATE, 'No backup site for update');

    const [lockError] = await safe(locks.acquire(locks.TYPE_BOX_UPDATE_TASK));
    if (lockError) throw new BoxError(BoxError.BAD_STATE, `Another update task is in progress: ${lockError.message}`);

    // at least 400M; scaled up by the largest backup site memory limit (bytes -> MB)
    const memoryLimit = sites.reduce((acc, cur) => cur.limits?.memoryLimit ? Math.max(cur.limits.memoryLimit/1024/1024, acc) : acc, 400);

    const taskId = await tasks.add(tasks.TASK_BOX_UPDATE, [ boxUpdateInfo, options ]);

    await eventlog.add(eventlog.ACTION_UPDATE, auditSource, { taskId, boxUpdateInfo });

    // background
    tasks.startTask(taskId, { timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15, memoryLimit })
        .then(() => debug('startBoxUpdateTask: update task completed'))
        .catch(async (taskError) => {
            debug('Update failed with error. %o', taskError);
            await locks.release(locks.TYPE_BOX_UPDATE_TASK);
            await locks.releaseByTaskId(taskId);
            const timedOut = taskError.code === tasks.ETIMEOUT;
            await eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource, { taskId, errorMessage: taskError.message, timedOut });
        });

    return taskId;
}
2023-08-12 19:28:07 +05:30
// Called after box start to detect that an update (or fresh install) just completed:
// compares the persisted version file with the running version, emits the matching
// eventlog entries/notifications and then records the current version.
async function notifyBoxUpdate() {
    const previousVersion = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
    if (previousVersion === constants.VERSION) return; // no version change

    safe.fs.unlinkSync(paths.BOX_UPDATE_FILE); // cached update info is stale now

    if (!previousVersion) { // no version file yet - this is a fresh install
        await eventlog.add(eventlog.ACTION_INSTALL_FINISH, AuditSource.CRON, { version: constants.VERSION });
    } else {
        debug(`notifyBoxUpdate: update finished from ${previousVersion} to ${constants.VERSION}`);

        const [error] = await safe(tasks.setCompletedByType(tasks.TASK_BOX_UPDATE, { error: null }));
        if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist

        await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: previousVersion || 'dev', newVersion: constants.VERSION });
        await notifications.unpin(notifications.TYPE_BOX_UPDATE, { context: constants.VERSION });
    }

    safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
}
// Cron entry point for automatic updates. Tries a box update first; only when no
// box update gets scheduled does it fall through to auto-updating individual apps.
async function autoUpdate(auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    const boxUpdateInfo = await getBoxUpdate();

    // do box before app updates. for the off chance that the box logic fixes some app update logic issue
    if (boxUpdateInfo && !boxUpdateInfo.unstable) {
        debug('autoUpdate: starting box autoupdate to %j', boxUpdateInfo.version);
        const [error] = await safe(startBoxUpdateTask({ skipBackup: false }, AuditSource.CRON));
        if (!error) return; // do not start app updates when a box update got scheduled
        debug(`autoUpdate: failed to start box autoupdate task: ${error.message}`);
        // fall through to update apps if box update never started (failed ubuntu or avx check)
    }

    const installedApps = await apps.list();
    for (const app of installedApps) {
        if (!app.updateInfo) continue; // nothing pending for this app

        if (!app.updateInfo.isAutoUpdatable) {
            debug(`autoUpdate: ${app.fqdn} requires manual update. skipping`);
            continue;
        }

        const sites = await backupSites.listByContentForUpdates(app.id);
        if (sites.length === 0) {
            debug(`autoUpdate: ${app.fqdn} has no backup site for updates. skipping`);
            continue;
        }

        const updateData = {
            manifest: app.updateInfo.manifest,
            force: false
        };

        debug(`autoUpdate: ${app.fqdn} will be automatically updated`);

        const [updateError] = await safe(apps.updateApp(app, updateData, auditSource));
        if (updateError) debug(`autoUpdate: error autoupdating ${app.fqdn}: ${updateError.message}`);
    }
}
2025-06-26 13:41:09 +02:00
// Queries the appstore for an update of a single app and caches the result
// on the app record. Returns the update info, or null for dev apps.
async function checkAppUpdate(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (app.appStoreId === '') return null; // appStoreId can be '' for dev apps

    const updateInfo = await appstore.getAppUpdate(app, options);
    await apps.update(app.id, { updateInfo });

    return updateInfo;
}
// Queries the appstore for a box update and persists the result to BOX_UPDATE_FILE;
// removes the file when no update is available.
async function checkBoxUpdate(options) {
    assert.strictEqual(typeof options, 'object');

    debug('checkBoxUpdate: checking for updates');

    const updateInfo = await appstore.getBoxUpdate(options);
    if (!updateInfo) {
        safe.fs.unlinkSync(paths.BOX_UPDATE_FILE);
        return;
    }

    safe.fs.writeFileSync(paths.BOX_UPDATE_FILE, JSON.stringify(updateInfo, null, 4));
}
// Pins update notifications: a box update notification when auto-update is disabled,
// and a per-app notification for each app update that requires manual intervention.
async function raiseNotifications() {
    const pattern = await getAutoupdatePattern();

    const boxUpdate = await getBoxUpdate();
    if (pattern === constants.CRON_PATTERN_NEVER && boxUpdate) {
        const changelog = boxUpdate.changelog.map((m) => `* ${m}\n`).join('');
        const message = `Changelog:\n${changelog}\n\nGo to the Settings view to update.\n\n`;
        await notifications.pin(notifications.TYPE_BOX_UPDATE, `Cloudron v${boxUpdate.version} is available`, message, { context: boxUpdate.version });
    }

    const result = await apps.list();
    for (const app of result) {
        // currently, we do not raise notifications when auto-update is disabled. separate notifications appears spammy when having many apps
        // in the future, we can maybe aggregate?
        if (!app.updateInfo || app.updateInfo.isAutoUpdatable) continue;

        debug(`raiseNotifications: ${app.fqdn} cannot be autoupdated. skipping`); // debug prefix fixed (was mislabeled 'autoUpdate:')
        await notifications.pin(notifications.TYPE_MANUAL_APP_UPDATE_NEEDED, `${app.manifest.title} at ${app.fqdn} requires manual update to version ${app.updateInfo.manifest.version}`,
            `Changelog:\n${app.updateInfo.manifest.changelog}\n`, { context: app.id });
    }
}
// Entry point of the update checker: refreshes box and per-app update info,
// then raises any pending user notifications.
async function checkForUpdates(options) {
    assert.strictEqual(typeof options, 'object');

    const [boxError] = await safe(checkBoxUpdate(options));
    if (boxError) debug('checkForUpdates: error checking for box updates: %o', boxError);

    // check app updates
    for (const app of await apps.list()) {
        await safe(checkAppUpdate(app, options), { debug }); // best-effort per app
    }

    // raise notifications here because the updatechecker runs regardless of auto-updater cron job
    await raiseNotifications();
}