2018-07-31 11:35:23 -07:00
'use strict' ;
exports = module . exports = {
2023-08-03 14:26:41 +05:30
setAutoupdatePattern ,
getAutoupdatePattern ,
2025-06-20 19:04:55 +02:00
startBoxUpdateTask ,
updateBox ,
2023-08-12 19:28:07 +05:30
2025-06-20 19:10:25 +02:00
autoUpdate ,
2025-06-26 13:41:09 +02:00
notifyBoxUpdate ,
checkForUpdates ,
2025-06-26 15:19:28 +02:00
checkAppUpdate ,
checkBoxUpdate ,
2025-06-26 13:41:09 +02:00
2025-06-26 15:19:28 +02:00
getBoxUpdate ,
2018-07-31 11:35:23 -07:00
} ;
2021-08-31 11:16:58 -07:00
// Module dependencies — one `const` declaration per binding (instead of a
// single comma-chained declaration), kept in the original alphabetical order.
const apps = require('./apps.js');
const appstore = require('./appstore.js');
const assert = require('node:assert');
const AuditSource = require('./auditsource.js');
const BoxError = require('./boxerror.js');
const backupSites = require('./backupsites.js');
const backuptask = require('./backuptask.js');
const constants = require('./constants.js');
const cron = require('./cron.js');
const { CronTime } = require('cron');
const crypto = require('node:crypto');
const debug = require('debug')('box:updater');
const df = require('./df.js');
const eventlog = require('./eventlog.js');
const fs = require('node:fs');
const locks = require('./locks.js');
const notifications = require('./notifications.js');
const os = require('node:os');
const path = require('node:path');
const paths = require('./paths.js');
const promiseRetry = require('./promise-retry.js');
const safe = require('safetydance');
const semver = require('semver');
const settings = require('./settings.js');
const shell = require('./shell.js')('updater');
const tasks = require('./tasks.js');
2018-07-31 11:35:23 -07:00
2018-08-01 15:38:40 -07:00
// Keyring used to GPG-verify release artifacts (versions file and tarball).
const RELEASES_PUBLIC_KEY = path.join(__dirname, 'releases.gpg');

// Installer script; run via sudo as a separate service by updateBox().
const UPDATE_CMD = path.join(__dirname, 'scripts/update.sh');
2023-08-03 14:26:41 +05:30
// Persists the auto-update cron pattern and reschedules the cron job.
// Throws BAD_FIELD when the pattern is not a valid cron expression.
async function setAutoupdatePattern(pattern) {
    assert.strictEqual(typeof pattern, 'string');

    if (pattern !== constants.CRON_PATTERN_NEVER) { // check if pattern is valid; 'never' is allowed as-is
        const parsed = safe.safeCall(() => new CronTime(pattern));
        if (!parsed) throw new BoxError(BoxError.BAD_FIELD, 'Invalid pattern');
    }

    await settings.set(settings.AUTOUPDATE_PATTERN_KEY, pattern);

    await cron.handleAutoupdatePatternChanged(pattern);
}
// Returns the stored auto-update cron pattern, falling back to the default.
async function getAutoupdatePattern() {
    const stored = await settings.get(settings.AUTOUPDATE_PATTERN_KEY);
    return stored || cron.DEFAULT_AUTOUPDATE_PATTERN;
}
2025-06-26 13:41:09 +02:00
// Downloads `url` to `file` with curl, retrying up to 10 times.
// `url` comes from the appstore, so it is validated at runtime instead of asserted.
// Throws EXTERNAL_ERROR for a non-string url and NETWORK_ERROR when the download fails.
async function downloadBoxUrl(url, file) {
    assert.strictEqual(typeof file, 'string');

    // do not assert since it comes from the appstore
    if (typeof url !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, `url cannot be downloaded to ${file} as it is not a string`); // fixed grammar: was "cannot be download to"

    safe.fs.unlinkSync(file); // drop any stale artifact from a previous attempt

    await promiseRetry({ times: 10, interval: 5000, debug }, async function () {
        debug(`downloadBoxUrl: downloading ${url} to ${file}`);

        const [error] = await safe(shell.spawn('curl', ['-s', '--fail', url, '-o', file], { encoding: 'utf8' }));
        if (error) throw new BoxError(BoxError.NETWORK_ERROR, `Failed to download ${url}: ${error.message}`);

        debug('downloadBoxUrl: done');
    });
}
2025-06-26 13:41:09 +02:00
// Verifies the detached GPG signature `sig` of `file` against the bundled
// releases keyring. Throws NOT_SIGNED when the command fails or the
// signature is not a VALIDSIG from the release key.
async function gpgVerifyBoxTarball(file, sig) {
    assert.strictEqual(typeof file, 'string');
    assert.strictEqual(typeof sig, 'string');

    const gpgArgs = ['--status-fd', '1', '--no-default-keyring', '--keyring', RELEASES_PUBLIC_KEY, '--verify', sig, file];
    const [error, stdout] = await safe(shell.spawn('/usr/bin/gpg', gpgArgs, { encoding: 'utf8' }));
    if (error) {
        debug(`gpgVerifyBoxTarball: command failed. error: ${error}\nstdout: ${error.stdout}\nstderr: ${error.stderr}`);
        throw new BoxError(BoxError.NOT_SIGNED, `The signature in ${path.basename(sig)} could not be verified (command failed)`);
    }

    // --status-fd 1 puts machine-readable status lines on stdout; look for a valid sig from the release key fingerprint
    if (stdout.includes('[GNUPG:] VALIDSIG 0EADB19CDDA23CD0FE71E3470A372F8703C493CC')) return; // success

    debug(`gpgVerifyBoxTarball: verification of ${sig} failed: ${stdout}\n`);

    throw new BoxError(BoxError.NOT_SIGNED, `The signature in ${path.basename(sig)} could not be verified (bad sig)`);
}
2025-06-26 13:41:09 +02:00
// Unpacks `tarball` into `dir` and deletes the tarball on success.
// Throws FS_ERROR when extraction fails.
async function extractBoxTarball(tarball, dir) {
    assert.strictEqual(typeof tarball, 'string');
    assert.strictEqual(typeof dir, 'string');

    debug(`extractBoxTarball: extracting ${tarball} to ${dir}`);

    const [extractError] = await safe(shell.spawn('tar', ['-zxf', tarball, '-C', dir], { encoding: 'utf8' }));
    if (extractError) throw new BoxError(BoxError.FS_ERROR, `Failed to extract release package: ${extractError.message}`);

    safe.fs.unlinkSync(tarball); // the tarball is no longer needed once extracted

    debug('extractBoxTarball: extracted');
}
2025-06-26 13:41:09 +02:00
// Cross-checks `updateInfo` (from the appstore) against the signed versions
// file: the tarball being offered must match the `next` release recorded for
// our current version. Throws EXTERNAL_ERROR on any mismatch.
async function verifyBoxUpdateInfo(versionsFile, updateInfo) {
    assert.strictEqual(typeof versionsFile, 'string');
    assert.strictEqual(typeof updateInfo, 'object');

    const releases = safe.JSON.parse(safe.fs.readFileSync(versionsFile, 'utf8')) || {};

    const currentRelease = releases[constants.VERSION];
    if (!currentRelease) throw new BoxError(BoxError.EXTERNAL_ERROR, `No version info for ${constants.VERSION}`);
    if (!currentRelease.next) throw new BoxError(BoxError.EXTERNAL_ERROR, `No next version info for ${constants.VERSION}`);

    const nextRelease = releases[currentRelease.next];
    if (!nextRelease || typeof nextRelease !== 'object') throw new BoxError(BoxError.EXTERNAL_ERROR, 'No next version info');
    if (nextRelease.sourceTarballUrl !== updateInfo.sourceTarballUrl) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Version info mismatch');
}
2025-06-26 13:41:09 +02:00
// Downloads the release metadata and source tarball, GPG-verifies both,
// checks the version chain and extracts the sources into a fresh temp
// directory. Returns { file: <extracted source dir> }.
async function downloadAndVerifyBoxRelease(updateInfo) {
    assert.strictEqual(typeof updateInfo, 'object');

    // clean up extracted sources left behind by previous update attempts
    const tmpEntries = await fs.promises.readdir(os.tmpdir());
    for (const entry of tmpEntries) {
        if (!entry.startsWith('box-')) continue;
        const staleDir = path.join(os.tmpdir(), entry);
        debug(`downloadAndVerifyBoxRelease: removing old artifact ${staleDir}`);
        await fs.promises.rm(staleDir, { recursive: true, force: true });
    }

    // verify the versions file first, then the tarball it vouches for
    await downloadBoxUrl(updateInfo.boxVersionsUrl, `${paths.UPDATE_DIR}/versions.json`);
    await downloadBoxUrl(updateInfo.boxVersionsSigUrl, `${paths.UPDATE_DIR}/versions.json.sig`);
    await gpgVerifyBoxTarball(`${paths.UPDATE_DIR}/versions.json`, `${paths.UPDATE_DIR}/versions.json.sig`);
    await verifyBoxUpdateInfo(`${paths.UPDATE_DIR}/versions.json`, updateInfo);
    await downloadBoxUrl(updateInfo.sourceTarballUrl, `${paths.UPDATE_DIR}/box.tar.gz`);
    await downloadBoxUrl(updateInfo.sourceTarballSigUrl, `${paths.UPDATE_DIR}/box.tar.gz.sig`);
    await gpgVerifyBoxTarball(`${paths.UPDATE_DIR}/box.tar.gz`, `${paths.UPDATE_DIR}/box.tar.gz.sig`);

    // random suffix keeps concurrent/stale directories from colliding
    const newBoxSource = path.join(os.tmpdir(), 'box-' + crypto.randomBytes(4).readUInt32LE(0));

    const [mkdirError] = await safe(fs.promises.mkdir(newBoxSource, { recursive: true }));
    if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `Failed to create directory ${newBoxSource}: ${mkdirError.message}`);

    await extractBoxTarball(`${paths.UPDATE_DIR}/box.tar.gz`, newBoxSource);

    return { file: newBoxSource };
}
2021-08-31 13:12:14 -07:00
// Throws FS_ERROR unless the root filesystem has at least `neededSpace` bytes free.
async function checkFreeDiskSpace(neededSpace) {
    assert.strictEqual(typeof neededSpace, 'number');

    // can probably be a bit more aggressive here since a new update can bring in new docker images
    const [dfError, diskUsage] = await safe(df.file('/'));
    if (dfError) throw new BoxError(BoxError.FS_ERROR, dfError);

    if (diskUsage.available < neededSpace) throw new BoxError(BoxError.FS_ERROR, `Not enough disk space. Updates require at least 2GB of free space. Available: ${df.prettyBytes(diskUsage.available)}`);
}
2025-06-20 19:04:55 +02:00
// Downloads, verifies and installs a box update.
// - boxUpdateInfo: update descriptor from the appstore (sourceTarballUrl etc.)
// - options.skipBackup: when falsy, a full backup is taken before installing
// - progressCallback({ percent, message }): progress reporting
// On success the installer service stops this process, so this function does
// not return normally in that case. On installer start failure the box-update
// lock is released and the error is rethrown (previously it was swallowed and
// the task appeared to finish successfully).
async function updateBox(boxUpdateInfo, options, progressCallback) {
    assert(boxUpdateInfo && typeof boxUpdateInfo === 'object');
    assert(options && typeof options === 'object');
    assert.strictEqual(typeof progressCallback, 'function');

    progressCallback({ percent: 1, message: 'Checking disk space' });

    await checkFreeDiskSpace(2 * 1024 * 1024 * 1024);

    progressCallback({ percent: 5, message: 'Downloading and verifying release' });

    const packageInfo = await downloadAndVerifyBoxRelease(boxUpdateInfo);

    if (!options.skipBackup) {
        progressCallback({ percent: 10, message: 'Backing up' });

        const sites = await backupSites.listByContentForUpdates('box');
        if (sites.length === 0) throw new BoxError(BoxError.BAD_STATE, 'no backup site for update');

        for (const site of sites) {
            await backuptask.fullBackup(site.id, { preserveSecs: 3 * 7 * 24 * 60 * 60 }, (progress) => progressCallback({ percent: 10 + progress.percent * 70 / 100, message: progress.message }));
        }

        await checkFreeDiskSpace(2 * 1024 * 1024 * 1024); // check again in case backup is in same disk
    }

    await locks.wait(locks.TYPE_BOX_UPDATE);

    debug(`Updating box with ${boxUpdateInfo.sourceTarballUrl}`);

    progressCallback({ percent: 70, message: 'Installing update...' });

    const [error] = await safe(shell.sudo([UPDATE_CMD, packageInfo.file, process.stdout.logFile], {})); // run installer.sh from new box code as a separate service
    if (error) {
        await locks.release(locks.TYPE_BOX_UPDATE); // allow a retry
        throw error; // propagate so the update task is marked as failed
    }

    // Do not add any code here. The installer script will stop the box code any instant
}
2018-07-31 11:35:23 -07:00
2025-06-26 13:41:09 +02:00
// Rejects an update when any installed app declares a maxBoxVersion below the
// target version. Throws BAD_STATE naming the offending app.
async function checkBoxUpdateRequirements(boxUpdateInfo) {
    assert.strictEqual(typeof boxUpdateInfo, 'object');

    const installedApps = await apps.list();

    for (const app of installedApps) {
        const { maxBoxVersion } = app.manifest;
        if (!semver.valid(maxBoxVersion)) continue; // no (valid) constraint declared
        if (!semver.gt(boxUpdateInfo.version, maxBoxVersion)) continue; // target version is within the limit
        throw new BoxError(BoxError.BAD_STATE, `Cannot update to v${boxUpdateInfo.version} because ${app.fqdn} has a maxBoxVersion of ${maxBoxVersion}`);
    }
}
2025-06-26 15:19:28 +02:00
// Returns the cached box update info (written by checkBoxUpdate) or null
// when the cache file is missing or unparseable.
async function getBoxUpdate() {
    return safe.JSON.parse(safe.fs.readFileSync(paths.BOX_UPDATE_FILE, 'utf8')) || null;
}
2025-06-20 19:04:55 +02:00
// Validates preconditions (an update is cached, it is newer than the running
// version, app maxBoxVersion constraints hold, a backup site is configured),
// acquires the box-update-task lock and starts the update task in the
// background. Returns the task id.
// Throws NOT_FOUND / BAD_STATE when a precondition fails.
async function startBoxUpdateTask(options, auditSource) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const boxUpdateInfo = await getBoxUpdate();
    if (!boxUpdateInfo) throw new BoxError(BoxError.NOT_FOUND, 'No update available');
    if (!boxUpdateInfo.sourceTarballUrl) throw new BoxError(BoxError.BAD_STATE, 'No automatic update available');

    if (semver.gte(constants.VERSION, boxUpdateInfo.version)) throw new BoxError(BoxError.NOT_FOUND, 'No update available'); // can happen after update completed or hotfix

    await checkBoxUpdateRequirements(boxUpdateInfo);

    const sites = await backupSites.listByContentForUpdates('box');
    if (sites.length === 0) throw new BoxError(BoxError.BAD_STATE, 'No backup site for update');

    // single-flight guard: only one box update task may run at a time
    const [error] = await safe(locks.acquire(locks.TYPE_BOX_UPDATE_TASK));
    if (error) throw new BoxError(BoxError.BAD_STATE, `Another update task is in progress: ${error.message}`);

    // size the task's memory limit from the largest backup site limit (in MB), with a floor of 400
    const memoryLimit = sites.reduce((acc, cur) => cur.limits?.memoryLimit ? Math.max(cur.limits.memoryLimit / 1024 / 1024, acc) : acc, 400);

    const taskId = await tasks.add(tasks.TASK_BOX_UPDATE, [boxUpdateInfo, options]);
    await eventlog.add(eventlog.ACTION_UPDATE, auditSource, { taskId, boxUpdateInfo });

    // background
    tasks.startTask(taskId, { timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15, memoryLimit })
        .then(() => debug('startBoxUpdateTask: update task completed'))
        .catch(async (error) => {
            debug('Update failed with error. %o', error);
            // release the update locks on failure so a retry is possible
            await locks.release(locks.TYPE_BOX_UPDATE_TASK);
            await locks.releaseByTaskId(taskId);
            const timedOut = error.code === tasks.ETIMEOUT;
            await eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource, { taskId, errorMessage: error.message, timedOut });
        });

    return taskId;
}
2023-08-12 19:28:07 +05:30
2025-06-20 19:04:55 +02:00
// Called after (re)start: detects that the box code version changed since the
// last run, finalizes update bookkeeping (task state, eventlog, notification)
// and records the now-current version in the version file.
async function notifyBoxUpdate() {
    const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
    if (version === constants.VERSION) return; // no version change since last run

    safe.fs.unlinkSync(paths.BOX_UPDATE_FILE); // cached update info is stale now

    if (!version) {
        // no version file: fresh install rather than an update
        await eventlog.add(eventlog.ACTION_INSTALL_FINISH, AuditSource.CRON, { version: constants.VERSION });
    } else {
        debug(`notifyBoxUpdate: update finished from ${version} to ${constants.VERSION}`);

        const [error] = await safe(tasks.setCompletedByType(tasks.TASK_BOX_UPDATE, { error: null }));
        if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist

        await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
        await notifications.unpin(notifications.TYPE_BOX_UPDATE, { context: constants.VERSION });
    }

    safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
}
2025-06-20 19:10:25 +02:00
// Cron entry point: schedules a box update when one is available, otherwise
// auto-updates every eligible app.
async function autoUpdate(auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    const boxUpdateInfo = await getBoxUpdate();

    // do box before app updates. for the off chance that the box logic fixes some app update logic issue
    if (boxUpdateInfo && !boxUpdateInfo.unstable) {
        debug('autoUpdate: starting box autoupdate to %j', boxUpdateInfo.version);
        // NOTE(review): passes AuditSource.CRON instead of the auditSource argument — confirm this is intended
        const [error] = await safe(startBoxUpdateTask({ skipBackup: false }, AuditSource.CRON));
        if (!error) return; // do not start app updates when a box update got scheduled
        debug(`autoUpdate: failed to start box autoupdate task: ${error.message}`);
        // fall through to update apps if box update never started (failed ubuntu or avx check)
    }

    const installedApps = await apps.list();
    for (const app of installedApps) {
        if (!app.updateInfo) continue; // nothing pending for this app

        if (!app.updateInfo.isAutoUpdatable) {
            debug(`autoUpdate: ${app.fqdn} requires manual update. skipping`);
            continue;
        }

        const sites = await backupSites.listByContentForUpdates(app.id);
        if (sites.length === 0) {
            debug(`autoUpdate: ${app.fqdn} has no backup site for updates. skipping`);
            continue;
        }

        const data = {
            manifest: app.updateInfo.manifest,
            force: false
        };

        debug(`autoUpdate: ${app.fqdn} will be automatically updated`);

        // best-effort: one failed app update must not stop the others
        const [updateError] = await safe(apps.updateApp(app, data, auditSource));
        if (updateError) debug(`autoUpdate: error autoupdating ${app.fqdn}: ${updateError.message}`);
    }
}
2025-06-26 13:41:09 +02:00
2025-06-26 15:19:28 +02:00
// Fetches update info for a single app from the appstore and caches it on
// the app record. Returns the update info, or null for dev apps.
async function checkAppUpdate(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (app.appStoreId === '') return null; // appStoreId can be '' for dev apps

    const updateInfo = await appstore.getAppUpdate(app, options);
    await apps.update(app.id, { updateInfo }); // persist so the UI and autoUpdate can pick it up

    return updateInfo;
}
2025-06-26 15:19:28 +02:00
// Queries the appstore for a box update and caches the result in
// BOX_UPDATE_FILE; removes the cache file when no update is available.
async function checkBoxUpdate(options) {
    assert.strictEqual(typeof options, 'object');

    debug('checkBoxUpdate: checking for updates');

    const updateInfo = await appstore.getBoxUpdate(options);

    if (!updateInfo) {
        safe.fs.unlinkSync(paths.BOX_UPDATE_FILE); // drop any stale cache
        return;
    }

    safe.fs.writeFileSync(paths.BOX_UPDATE_FILE, JSON.stringify(updateInfo, null, 4));
}
// Pins notifications for pending updates: the box update is announced only
// when auto-update is disabled; app updates only when they need a manual
// update (one notification per auto-updatable app would be spammy).
async function raiseNotifications() {
    const pattern = await getAutoupdatePattern();

    const boxUpdate = await getBoxUpdate();
    if (pattern === constants.CRON_PATTERN_NEVER && boxUpdate) {
        const changelog = boxUpdate.changelog.map((m) => `* ${m}\n`).join('');
        const message = `Changelog:\n${changelog}\n\nGo to the Settings view to update.\n\n`;
        await notifications.pin(notifications.TYPE_BOX_UPDATE, `Cloudron v${boxUpdate.version} is available`, message, { context: boxUpdate.version });
    }

    const result = await apps.list();
    for (const app of result) {
        // currently, we do not raise notifications when auto-update is disabled. separate notifications appears spammy when having many apps
        // in the future, we can maybe aggregate?
        if (app.updateInfo && !app.updateInfo.isAutoUpdatable) {
            debug(`raiseNotifications: ${app.fqdn} cannot be autoupdated. skipping`); // log prefix fixed: was mislabeled 'autoUpdate:'
            await notifications.pin(notifications.TYPE_MANUAL_APP_UPDATE_NEEDED, `${app.manifest.title} at ${app.fqdn} requires manual update to version ${app.updateInfo.manifest.version}`,
                `Changelog:\n${app.updateInfo.manifest.changelog}\n`, { context: app.id });
        } // redundant trailing 'continue' removed
    }
}
// Refreshes the cached box and app update info, then raises user notifications.
async function checkForUpdates(options) {
    assert.strictEqual(typeof options, 'object');

    const [boxError] = await safe(checkBoxUpdate(options));
    if (boxError) debug('checkForUpdates: error checking for box updates: %o', boxError);

    // check app updates
    for (const app of await apps.list()) {
        await safe(checkAppUpdate(app, options), { debug }); // best-effort per app; errors are logged by safe()
    }

    // raise notifications here because the updatechecker runs regardless of auto-updater cron job
    await raiseNotifications();
}