Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
import apps from './apps.js' ;
import assert from 'node:assert' ;
import BoxError from './boxerror.js' ;
import constants from './constants.js' ;
2026-02-14 15:43:24 +01:00
import dashboard from './dashboard.js' ;
2026-03-12 22:55:28 +05:30
import logger from './logger.js' ;
Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
import Docker from 'dockerode' ;
2026-02-14 15:43:24 +01:00
import dockerRegistries from './dockerregistries.js' ;
Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
import fs from 'node:fs' ;
2026-02-14 15:43:24 +01:00
import mailServer from './mailserver.js' ;
Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
import os from 'node:os' ;
import paths from './paths.js' ;
import promiseRetry from './promise-retry.js' ;
import services from './services.js' ;
import shellModule from './shell.js' ;
import safe from 'safetydance' ;
import timers from 'timers/promises' ;
2026-02-14 15:43:24 +01:00
import volumes from './volumes.js' ;
Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
2026-03-12 22:55:28 +05:30
const { log , trace } = logger ( 'docker' ) ;
Migrate codebase from CommonJS to ES Modules
- Convert all require()/module.exports to import/export across 260+ files
- Add "type": "module" to package.json to enable ESM by default
- Add migrations/package.json with "type": "commonjs" to keep db-migrate compatible
- Convert eslint.config.js to ESM with sourceType: "module"
- Replace __dirname/__filename with import.meta.dirname/import.meta.filename
- Replace require.main === module with process.argv[1] === import.meta.filename
- Remove 'use strict' directives (implicit in ESM)
- Convert dynamic require() in switch statements to static import lookup maps
(dns.js, domains.js, backupformats.js, backupsites.js, network.js)
- Extract self-referencing exports.CONSTANT patterns into standalone const
declarations (apps.js, services.js, locks.js, users.js, mail.js, etc.)
- Lazify SERVICES object in services.js to avoid circular dependency TDZ issues
- Add clearMailQueue() to mailer.js for ESM-safe queue clearing in tests
- Add _setMockApp() to ldapserver.js for ESM-safe test mocking
- Add _setMockResolve() wrapper to dig.js for ESM-safe DNS mocking in tests
- Convert backupupload.js to use dynamic imports so --check exits before
loading the module graph (which requires BOX_ENV)
- Update check-install to use ESM import for infra_version.js
- Convert scripts/ (hotfix, release, remote_hotfix.js, find-unused-translations)
- All 1315 tests passing
Migration stats (AI-assisted using Cursor with Claude):
- Wall clock time: ~3-4 hours
- Assistant completions: ~80-100
- Estimated token usage: ~1-2M tokens
Co-authored-by: Cursor <cursoragent@cursor.com>
2026-02-14 09:53:14 +01:00
const shell = shellModule ( 'docker' ) ;
2015-10-19 11:08:23 -07:00
2025-05-07 14:09:10 +02:00
const gConnection = new Docker ( { socketPath : paths . DOCKER _SOCKET _PATH } ) ;
2019-10-22 22:07:44 -07:00
2026-01-01 18:22:48 +01:00
const CLOUDRON _REGISTRIES = [ 'registry.docker.com' , 'registry.ipv4.docker.com' , 'quay.io' ] ; // order determines priority and is important!
2024-12-14 14:53:08 +01:00
// Parses a docker image reference into its components.
// A ref looks like: registry.docker.com/cloudron/base:4.2.0@sha256:46da2fffb36353ef714f97ae8e962bd2c212ca091108d768ba473078319a47f4
//   - registry.docker.com is the registry name. it is optional; the first path component is
//     only a registry when it contains a dot (https://docs.docker.com/admin/faqs/general-faqs/#what-is-a-docker-id)
//   - cloudron is the (optional) namespace, base is the image name, cloudron/base is the repository path
//   - registry.docker.com/cloudron/base is the fullRepositoryName
//   - :4.2.0 is the (optional) tag, @sha256:... is the (optional) digest
// Returns { fullRepositoryName, registry, tag, digest } with null for missing parts.
function parseImageRef(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    const result = { fullRepositoryName: null, registry: null, tag: null, digest: null };

    result.fullRepositoryName = imageRef.split(/[:@]/)[0];
    const parts = result.fullRepositoryName.split('/');

    result.registry = parts[0].includes('.') ? parts[0] : null;

    // use slice() instead of the deprecated String.prototype.substr()
    let remaining = imageRef.slice(result.fullRepositoryName.length);
    if (remaining.startsWith(':')) {
        result.tag = remaining.slice(1).split('@', 1)[0];
        remaining = remaining.slice(result.tag.length + 1); // also skips the ':'
    }
    if (remaining.startsWith('@sha256:')) result.digest = remaining.slice(8);

    return result;
}
2021-08-26 21:14:49 -07:00
// Checks that the docker daemon is reachable and responsive.
// Uses a dedicated short-timeout connection so the request cannot linger.
// Throws DOCKER_ERROR when the daemon does not answer with 'OK'.
async function ping() {
    const connection = new Docker({ socketPath: paths.DOCKER_SOCKET_PATH, timeout: 1000 });

    const [error, result] = await safe(connection.ping());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    // the daemon replies 'OK', but sometimes as a Buffer instead of a string
    const reply = Buffer.isBuffer(result) ? result.toString('utf8') : result;
    if (reply === 'OK') return;

    throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to ping the docker daemon');
}
2024-12-14 14:53:08 +01:00
// Determines the dockerode authconfig to use when pulling imageRef.
// Returns null when the pull should be anonymous (public image or our own namespace).
async function getAuthConfig(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    const ref = parseImageRef(imageRef);

    // images in our cloudron namespace are always unauthenticated to not interfere with any user limits
    const isCloudronRegistry = ref.registry === null || CLOUDRON_REGISTRIES.includes(ref.registry);
    if (isCloudronRegistry && ref.fullRepositoryName.startsWith('cloudron/')) return null;

    // a configured registry matches on exact server address, or when both sides are docker
    // domains - ideally they match but there's too many docker registry domains!
    const matchesRef = (serverAddress) => {
        if (serverAddress === ref.registry) return true;
        if (!serverAddress.includes('.docker.')) return false;
        return ref.registry === null || ref.registry.includes('.docker.');
    };

    for (const registry of await dockerRegistries.list()) {
        if (!matchesRef(registry.serverAddress)) continue;

        // https://github.com/apocas/dockerode#pull-from-private-repos
        return {
            username: registry.username,
            password: registry.password,
            auth: registry.auth || '', // the auth token at login time
            email: registry.email || '',
            serveraddress: registry.serverAddress
        };
    }

    return null;
}
2015-10-19 11:40:19 -07:00
2024-12-14 14:53:08 +01:00
// Pulls a docker image, resolving once the download has completed.
// Throws NOT_FOUND when the image does not exist, FS_ERROR when the disk is full
// and DOCKER_ERROR for any other registry, network or stream failure.
async function pullImage(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    const authConfig = await getAuthConfig(imageRef);

    log(`pullImage: will pull ${imageRef}. auth: ${authConfig ? 'yes' : 'no'}`);

    const [error, stream] = await safe(gConnection.pull(imageRef, { authconfig: authConfig }));
    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to pull image ${imageRef}. message: ${error.message} statusCode: ${error.statusCode}`);
    // toomanyrequests is flagged as a 500. dockerhub appears to have 10 pulls per hour per IP limit
    if (error && error.statusCode === 500) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${imageRef}. registry error: ${JSON.stringify(error)}`);
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${imageRef}. Please check the network or if the image needs authentication. statusCode: ${error.statusCode}`);

    return new Promise((resolve, reject) => {
        // https://github.com/dotcloud/docker/issues/1074 says each status message is emitted as a chunk
        let layerError = null;

        stream.on('data', (chunk) => {
            const data = safe.JSON.parse(chunk) || {};
            log('pullImage: %j', data);

            // data.status is useless here because it is per layer as opposed to per image
            if (!data.status && data.error) { // data is { errorDetail: { message: xx } , error: xx }
                log(`pullImage error ${imageRef}: ${data.errorDetail.message}`);
                layerError = data.errorDetail;
            }
        });

        stream.on('end', () => {
            log(`downloaded image ${imageRef}. error: ${!!layerError}`);
            if (layerError) return reject(new BoxError(layerError.message.includes('no space') ? BoxError.FS_ERROR : BoxError.DOCKER_ERROR, layerError.message));
            resolve();
        });

        stream.on('error', (streamError) => { // this is only hit for stream error and not for some download error
            log(`error pulling image ${imageRef}: %o`, streamError);
            reject(new BoxError(BoxError.DOCKER_ERROR, streamError.message));
        });
    });
}
2026-01-27 12:06:40 +01:00
// Builds dockerImage from a gzipped tar source archive, resolving when the build finished.
// Prefers a Dockerfile.cloudron inside the archive (when present) over the default Dockerfile.
// Throws FS_ERROR when the disk is full and DOCKER_ERROR on any other build or stream failure.
async function buildImage(dockerImage, sourceArchiveFilePath) {
    assert.strictEqual(typeof dockerImage, 'string');
    assert.strictEqual(typeof sourceArchiveFilePath, 'string');

    log(`buildImage: building ${dockerImage} from ${sourceArchiveFilePath}`);

    const buildOptions = { t: dockerImage };

    // look for a Dockerfile.cloudron anywhere in the archive listing (best-effort)
    const [listError, listOut] = await safe(shell.spawn('tar', ['-tzf', sourceArchiveFilePath], { encoding: 'utf8' }));
    if (!listError && listOut) {
        const entry = listOut.split('\n')
            .map((line) => line.trim())
            .find((line) => line.replace(/\/$/, '').endsWith('Dockerfile.cloudron'));

        if (entry) {
            buildOptions.dockerfile = entry.replace(/\/$/, '');
            log(`buildImage: using ${buildOptions.dockerfile}`);
        }
    }

    const tarStream = fs.createReadStream(sourceArchiveFilePath);
    const [error, stream] = await safe(gConnection.buildImage(tarStream, buildOptions));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to build image from ${sourceArchiveFilePath}: ${error.message}`);

    return new Promise((resolve, reject) => {
        let buildError = null;

        stream.on('data', (chunk) => {
            const data = safe.JSON.parse(chunk) || {};
            if (data.error) {
                buildError = data.errorDetail || { message: data.error };
            } else {
                const message = (data.stream || data.status || data.aux?.ID || '').replace(/\n$/, '');
                if (message) log('buildImage: ' + message);
            }
        });

        stream.on('end', () => {
            if (buildError) {
                log(`buildImage: error ${buildError}`);
                return reject(new BoxError(buildError.message.includes('no space') ? BoxError.FS_ERROR : BoxError.DOCKER_ERROR, buildError.message));
            }
            log(`buildImage: success ${dockerImage}`);
            resolve();
        });

        stream.on('error', (streamError) => {
            log(`buildImage: error building image ${dockerImage}: %o`, streamError);
            reject(new BoxError(BoxError.DOCKER_ERROR, streamError.message));
        });
    });
}
2021-05-11 17:50:48 -07:00
// Returns docker bind-mount descriptors for the volumes mounted into the app.
// Throws NOT_FOUND when a mount references a volume id that no longer exists
// and BAD_STATE when a referenced volume is not active.
async function getVolumeMounts(app) {
    assert.strictEqual(typeof app, 'object');

    if (app.mounts.length === 0) return [];

    const result = await volumes.list();
    const volumesById = {};
    result.forEach(r => volumesById[r.id] = r);

    const mounts = [];
    for (const mount of app.mounts) {
        const volume = volumesById[mount.volumeId];
        // previously a stale volumeId crashed with a TypeError on volume.name below
        if (!volume) throw new BoxError(BoxError.NOT_FOUND, `Volume with id "${mount.volumeId}" does not exist`);

        const status = await volumes.getStatus(volume);
        if (status.state !== 'active') throw new BoxError(BoxError.BAD_STATE, `Volume "${volume.name}" is not active. ${status.message}`);

        mounts.push({
            Source: volume.hostPath,
            Target: `/media/${volume.name}`,
            Type: 'bind',
            ReadOnly: mount.readOnly
        });
    }

    return mounts;
}
2021-08-25 19:41:46 -07:00
// Returns docker bind-mount descriptors required by the app's manifest addons.
// Only the 'localstorage' and 'tls' addons need host mounts; others are ignored.
// Throws BAD_STATE when the app's storage volume is not active.
async function getAddonMounts(app) {
    assert.strictEqual(typeof app, 'object');

    const mounts = [];
    const addons = app.manifest.addons;
    if (!addons) return mounts;

    for (const addonName of Object.keys(addons)) {
        if (addonName === 'localstorage') {
            if (app.storageVolumeId) {
                const volume = await volumes.get(app.storageVolumeId);
                const status = await volumes.getStatus(volume);
                if (status.state !== 'active') throw new BoxError(BoxError.BAD_STATE, `Storage volume "${volume.name}" is not active. ${status.message}`);
            }

            const storageDir = await apps.getStorageDir(app);
            mounts.push({
                Target: '/app/data',
                Source: storageDir,
                Type: 'bind',
                ReadOnly: false
            });
        } else if (addonName === 'tls') {
            const certificateDir = `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`;
            mounts.push({
                Target: '/etc/certs',
                Source: certificateDir,
                Type: 'bind',
                ReadOnly: true
            });
        }
        // other addons do not require host mounts
    }

    return mounts;
}
2026-02-21 19:42:52 +01:00
// Returns docker bind-mount descriptors for the app's manifest persistentDirs.
// Each dir like /some/dir maps to a per-app host directory named some_dir.
function getPersistentDirMounts(app) {
    assert.strictEqual(typeof app, 'object');

    if (!app.manifest.persistentDirs) return [];

    return app.manifest.persistentDirs.map((dir) => {
        const sanitized = dir.replace(/\//g, '_').replace(/^_/, ''); // /a/b -> a_b
        const hostDir = `${paths.PERSISTENT_DATA_DIR}/${app.id}/${sanitized}`;
        return { Target: dir, Source: hostDir, Type: 'bind', ReadOnly: false };
    });
}
2021-08-25 19:41:46 -07:00
// Returns all docker bind-mount descriptors for the app:
// volume mounts, addon mounts and persistent-dir mounts combined.
async function getMounts(app) {
    assert.strictEqual(typeof app, 'object');

    return [
        ...(await getVolumeMounts(app)),
        ...(await getAddonMounts(app)),
        ...getPersistentDirMounts(app)
    ];
}
2024-02-20 23:09:49 +01:00
// Collects the global-scope IPv4 addresses of all physical network interfaces.
// Best-effort: returns [] when /sys/class/net cannot be read and skips
// interfaces whose addresses cannot be queried.
async function getAddressesForPort53() {
    const [error, deviceLinks] = await safe(fs.promises.readdir('/sys/class/net')); // https://man7.org/linux/man-pages/man5/sysfs.5.html
    if (error) return [];

    // virtual devices (bridges, veths, ...) link into a path containing 'virtual'
    const physicalDevices = deviceLinks
        .map((name) => ({ name, link: safe.fs.readlinkSync(`/sys/class/net/${name}`) }))
        .filter((device) => device.link && !device.link.includes('virtual'));

    const addresses = [];
    for (const device of physicalDevices) {
        const [ipError, output] = await safe(shell.spawn('ip', ['-f', 'inet', '-j', 'addr', 'show', 'dev', device.name, 'scope', 'global'], { encoding: 'utf8' }));
        if (ipError) continue;

        for (const record of safe.JSON.parse(output) || []) {
            const address = safe.query(record, 'addr_info[0].local');
            if (address) addresses.push(address);
        }
    }

    return addresses;
}
2026-02-14 16:34:34 +01:00
// This only returns ipv4 addresses
// We dont bind to ipv6 interfaces, public prefix changes and container restarts wont work
2021-08-25 19:41:46 -07:00
// Starts the container with the given id. Already-started containers (304) are fine.
// Throws NOT_FOUND when the container does not exist, BAD_FIELD for container
// misconfiguration (e.g. start.sh is not executable) and DOCKER_ERROR otherwise.
async function startContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const [error] = await safe(gConnection.getContainer(containerId).start());
    if (!error) return;

    if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
    if (error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
    if (error.statusCode !== 304) throw new BoxError(BoxError.DOCKER_ERROR, error); // 304 means already started
}
2021-08-25 19:41:46 -07:00
// Restarts the container with the given id.
// Throws NOT_FOUND when the container does not exist, BAD_FIELD for container
// misconfiguration (e.g. start.sh is not executable) and DOCKER_ERROR otherwise.
async function restartContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const container = gConnection.getContainer(containerId);

    const [error] = await safe(container.restart());
    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); // fixed 'Contanier' typo in message
    if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
    if (error && error.statusCode !== 204) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
2021-08-25 19:41:46 -07:00
// Stops the container with the given id and waits for it to exit.
// Already-stopped (304) and missing (404) containers are fine outcomes.
// Throws DOCKER_ERROR for any other stop or wait failure.
async function stopContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    log(`stopContainer: stopping container ${containerId}`);

    const container = gConnection.getContainer(containerId);

    const isFatal = (error) => error && error.statusCode !== 304 && error.statusCode !== 404;

    const [stopError] = await safe(container.stop({ t: 10 })); // wait for 10 seconds before killing it
    if (isFatal(stopError)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + stopError.message);

    const [waitError] = await safe(container.wait());
    if (isFatal(waitError)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + waitError.message);
}
2021-08-25 19:41:46 -07:00
// Force-removes the container with the given id (or name), including its
// anonymous volumes (host mounts are kept). A missing container (404) is fine.
// Throws DOCKER_ERROR on any other failure.
async function deleteContainer(containerId) { // id can also be name
    assert.strictEqual(typeof containerId, 'string');

    log(`deleteContainer: deleting ${containerId}`);

    const removeOptions = {
        force: true, // kill container if it's running
        v: true // removes volumes associated with the container (but not host mounts)
    };

    const [error] = await safe(gConnection.getContainer(containerId).remove(removeOptions));
    if (!error) return;
    if (error.statusCode === 404) return; // already gone

    log('Error removing container %s : %o', containerId, error);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
2021-08-25 19:41:46 -07:00
// Deletes all containers labeled with the given appId, one by one.
// With options.managedOnly, only cloudron-managed containers are removed.
// Throws DOCKER_ERROR when the container listing fails.
async function deleteContainers(appId, options) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof options, 'object');

    const labels = [`appId=${appId}`];
    if (options.managedOnly) labels.push('isCloudronManaged=true');

    const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) }));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    for (const { Id } of containers) {
        await deleteContainer(Id);
    }
}
2021-08-25 19:41:46 -07:00
// Stops all containers labeled with the given appId, one by one.
// Throws DOCKER_ERROR when the container listing fails.
async function stopContainers(appId) {
    assert.strictEqual(typeof appId, 'string');

    const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: [`appId=${appId}`] }) }));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    for (const { Id } of containers) {
        await stopContainer(Id);
    }
}
2024-12-14 14:53:08 +01:00
// Removes an image by ref. Best-effort: empty/invalid refs, missing images and
// images still used by another container are silently ignored.
// Throws DOCKER_ERROR on any other failure.
async function deleteImage(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    if (!imageRef) return;
    // a common mistake is to paste a https:// as docker image. this results in a crash at
    // runtime in dockerode module (https://github.com/apocas/dockerode/issues/548)
    if (imageRef.includes('//') || imageRef.startsWith('/')) return;

    const removeOptions = {
        force: false, // might be shared with another instance of this app
        noprune: false // delete untagged parents
    };

    // registry v1 used to pull down all *tags*. this meant that deleting image by tag was not enough (since that
    // just removes the tag). we used to remove the image by id. this is not required anymore because aliases are
    // not created anymore after https://github.com/docker/docker/pull/10571
    log(`deleteImage: removing ${imageRef}`);
    const image = gConnection.getImage(imageRef.replace(/@sha256:.*/, '')); // can't have the manifest id, it won't remove anything
    const [error] = await safe(image.remove(removeOptions));

    if (!error) return;
    if (error.statusCode === 400) return; // invalid image format. this can happen if user installed with a bad --docker-image
    if (error.statusCode === 404) return; // not found
    if (error.statusCode === 409) return; // another container using the image

    log(`Error removing image ${imageRef}: %o`, error);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
2016-02-18 15:39:27 +01:00
2021-08-25 19:41:46 -07:00
// Returns the docker inspect output of a container. Throws NOT_FOUND for an unknown container id.
async function inspect(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const [error, details] = await safe(gConnection.getContainer(containerId).inspect());

    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return details;
}
2026-02-14 16:34:34 +01:00
// Ensures the manifest's docker image is available locally, pulling it when absent.
// cloudron/* appstore images are fetched through the CLOUDRON_REGISTRIES mirrors and
// then retagged to their plain repository name so that 'docker run' does not re-download.
async function downloadImage(manifest) {
    assert.strictEqual(typeof manifest, 'object');

    log(`downloadImage: ${manifest.dockerImage}`);

    const [inspectError, existing] = await safe(gConnection.getImage(manifest.dockerImage).inspect());
    if (!inspectError && existing) return; // image is already present locally

    const parsedRef = parseImageRef(manifest.dockerImage);

    // pull failures are retried unless they stem from a local filesystem error
    await promiseRetry({ times: 10, interval: 5000, debug: log, retry: (pullError) => pullError.reason !== BoxError.FS_ERROR }, async () => {
        const isAppstoreImage = parsedRef.registry === null && parsedRef.fullRepositoryName.startsWith('cloudron/');

        // custom (non appstore) image
        if (!isAppstoreImage) return await pullImage(manifest.dockerImage);

        // docker hub only uses first 64 bits for ipv6 addressing. this causes many ipv6 rate limit errors
        // https://www.docker.com/blog/beta-ipv6-support-on-docker-hub-registry/ . as a hack, we try ipv4 explicitly
        let upstreamRef = null;
        let lastError = null;
        for (const registry of CLOUDRON_REGISTRIES) {
            upstreamRef = `${registry}/${manifest.dockerImage}`;
            [lastError] = await safe(pullImage(upstreamRef));
            if (!lastError) break;
        }
        if (lastError || !upstreamRef) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull ${manifest.dockerImage} from dockerhub or quay: ${lastError?.message}`);

        // retag the downloaded image to not have the registry name. this prevents 'docker run' from redownloading it
        log(`downloadImage: tagging ${upstreamRef} as ${parsedRef.fullRepositoryName}:${parsedRef.tag}`);
        await gConnection.getImage(upstreamRef).tag({ repo: parsedRef.fullRepositoryName, tag: parsedRef.tag });

        log(`downloadImage: untagging ${upstreamRef}`);
        await deleteImage(upstreamRef);
    });
}
2021-08-25 19:41:46 -07:00
// Looks up a container's IP on the 'cloudron' network. Under test a fixed address is returned.
async function getContainerIp(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    if (constants.TEST) return '127.0.5.5';

    const details = await inspect(containerId);

    const ip = safe.query(details, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (ip) return ip;

    throw new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP');
}
2022-05-16 10:26:30 -07:00
// Creates an exec instance inside the container and returns its id.
async function createExec(containerId, options) {
    assert.strictEqual(typeof containerId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, exec] = await safe(gConnection.getContainer(containerId).exec(options));

    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
        if (error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return exec.id;
}
2019-12-04 13:17:58 -08:00
2026-02-14 16:34:34 +01:00
// Inspects an exec instance and reports its exit code and running state.
async function getExec(execId) {
    assert.strictEqual(typeof execId, 'string');

    const [error, state] = await safe(gConnection.getExec(execId).inspect());

    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return { exitCode: state.ExitCode, running: state.Running };
}
2026-02-14 16:34:34 +01:00
// Starts a previously created exec instance and returns its stream.
async function startExec(execId, options) {
    assert.strictEqual(typeof execId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, stream] = await safe(gConnection.getExec(execId).start(options)); /* in hijacked mode, stream is a net.socket */

    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return stream;
}
// Resizes the TTY of an exec instance. options carries the new geometry ({ h, w }).
async function resizeExec(execId, options) {
    assert.strictEqual(typeof execId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error] = await safe(gConnection.getExec(execId).resize(options));

    if (!error) return;
    if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
2021-08-25 19:41:46 -07:00
// Subscribes to the docker daemon event stream.
async function getEvents(options) {
    assert.strictEqual(typeof options, 'object');

    const [error, eventStream] = await safe(gConnection.getEvents(options));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    return eventStream;
}
2025-07-03 22:36:36 +02:00
// Fetches resource usage stats for a container. When options.stream is truthy a live stream is requested.
async function getStats(containerId, options) {
    assert.strictEqual(typeof containerId, 'string');
    assert.strictEqual(typeof options, 'object');

    const container = gConnection.getContainer(containerId);
    const [error, stats] = await safe(container.stats({ stream: Boolean(options.stream) }));

    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return stats;
}
2021-08-25 19:41:46 -07:00
// Returns the docker daemon information; also serves as a connectivity check.
async function info() {
    const [error, daemonInfo] = await safe(gConnection.info());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`);

    return daemonInfo;
}
2021-01-20 12:01:15 -08:00
2025-07-17 01:16:24 +02:00
// Returns docker disk usage information (equivalent of 'docker system df').
async function df(options) {
    assert.strictEqual(typeof options, 'object');

    const [error, usage] = await safe(gConnection.df(options));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`);

    return usage;
}
2024-04-09 18:59:40 +02:00
// Updates the memory limit of a container via 'docker update' (swap stays unlimited).
// Retried because updating memory constraints can fail with
// 'failed to write to memory.memsw.limit_in_bytes: write /sys/fs/cgroup/memory/docker/xx/memory.memsw.limit_in_bytes: device or resource busy'
// Throws BoxError.DOCKER_ERROR after all attempts fail.
async function update(name, memory) {
    assert.strictEqual(typeof name, 'string');
    assert.strictEqual(typeof memory, 'number');

    const MAX_ATTEMPTS = 10;
    for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
        const [error] = await safe(shell.spawn('docker', [ 'update', '--memory', memory, '--memory-swap', '-1', name ], { encoding: 'utf8' }));
        if (!error) return;

        // don't burn a final minute sleeping when no attempt remains
        if (attempt < MAX_ATTEMPTS) await timers.setTimeout(60 * 1000);
    }

    throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to update container');
}
2026-02-14 15:43:24 +01:00
2026-02-14 16:34:34 +01:00
// Creates (but does not start) a docker container for the app.
// - app: app record (manifest, fqdn, env, portBindings, devices, debugMode, ...)
// - name: container name. the main app container uses app.id (see createContainer)
// - cmd: command array for subcontainers (e.g. scheduler tasks); null means the main app container
// - options: extra dockerode createContainer options, shallow-merged over the computed ones (caller wins)
// Returns the dockerode container object. Throws ALREADY_EXISTS on a 409, DOCKER_ERROR otherwise.
async function createSubcontainer(app, name, cmd, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof name, 'string');
    assert(!cmd || Array.isArray(cmd));
    assert.strictEqual(typeof options, 'object');

    const isAppContainer = !cmd; // non app-containers are like scheduler

    const manifest = app.manifest;
    const exposedPorts = {}, dockerPortBindings = {};
    const domain = app.fqdn;

    const { fqdn: dashboardFqdn } = await dashboard.getLocation();

    // environment variables common to every cloudron app container
    const stdEnv = [
        'LANG=C.UTF-8',
        'CLOUDRON=1',
        'CLOUDRON_PROXY_IP=172.18.0.1',
        `CLOUDRON_APP_HOSTNAME=${app.id}`,
        `CLOUDRON_WEBADMIN_ORIGIN=https://${dashboardFqdn}`,
        `CLOUDRON_API_ORIGIN=https://${dashboardFqdn}`,
        `CLOUDRON_APP_ORIGIN=https://${domain}`,
        `CLOUDRON_APP_DOMAIN=${domain}`
    ];

    if (app.debugMode) stdEnv.push('CLOUDRON_DEBUG=1');

    if (app.manifest.multiDomain) stdEnv.push(`CLOUDRON_ALIAS_DOMAINS=${app.aliasDomains.map(ad => ad.fqdn).join(',')}`);

    const secondaryDomainsEnv = app.secondaryDomains.map(sd => `${sd.environmentVariable}=${sd.fqdn}`);

    // compute exposed ports, host port bindings and the PORTNAME=hostPort env vars
    const portEnv = [];
    for (const portName in app.portBindings) {
        const { hostPort, type: portType, count: portCount } = app.portBindings[portName];
        const portSpec = portType == 'tcp' ? manifest.tcpPorts : manifest.udpPorts;
        const containerPort = portSpec[portName].containerPort || hostPort;

        // port 53 is special. systemd-resolved is listening on 127.0.0.x port 53 and another process cannot listen to 0.0.0.0 port 53
        // for port 53 alone, we listen explicitly on the server's interface IP
        const hostIps = hostPort === 53 ? await getAddressesForPort53() : [ '0.0.0.0', '::0' ];

        portEnv.push(`${portName}=${hostPort}`);
        if (portCount > 1) portEnv.push(`${portName}_COUNT=${portCount}`);

        // docker portBindings requires ports to be exposed
        for (let i = 0; i < portCount; ++i) {
            exposedPorts[`${containerPort + i}/${portType}`] = {};

            dockerPortBindings[`${containerPort + i}/${portType}`] = hostIps.map(hip => { return { HostIp: hip, HostPort: String(hostPort + i) }; });
        }
    }

    // user/app specific environment (assumed to be a flat string map - values are interpolated as-is)
    const appEnv = [];
    Object.keys(app.env).forEach(function (envName) { appEnv.push(`${envName}=${app.env[envName]}`); });

    let memoryLimit = apps.getMemoryLimit(app);

    // give scheduler tasks twice the memory limit since background jobs take more memory
    // if required, we can make this a manifest and runtime argument later
    if (!isAppContainer) memoryLimit *= 2;

    const mounts = await getMounts(app);
    const addonEnv = await services.getEnvironment(app);

    // writable paths on an otherwise read-only rootfs
    const runtimeVolumes = {
        '/tmp': {},
        '/run': {},
    };
    if (app.manifest.runtimeDirs) {
        app.manifest.runtimeDirs.forEach(dir => runtimeVolumes[dir] = {});
    }

    const containerOptions = {
        name: name, // for referencing containers
        Tty: isAppContainer,
        Image: app.manifest.dockerImage,
        Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
        Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv).concat(secondaryDomainsEnv),
        ExposedPorts: isAppContainer ? exposedPorts : {},
        Volumes: runtimeVolumes,
        Labels: {
            'fqdn': app.fqdn,
            'appId': app.id,
            'isSubcontainer': String(!isAppContainer),
            'isCloudronManaged': String(true)
        },
        HostConfig: {
            Mounts: mounts,
            LogConfig: {
                Type: 'syslog',
                Config: {
                    'tag': app.id,
                    'syslog-address': `unix://${paths.SYSLOG_SOCKET_FILE}`,
                    'syslog-format': 'rfc5424'
                }
            },
            Memory: memoryLimit,
            MemorySwap: -1, // Unlimited swap
            PortBindings: isAppContainer ? dockerPortBindings : {},
            PublishAllPorts: false,
            ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
            RestartPolicy: {
                'Name': isAppContainer ? 'unless-stopped' : 'no',
                'MaximumRetryCount': 0
            },
            // CpuPeriod (100000 microseconds) and CpuQuota(app.cpuQuota% of CpuPeriod)
            // 1000000000 is one core https://github.com/moby/moby/issues/24713#issuecomment-233167619 and https://stackoverflow.com/questions/52391877/set-the-number-of-cpu-cores-of-a-container-using-docker-engine-api
            NanoCPUs: app.cpuQuota === 100 ? 0 : Math.round(os.cpus().length * app.cpuQuota / 100 * 1000000000),
            VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
            SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
            CapAdd: [],
            CapDrop: [],
            Sysctls: {},
            ExtraHosts: []
        }
    };

    // do no set hostname of containers to location as it might conflict with addons names. for example, an app installed in mail
    // location may not reach mail container anymore by DNS. We cannot set hostname to fqdn either as that sets up the dns
    // name to look up the internal docker ip. this makes curl from within container fail
    // Note that Hostname has no effect on DNS. We have to use the --net-alias for dns.
    // Hostname cannot be set with container NetworkMode. Subcontainers run is the network space of the app container
    // This is done to prevent lots of up/down events and iptables locking
    if (isAppContainer) {
        containerOptions.Hostname = app.id;
        containerOptions.HostConfig.NetworkMode = 'cloudron'; // user defined bridge network

        // Do not inject for AdGuard. It ends up resolving the dashboard domain as the docker bridge IP
        if (manifest.id !== 'com.adguard.home.cloudronapp') containerOptions.HostConfig.ExtraHosts.push(`${dashboardFqdn}:172.18.0.1`);

        if (manifest.addons?.sendmail?.requiresValidCertificate) {
            const { fqdn: mailFqdn } = await mailServer.getLocation();
            containerOptions.HostConfig.ExtraHosts.push(`${mailFqdn}:${constants.MAIL_SERVICE_IPv4}`);
        }

        containerOptions.NetworkingConfig = {
            EndpointsConfig: {
                cloudron: {
                    IPAMConfig: {
                        IPv4Address: app.containerIp
                    },
                    Aliases: [ name ] // adds hostname entry with container name
                }
            }
        };
    } else {
        containerOptions.HostConfig.NetworkMode = `container:${app.containerId}`; // scheduler containers must have same IP as app for various addon auth
    }

    const capabilities = manifest.capabilities || [];
    // https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
    if (capabilities.includes('net_admin')) {
        containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');

        // ipv6 for new interfaces is disabled in the container. this prevents the openvpn tun device having ipv6
        // See https://github.com/moby/moby/issues/20569 and https://github.com/moby/moby/issues/33099
        containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.disable_ipv6'] = '0';
        containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.forwarding'] = '1';
    }

    if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping

    if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker

    // expose only host devices that actually exist; missing ones are skipped with a log line
    containerOptions.HostConfig.Devices = Object.keys(app.devices).map((d) => {
        if (!safe.fs.existsSync(d)) {
            log(`createSubcontainer: device ${d} does not exist. Skipping...`);
            return null;
        }

        return { PathOnHost: d, PathInContainer: d, CgroupPermissions: 'rwm' };
    }).filter(d => d);

    if (capabilities.includes('vaapi') && safe.fs.existsSync('/dev/dri')) {
        containerOptions.HostConfig.Devices.push({ PathOnHost: '/dev/dri', PathInContainer: '/dev/dri', CgroupPermissions: 'rwm' });
    }

    // caller-supplied options override the computed ones (top-level keys only, shallow merge)
    const mergedOptions = Object.assign({}, containerOptions, options);

    const [createError, container] = await safe(gConnection.createContainer(mergedOptions));
    if (createError && createError.statusCode === 409) throw new BoxError(BoxError.ALREADY_EXISTS, createError);
    if (createError) throw new BoxError(BoxError.DOCKER_ERROR, createError);

    return container;
}
// Creates the main app container: named after the app id, default command, no option overrides.
async function createContainer(app) {
    const containerName = app.id;
    return await createSubcontainer(app, containerName, null /* cmd */, {} /* options */);
}
2026-02-14 15:43:24 +01:00
export default {
ping ,
info ,
df ,
buildImage ,
downloadImage ,
createContainer ,
startContainer ,
restartContainer ,
stopContainer ,
stopContainers ,
deleteContainer ,
deleteImage ,
deleteContainers ,
createSubcontainer ,
inspect ,
getContainerIp ,
getEvents ,
getStats ,
update ,
parseImageRef ,
createExec ,
startExec ,
getExec ,
resizeExec
} ;