diff --git a/eslint.config.js b/eslint.config.js index dec0798f9..7c7146051 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -15,7 +15,7 @@ export default [ rules: { semi: "error", "prefer-const": "error", - "no-use-before-define": ["error", { "functions": false }] + "no-use-before-define": "error" } } ]; diff --git a/src/acme2.js b/src/acme2.js index 9b9f5b901..d0bcd87ff 100644 --- a/src/acme2.js +++ b/src/acme2.js @@ -14,10 +14,6 @@ import users from './users.js'; const debug = debugModule('box:cert/acme2'); -const _name = 'acme'; -const _getChallengeSubdomain = getChallengeSubdomain; - - const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory', CA_STAGING_DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory'; @@ -534,9 +530,11 @@ async function getCertificate(fqdn, domainObject, key) { }); } +const _name = 'acme'; + export default { getCertificate, getRenewalInfo, _name, - _getChallengeSubdomain, + _getChallengeSubdomain: getChallengeSubdomain, }; diff --git a/src/addonconfigs.js b/src/addonconfigs.js index 7e8ff0937..db32a9b15 100644 --- a/src/addonconfigs.js +++ b/src/addonconfigs.js @@ -2,6 +2,13 @@ import assert from 'node:assert'; import database from './database.js'; +async function unset(appId, addonId) { + assert.strictEqual(typeof appId, 'string'); + assert.strictEqual(typeof addonId, 'string'); + + await database.query('DELETE FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ]); +} + async function set(appId, addonId, env) { assert.strictEqual(typeof appId, 'string'); assert.strictEqual(typeof addonId, 'string'); @@ -20,13 +27,6 @@ async function set(appId, addonId, env) { await database.query(query + queryArgs.join(','), args); } -async function unset(appId, addonId) { - assert.strictEqual(typeof appId, 'string'); - assert.strictEqual(typeof addonId, 'string'); - - await database.query('DELETE FROM appAddonConfigs WHERE appId = ? 
AND addonId = ?', [ appId, addonId ]); -} - async function unsetByAppId(appId) { assert.strictEqual(typeof appId, 'string'); diff --git a/src/apps.js b/src/apps.js index 3d2dde994..c0d8ac504 100644 --- a/src/apps.js +++ b/src/apps.js @@ -85,71 +85,6 @@ const LOCATION_FIELDS = [ 'appId', 'subdomain', 'domain', 'type', 'certificateJs const CHECKVOLUME_CMD = path.join(import.meta.dirname, 'scripts/checkvolume.sh'); // ports is a map of envvar -> hostPort -function validatePorts(ports, manifest) { - assert.strictEqual(typeof ports, 'object'); - assert.strictEqual(typeof manifest, 'object'); - - // keep the public ports in sync with firewall rules in setup/start/cloudron-firewall.sh - // these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1 - // for custom tcp ports - const RESERVED_PORTS = [ - 22, /* ssh */ - 25, /* smtp */ - 80, /* http */ - 143, /* imap */ - 202, /* alternate ssh */ - 222, /* proftd */ - 443, /* https */ - 465, /* smtps */ - 587, /* submission */ - 993, /* imaps */ - 995, /* pop3s */ - 2003, /* graphite (lo) */ - constants.PORT, /* app server (lo) */ - constants.AUTHWALL_PORT, /* protected sites */ - constants.INTERNAL_SMTP_PORT, /* internal smtp port (lo) */ - constants.LDAP_PORT, - 3306, /* mysql (lo) */ - 3478, /* turn,stun */ - 4190, /* managesieve */ - 5349, /* turn,stun TLS */ - 8000, /* ESXi monitoring */ - ]; - - const RESERVED_PORT_RANGES = [ - [constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END] /* turn udp ports */ - ]; - - const ALLOWED_PORTS = [ - 53, // dns 53 is special and adblocker apps can use them - 853 // dns over tls - ]; - - if (!ports) return null; - - const tcpPorts = manifest.tcpPorts || {}; - const udpPorts = manifest.udpPorts || {}; - - for (const portName in ports) { - if (!/^[A-Z0-9_]+$/.test(portName)) return new BoxError(BoxError.BAD_FIELD, `${portName} is not a valid environment variable in ports`); - - const hostPort = ports[portName]; - if 
(!Number.isInteger(hostPort)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not an integer in ${portName} ports`); - if (RESERVED_PORTS.indexOf(hostPort) !== -1) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`); - if (RESERVED_PORT_RANGES.find(range => (hostPort >= range[0] && hostPort <= range[1]))) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`); - if (ALLOWED_PORTS.indexOf(hostPort) === -1 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} for ${portName} is not in permitted range in ports`); - - // it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and ports. missing values implies the service is disabled - const portSpec = tcpPorts[portName] || udpPorts[portName]; - if (!portSpec) return new BoxError(BoxError.BAD_FIELD, `Invalid portBinding ${portName}`); - if (portSpec.readOnly && portSpec.defaultValue !== hostPort) return new BoxError(BoxError.BAD_FIELD, `portBinding ${portName} is readOnly and cannot have a different value that the default`); - if ((hostPort + (portSpec.portCount || 1)) > 65535) return new BoxError(BoxError.BAD_FIELD, `${hostPort}+${portSpec.portCount} for ${portName} exceeds valid port range`); - } - - return null; -} - -// translates the REST API ports (envvar -> hostPort) to database portBindings (envvar -> { hostPort, count, type }) function translateToPortBindings(ports, manifest) { assert.strictEqual(typeof ports, 'object'); assert.strictEqual(typeof manifest, 'object'); @@ -169,6 +104,7 @@ function translateToPortBindings(ports, manifest) { return portBindings; } +// validates the secondaryDomains against the manifest function validateSecondaryDomains(secondaryDomains, manifest) { assert.strictEqual(typeof secondaryDomains, 'object'); assert.strictEqual(typeof manifest, 'object'); @@ -260,7 +196,6 @@
function getSchedulerConfig(app) { return schedulerConfig; } -// also validates operators function validateAccessRestriction(accessRestriction) { assert.strictEqual(typeof accessRestriction, 'object'); @@ -280,25 +215,7 @@ function validateAccessRestriction(accessRestriction) { return null; } -function validateMemoryLimit(manifest, memoryLimit) { - assert.strictEqual(typeof manifest, 'object'); - assert.strictEqual(typeof memoryLimit, 'number'); - - // max is not checked because docker allows any value for --memory - const min = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT; - - // allow 0, which indicates that it is not set, the one from the manifest will be choosen but we don't commit any user value - // this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used - if (memoryLimit === 0) return null; - - // a special value that indicates unlimited memory - if (memoryLimit === -1) return null; - - if (memoryLimit < min) return new BoxError(BoxError.BAD_FIELD, 'memoryLimit too small'); - - return null; -} - +// validates the cpuQuota value function validateCpuQuota(cpuQuota) { assert.strictEqual(typeof cpuQuota, 'number'); @@ -371,14 +288,6 @@ function validateTags(tags) { return null; } -function validateDevices(devices) { - for (const key in devices) { - if (key.indexOf('/dev/') !== 0) return new BoxError(BoxError.BAD_FIELD, `"${key}" must start with /dev/`); - } - - return null; -} - function validateEnv(env) { for (const key in env) { if (key.length > 512) return new BoxError(BoxError.BAD_FIELD, 'Max env var key length is 512'); @@ -389,38 +298,6 @@ function validateEnv(env) { return null; } -async function checkStorage(app, volumeId, prefix) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof volumeId, 'string'); - assert.strictEqual(typeof prefix, 'string'); - - const volume = await volumes.get(volumeId); - if (volume === null) throw new BoxError(BoxError.BAD_FIELD, 
'Storage volume not found'); - - // lack of file perms makes these unsupported - if (volume.mountType === 'cifs' || volume.mountType === 'sshfs') throw new BoxError(BoxError.BAD_FIELD, `${volume.mountType} volumes cannot be used as data directory`); - - const status = await volumes.getStatus(volume); - if (status.state !== 'active') throw new BoxError(BoxError.BAD_FIELD, 'Volume is not active'); - - if (path.isAbsolute(prefix)) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" must be a relative path`); - if (prefix.endsWith('/')) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" contains trailing slash`); - if (prefix !== '' && path.normalize(prefix) !== prefix) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" is not a normalized path`); - - const sourceDir = await getStorageDir(app); - if (sourceDir === null) throw new BoxError(BoxError.BAD_STATE, 'App does not use localstorage addon'); - - const targetDir = path.join(volume.hostPath, prefix); - const rel = path.relative(sourceDir, targetDir); - if (!rel.startsWith('../') && rel.split('/').length > 1) throw new BoxError(BoxError.BAD_FIELD, 'Only one level subdirectory moves are supported'); - - const [error] = await safe(shell.sudo([ CHECKVOLUME_CMD, targetDir, sourceDir ], {})); - if (error && error.code === 2) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} is not empty`); - if (error && error.code === 3) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} does not support chown`); - - return null; -} - function getDuplicateErrorDetails(errorMessage, locations, portBindings) { assert.strictEqual(typeof errorMessage, 'string'); assert(Array.isArray(locations)); @@ -453,62 +330,6 @@ function getDuplicateErrorDetails(errorMessage, locations, portBindings) { return new BoxError(BoxError.ALREADY_EXISTS, `${match[2]} '${match[1]}' is in use`); } -async function getStorageDir(app) { - assert.strictEqual(typeof app, 'object'); - - if 
(!app.manifest.addons?.localstorage) return null; - - if (!app.storageVolumeId) return path.join(paths.APPS_DATA_DIR, app.id, 'data'); - const volume = await volumes.get(app.storageVolumeId); - if (!volume) throw new BoxError(BoxError.NOT_FOUND, 'Volume not found'); // not possible - return path.join(volume.hostPath, app.storageVolumePrefix); -} - -function pickFields(app, accessLevel) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof accessLevel, 'string'); - - if (accessLevel === ACCESS_LEVEL_NONE) return null; // cannot happen! - - let result; - if (accessLevel === ACCESS_LEVEL_USER) { - result = _.pick(app, [ - 'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction', - 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso', 'subdomain', 'domain', 'fqdn', 'certificate', - 'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'upstreamUri']); - } else { // admin or operator - result = _.pick(app, [ - 'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId', - 'subdomain', 'domain', 'fqdn', 'certificate', 'crontab', 'upstreamUri', - 'accessRestriction', 'manifest', 'portBindings', 'iconUrl', 'memoryLimit', 'cpuQuota', 'operators', - 'sso', 'debugMode', 'reverseProxyConfig', 'enableBackup', 'creationTime', 'updateTime', 'ts', 'tags', - 'label', 'notes', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'devices', 'env', 'enableAutomaticUpdate', - 'storageVolumeId', 'storageVolumePrefix', 'mounts', 'enableTurn', 'enableRedis', 'checklist', - 'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain', 'updateInfo']); - } - - // remove private certificate key - if (result.certificate) delete result.certificate.key; - result.secondaryDomains.forEach(sd => { if (sd.certificate) delete sd.certificate.key; }); - result.aliasDomains.forEach(ad => { if 
(ad.certificate) delete ad.certificate.key; }); - result.redirectDomains.forEach(rd => { if (rd.certificate) delete rd.certificate.key; }); - - return result; -} - -async function getIcon(app, options) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof options, 'object'); - - const icons = await getIcons(app.id); - if (!icons) throw new BoxError(BoxError.NOT_FOUND, 'No such app'); - - if (!options.original && icons.icon) return icons.icon; - if (icons.packageIcon) return icons.packageIcon; - - return null; -} - function getMemoryLimit(app) { assert.strictEqual(typeof app, 'object'); @@ -644,59 +465,6 @@ function postProcess(result) { delete result.devicesJson; } -// note: this value cannot be cached as it depends on enableAutomaticUpdate and runState -function canAutoupdateAppSync(app, updateInfo) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof updateInfo, 'object'); - - const manifest = updateInfo.manifest; - - if (!app.enableAutomaticUpdate) return { code: false, reason: 'Automatic updates for the app is disabled' }; - - // for invalid subscriptions the appstore does not return a dockerImage - if (!manifest.dockerImage) return { code: false, reason: 'Invalid or Expired subscription' }; - - if (updateInfo.unstable) return { code: false, reason: 'Update is marked as unstable' }; // only manual update allowed for unstable updates - - // for community apps, it's a warning sign when the repo changes (for example: versions domain gets hijacked) - if (docker.parseImageRef(manifest.dockerImage).fullRepositoryName !== docker.parseImageRef(app.manifest.dockerImage).fullRepositoryName) return { code: false, reason: 'Package docker image repository has changed' }; - - if ((semver.major(app.manifest.version) !== 0) && (semver.major(app.manifest.version) !== semver.major(manifest.version))) { - return { code: false, reason: 'Major package version requires review of breaking changes' }; // major changes are blocking - } - - if 
(app.runState === RSTATE_STOPPED) return { code: false, reason: 'Stopped apps cannot run migration scripts' }; - - const newTcpPorts = manifest.tcpPorts || {}; - const newUdpPorts = manifest.udpPorts || {}; - const portBindings = app.portBindings; // this is never null - - for (const portName in portBindings) { - if (!(portName in newTcpPorts) && !(portName in newUdpPorts)) return { code: false, reason: `${portName} port was in use but new update removes it` }; - } - - // it's fine if one or more (unused) port keys got removed - return { code: true, reason: '' }; -} - -// attaches computed properties -function attachProperties(app, domainObjectMap) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof domainObjectMap, 'object'); - - app.iconUrl = app.hasIcon || app.hasPackageIcon ? `/api/v1/apps/${app.id}/icon` : null; - app.fqdn = dns.fqdn(app.subdomain, app.domain); - app.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); - app.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); - app.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); - - if (app.updateInfo) { - const { code, reason } = canAutoupdateAppSync(app, app.updateInfo); // isAutoUpdatable is not cached since it depends on enableAutomaticUpdate and runState - app.updateInfo.isAutoUpdatable = code; - app.updateInfo.manualUpdateReason = reason; - } -} - function isAdmin(user) { assert.strictEqual(typeof user, 'object'); @@ -735,6 +503,38 @@ function accessLevel(app, user) { return canAccess(app, user) ? ACCESS_LEVEL_USER : ACCESS_LEVEL_NONE; } +function pickFields(app, accessLevel) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof accessLevel, 'string'); + + if (accessLevel === ACCESS_LEVEL_NONE) return null; // cannot happen! 
+ + let result; + if (accessLevel === ACCESS_LEVEL_USER) { + result = _.pick(app, [ + 'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction', + 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso', 'subdomain', 'domain', 'fqdn', 'certificate', + 'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'upstreamUri']); + } else { // admin or operator + result = _.pick(app, [ + 'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId', + 'subdomain', 'domain', 'fqdn', 'certificate', 'crontab', 'upstreamUri', + 'accessRestriction', 'manifest', 'portBindings', 'iconUrl', 'memoryLimit', 'cpuQuota', 'operators', + 'sso', 'debugMode', 'reverseProxyConfig', 'enableBackup', 'creationTime', 'updateTime', 'ts', 'tags', + 'label', 'notes', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'devices', 'env', 'enableAutomaticUpdate', + 'storageVolumeId', 'storageVolumePrefix', 'mounts', 'enableTurn', 'enableRedis', 'checklist', + 'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain', 'updateInfo']); + } + + // remove private certificate key + if (result.certificate) delete result.certificate.key; + result.secondaryDomains.forEach(sd => { if (sd.certificate) delete sd.certificate.key; }); + result.aliasDomains.forEach(ad => { if (ad.certificate) delete ad.certificate.key; }); + result.redirectDomains.forEach(rd => { if (rd.certificate) delete rd.certificate.key; }); + + return result; +} + async function checkForPortBindingConflict(portBindings, options) { assert.strictEqual(typeof portBindings, 'object'); assert.strictEqual(typeof options, 'object'); @@ -878,6 +678,7 @@ async function add(id, appStoreId, versionsUrl, manifest, subdomain, domain, por if (error) throw new BoxError(BoxError.DATABASE_ERROR, error); } +// returns the stored custom icon and package icon of the app
async function getIcons(id) { assert.strictEqual(typeof id, 'string'); @@ -886,6 +687,20 @@ async function getIcons(id) { return { icon: results[0].icon, packageIcon: results[0].packageIcon }; } +// returns the custom icon, falling back to the package icon +async function getIcon(app, options) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof options, 'object'); + + const icons = await getIcons(app.id); + if (!icons) throw new BoxError(BoxError.NOT_FOUND, 'No such app'); + + if (!options.original && icons.icon) return icons.icon; + if (icons.packageIcon) return icons.packageIcon; + + return null; +} + async function updateWithConstraints(id, app, constraints) { assert.strictEqual(typeof id, 'string'); assert.strictEqual(typeof app, 'object'); @@ -985,6 +800,76 @@ async function update(id, app) { await updateWithConstraints(id, app, ''); } +function validateMemoryLimit(manifest, memoryLimit) { + assert.strictEqual(typeof manifest, 'object'); + assert.strictEqual(typeof memoryLimit, 'number'); + + // max is not checked because docker allows any value for --memory + const min = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT; + + // allow 0, which indicates that it is not set, the one from the manifest will be choosen but we don't commit any user value + // this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used + if (memoryLimit === 0) return null; + + // a special value that indicates unlimited memory + if (memoryLimit === -1) return null; + + if (memoryLimit < min) return new BoxError(BoxError.BAD_FIELD, 'memoryLimit too small'); + + return null; +} + +function canAutoupdateAppSync(app, updateInfo) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof updateInfo, 'object'); + + const manifest = updateInfo.manifest; + + if (!app.enableAutomaticUpdate) return { code: false, reason: 'Automatic updates for the app is disabled' }; + + // for invalid subscriptions 
the appstore does not return a dockerImage + if (!manifest.dockerImage) return { code: false, reason: 'Invalid or Expired subscription' }; + + if (updateInfo.unstable) return { code: false, reason: 'Update is marked as unstable' }; // only manual update allowed for unstable updates + + // for community apps, it's a warning sign when the repo changes (for example: versions domain gets hijacked) + if (docker.parseImageRef(manifest.dockerImage).fullRepositoryName !== docker.parseImageRef(app.manifest.dockerImage).fullRepositoryName) return { code: false, reason: 'Package docker image repository has changed' }; + + if ((semver.major(app.manifest.version) !== 0) && (semver.major(app.manifest.version) !== semver.major(manifest.version))) { + return { code: false, reason: 'Major package version requires review of breaking changes' }; // major changes are blocking + } + + if (app.runState === RSTATE_STOPPED) return { code: false, reason: 'Stopped apps cannot run migration scripts' }; + + const newTcpPorts = manifest.tcpPorts || {}; + const newUdpPorts = manifest.udpPorts || {}; + const portBindings = app.portBindings; // this is never null + + for (const portName in portBindings) { + if (!(portName in newTcpPorts) && !(portName in newUdpPorts)) return { code: false, reason: `${portName} port was in use but new update removes it` }; + } + + // it's fine if one or more (unused) port keys got removed + return { code: true, reason: '' }; +} + +function attachProperties(app, domainObjectMap) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof domainObjectMap, 'object'); + + app.iconUrl = app.hasIcon || app.hasPackageIcon ? 
`/api/v1/apps/${app.id}/icon` : null; + app.fqdn = dns.fqdn(app.subdomain, app.domain); + app.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); + app.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); + app.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); }); + + if (app.updateInfo) { + const { code, reason } = canAutoupdateAppSync(app, app.updateInfo); // isAutoUpdatable is not cached since it depends on enableAutomaticUpdate and runState + app.updateInfo.isAutoUpdatable = code; + app.updateInfo.manualUpdateReason = reason; + } +} + async function setHealth(appId, health, healthTime) { assert.strictEqual(typeof appId, 'string'); assert.strictEqual(typeof health, 'string'); @@ -1058,7 +943,49 @@ async function get(id) { return result[0]; } -// returns the app associated with this IP (app or scheduler) +async function getStorageDir(app) { + assert.strictEqual(typeof app, 'object'); + + if (!app.manifest.addons?.localstorage) return null; + + if (!app.storageVolumeId) return path.join(paths.APPS_DATA_DIR, app.id, 'data'); + const volume = await volumes.get(app.storageVolumeId); + if (!volume) throw new BoxError(BoxError.NOT_FOUND, 'Volume not found'); // not possible + return path.join(volume.hostPath, app.storageVolumePrefix); +} + +async function checkStorage(app, volumeId, prefix) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof volumeId, 'string'); + assert.strictEqual(typeof prefix, 'string'); + + const volume = await volumes.get(volumeId); + if (volume === null) throw new BoxError(BoxError.BAD_FIELD, 'Storage volume not found'); + + // lack of file perms makes these unsupported + if (volume.mountType === 'cifs' || volume.mountType === 'sshfs') throw new BoxError(BoxError.BAD_FIELD, `${volume.mountType} volumes cannot be used as data directory`); + + const status = await volumes.getStatus(volume); + if (status.state !== 
'active') throw new BoxError(BoxError.BAD_FIELD, 'Volume is not active'); + + if (path.isAbsolute(prefix)) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" must be a relative path`); + if (prefix.endsWith('/')) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" contains trailing slash`); + if (prefix !== '' && path.normalize(prefix) !== prefix) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" is not a normalized path`); + + const sourceDir = await getStorageDir(app); + if (sourceDir === null) throw new BoxError(BoxError.BAD_STATE, 'App does not use localstorage addon'); + + const targetDir = path.join(volume.hostPath, prefix); + const rel = path.relative(sourceDir, targetDir); + if (!rel.startsWith('../') && rel.split('/').length > 1) throw new BoxError(BoxError.BAD_FIELD, 'Only one level subdirectory moves are supported'); + + const [error] = await safe(shell.sudo([ CHECKVOLUME_CMD, targetDir, sourceDir ], {})); + if (error && error.code === 2) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} is not empty`); + if (error && error.code === 3) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} does not support chown`); + + return null; +} + async function getByIpAddress(ip) { assert.strictEqual(typeof ip, 'string'); @@ -1081,6 +1008,7 @@ async function list() { return results; } +// returns the app associated with this FQDN async function getByFqdn(fqdn) { assert.strictEqual(typeof fqdn, 'string'); @@ -1138,6 +1066,610 @@ async function onTaskFinished(error, appId, installationState, taskId, auditSour } } +async function getCount() { + const result = await database.query('SELECT COUNT(*) AS total FROM apps'); + return result[0].total; +} + +async function setAccessRestriction(app, accessRestriction, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof accessRestriction, 'object'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = 
app.id; + const error = validateAccessRestriction(accessRestriction); + if (error) throw error; + + await update(appId, { accessRestriction }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, accessRestriction }); +} + +async function setOperators(app, operators, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof operators, 'object'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + const error = validateAccessRestriction(operators); // not a typo. same structure for operators and accessRestriction + if (error) throw error; + + await update(appId, { operators }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, operators }); +} + +async function setCrontab(app, crontab, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert(crontab === null || typeof crontab === 'string'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + parseCrontab(crontab); + + await update(appId, { crontab }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, crontab }); +} + +async function setUpstreamUri(app, upstreamUri, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof upstreamUri, 'string'); + assert.strictEqual(typeof auditSource, 'object'); + + if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'upstreamUri can only be set for proxy app'); + + const appId = app.id; + const error = validateUpstreamUri(upstreamUri); + if (error) throw error; + + await reverseProxy.writeAppConfigs(Object.assign({}, app, { upstreamUri })); + + await update(appId, { upstreamUri }); + + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, upstreamUri }); +} + +async function setLabel(app, label, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof label, 'string'); + 
assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + const error = validateLabel(label); + if (error) throw error; + + await update(appId, { label }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, label }); +} + +async function setTags(app, tags, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert(Array.isArray(tags)); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + const error = validateTags(tags); + if (error) throw error; + + await update(appId, { tags }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, tags }); +} + +async function setNotes(app, notes, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof notes, 'string'); + assert.strictEqual(typeof auditSource, 'object'); + + await update(app.id, { notes }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, notes }); +} + +async function setChecklistItem(app, checklistItemKey, acknowledged, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof checklistItemKey, 'string'); + assert.strictEqual(typeof acknowledged, 'boolean'); + assert.strictEqual(typeof auditSource, 'object'); + + if (!app.checklist[checklistItemKey]) throw new BoxError(BoxError.NOT_FOUND, 'no such checklist item'); + + // nothing changed + if (app.checklist[checklistItemKey].acknowledged === acknowledged) return; + + const checklist = app.checklist; + checklist[checklistItemKey].acknowledged = acknowledged; + checklist[checklistItemKey].changedAt = Date.now(); + checklist[checklistItemKey].changedBy = auditSource.username; + + await update(app.id, { checklist }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, checklist }); +} + +async function setIcon(app, icon, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert(icon === null || typeof icon === 
'string'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + + if (icon) { + icon = Buffer.from(icon, 'base64'); + if (icon.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64'); + } + + await update(appId, { icon }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, iconChanged: true }); +} + +async function setAutomaticBackup(app, enable, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof enable, 'boolean'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + await update(appId, { enableBackup: enable }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableBackup: enable }); +} + +async function setAutomaticUpdate(app, enable, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof enable, 'boolean'); + assert.strictEqual(typeof auditSource, 'object'); + + const appId = app.id; + await update(appId, { enableAutomaticUpdate: enable }); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableAutomaticUpdate: enable }); +} + +async function setReverseProxyConfig(app, reverseProxyConfig, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof reverseProxyConfig, 'object'); + assert.strictEqual(typeof auditSource, 'object'); + + reverseProxyConfig = Object.assign({ robotsTxt: null, csp: null, hstsPreload: false }, reverseProxyConfig); + + const appId = app.id; + let error = validateCsp(reverseProxyConfig.csp); + if (error) throw error; + + error = validateRobotsTxt(reverseProxyConfig.robotsTxt); + if (error) throw error; + + await reverseProxy.writeAppConfigs(Object.assign({}, app, { reverseProxyConfig })); + + await update(appId, { reverseProxyConfig }); + + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, reverseProxyConfig }); +} + +async function getLocation(subdomain, 
domain) { + assert.strictEqual(typeof subdomain, 'string'); + assert.strictEqual(typeof domain, 'string'); + + const result = await database.query(`SELECT ${LOCATION_FIELDS} FROM locations WHERE subdomain=? AND domain=?`, [ subdomain, domain ]); + if (result.length === 0) return null; + + return new Location(subdomain, domain, result[0].type, safe.JSON.parse(result[0].certificateJson)); +} + +async function validateLocations(locations) { + assert(Array.isArray(locations)); + + const domainObjectMap = await domains.getDomainObjectMap(); + + const RESERVED_SUBDOMAINS = [ + constants.SMTP_SUBDOMAIN, + constants.IMAP_SUBDOMAIN + ]; + + const dashboardLocation = await dashboard.getLocation(); + for (const location of locations) { + if (!(location.domain in domainObjectMap)) return new BoxError(BoxError.BAD_FIELD, `No such domain in ${location.type} location`); + + let subdomain = location.subdomain; + if (location.type === Location.TYPE_ALIAS && subdomain.startsWith('*')) { + if (subdomain === '*') continue; + subdomain = subdomain.replace(/^\*\./, ''); // remove *. 
+ } + + if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`); + + if (location.fqdn === dashboardLocation.fqdn) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved for dashboard`); + + const error = dns.validateHostname(subdomain, location.domain); + if (error) return new BoxError(BoxError.BAD_FIELD, `Bad ${location.type} location: ${error.message}`); + } + + return null; +} + +async function setCertificate(app, data, auditSource) { + assert.strictEqual(typeof app, 'object'); + assert(data && typeof data === 'object'); + assert.strictEqual(typeof auditSource, 'object'); + + const { subdomain, domain, cert, key } = data; + + const domainObject = await domains.get(domain); + if (domainObject === null) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found'); + + if (cert && key) await reverseProxy.validateCertificate(subdomain, domain, { cert, key }); + + const certificate = cert && key ? { cert, key } : null; + const result = await database.query('UPDATE locations SET certificateJson=? WHERE location=? AND domain=?', [ certificate ? 
JSON.stringify(certificate) : null, subdomain, domain ]); + if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Location not found'); + + const location = await getLocation(subdomain, domain); // fresh location object with type + await reverseProxy.setUserCertificate(app, location); + await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, subdomain, domain, cert }); +} + +async function getLogPaths(app) { + assert.strictEqual(typeof app, 'object'); + + const appId = app.id; + + const filePaths = []; + filePaths.push(path.join(paths.LOG_DIR, appId, 'apptask.log')); + filePaths.push(path.join(paths.LOG_DIR, appId, 'app.log')); + if (app.manifest.addons && app.manifest.addons.redis) filePaths.push(path.join(paths.LOG_DIR, `redis-${appId}/app.log`)); + + if (app.manifest.logPaths) { + const [error, result] = await safe(docker.inspect(app.containerId)); + if (!error) { + const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; }); + const tmpVolume = result.Mounts.find(function (mount) { return mount.Destination === '/tmp'; }); + const dataVolume = result.Mounts.find(function (mount) { return mount.Destination === '/app/data'; }); + + // note: wild cards are not supported yet in logPaths since that will require shell expansion + for (const logPath of app.manifest.logPaths) { + if (logPath.startsWith('/tmp/')) filePaths.push(`${tmpVolume.Source}/${logPath.slice('/tmp/'.length)}`); + else if (logPath.startsWith('/run/')) filePaths.push(`${runVolume.Source}/${logPath.slice('/run/'.length)}`); + else if (logPath.startsWith('/app/data/')) filePaths.push(`${dataVolume.Source}/${logPath.slice('/app/data/'.length)}`); + } + } + } + + return filePaths; +} + +async function getLogs(app, options) { + assert.strictEqual(typeof app, 'object'); + assert(options && typeof options === 'object'); + + assert.strictEqual(typeof options.lines, 'number'); + assert.strictEqual(typeof options.format, 
'string'); + assert.strictEqual(typeof options.follow, 'boolean'); + + const appId = app.id; + + const logPaths = await getLogPaths(app); + const cp = logs.tail(logPaths, { lines: options.lines, follow: options.follow, sudo: true }); // need sudo access for paths inside app container (manifest.logPaths) + + const logStream = new logs.LogStream({ format: options.format || 'json', source: appId }); + logStream.on('close', () => cp.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close' + + cp.stdout.pipe(logStream); + + return logStream; +} + +async function appendLogLine(app, line) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof line, 'string'); + + const logFilePath = path.join(paths.LOG_DIR, app.id, 'app.log'); + const isoDate = new Date(new Date().toUTCString()).toISOString(); + + if (!safe.fs.appendFileSync(logFilePath, `${isoDate} ${line}\n`)) console.error(`Could not append log line for app ${app.id} at ${logFilePath}: ${safe.error.message}`); +} + +async function checkManifest(manifest) { + assert(manifest && typeof manifest === 'object'); + + if (manifest.manifestVersion !== 2) return new BoxError(BoxError.BAD_FIELD, 'Manifest version must be 2'); + + if (!manifest.dockerImage) return new BoxError(BoxError.BAD_FIELD, 'Missing dockerImage'); // dockerImage is optional in manifest + + if (semver.valid(manifest.maxBoxVersion) && semver.gt(constants.VERSION, manifest.maxBoxVersion)) { + return new BoxError(BoxError.BAD_FIELD, 'Box version exceeds Apps maxBoxVersion'); + } + + if (semver.valid(manifest.minBoxVersion) && semver.gt(manifest.minBoxVersion, constants.VERSION)) { + return new BoxError(BoxError.BAD_FIELD, 'App version requires a new platform version'); + } + + const error = await services.checkAddonsSupport(manifest.addons || {}); + return error; +} + +async function createExec(app, options) { + assert.strictEqual(typeof app, 'object'); + assert(options && typeof options === 
'object'); + + if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'cannot exec on proxy app'); + + const cmd = options.cmd || [ '/bin/bash' ]; + assert(Array.isArray(cmd) && cmd.length > 0); + + if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) { + throw new BoxError(BoxError.BAD_STATE, 'App not installed or running'); + } + + const createOptions = { + AttachStdin: true, + AttachStdout: true, + AttachStderr: true, + // A pseudo tty is a terminal which processes can detect (for example, disable colored output) + // Creating a pseudo terminal also assigns a terminal driver which detects control sequences + // When passing binary data, tty must be disabled. In addition, the stdout/stderr becomes a single + // unified stream because of the nature of a tty (see https://github.com/docker/docker/issues/19696) + Tty: options.tty, + Cmd: cmd + }; + + // currently the webterminal and cli sets C.UTF-8 + if (options.lang) createOptions.Env = [ 'LANG=' + options.lang ]; + + if (options.cwd) createOptions.WorkingDir = options.cwd; + + return await docker.createExec(app.containerId, createOptions); +} + +async function startExec(app, execId, options) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof execId, 'string'); + assert(options && typeof options === 'object'); + + if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) { + throw new BoxError(BoxError.BAD_STATE, 'App not installed or running'); + } + + const startOptions = { + Detach: false, + Tty: options.tty, + // hijacking upgrades the docker connection from http to tcp. because of this upgrade, + // we can work with half-close connections (not defined in http). this way, the client + // can properly signal that stdin is EOF by closing it's side of the socket. In http, + // the whole connection will be dropped when stdin get EOF. 
+ // https://github.com/apocas/dockerode/commit/b4ae8a03707fad5de893f302e4972c1e758592fe + hijack: true, + stream: true, + stdin: true, + stdout: true, + stderr: true + }; + + const stream = await docker.startExec(execId, startOptions); + + if (options.rows && options.columns) { + // there is a race where resizing too early results in a 404 "no such exec" + // https://git.cloudron.io/cloudron/box/issues/549 + setTimeout(async function () { + await safe(docker.resizeExec(execId, { h: options.rows, w: options.columns }, { debug })); + }, 2000); + } + + return stream; +} + +async function getExec(app, execId) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof execId, 'string'); + + return await docker.getExec(execId); +} + +async function listBackups(app, page, perPage) { + assert.strictEqual(typeof app, 'object'); + assert(typeof page === 'number' && page > 0); + assert(typeof perPage === 'number' && perPage > 0); + + return await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, page, perPage); +} + +async function listEventlog(app, page, perPage) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof page, 'number'); + assert.strictEqual(typeof perPage, 'number'); + + const actions = []; + const search = app.id; + return await eventlog.listPaged(actions, search, page, perPage); +} + +async function drainStream(stream) { + return new Promise((resolve, reject) => { + let data = ''; + stream.setEncoding('utf8'); + stream.on('error', (error) => reject(new BoxError.FS_ERROR, error.message)); + stream.on('data', function (d) { data += d; }); + stream.on('end', function () { + resolve(data); + }); + }); +} + +async function downloadFile(app, filePath) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof filePath, 'string'); + + const statExecId = await createExec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true }); + const statStream = await startExec(app, statExecId, { 
tty: true }); + const data = await drainStream(statStream); + + const parts = data.split('-'); + if (parts.length !== 2) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist'); + + const type = parts[0]; + let filename, cmd, size; + + if (type === 'regular file') { + cmd = [ 'cat', filePath ]; + size = parseInt(parts[1], 10); + filename = path.basename(filePath); + if (isNaN(size)) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist'); + } else if (type === 'directory') { + cmd = ['tar', 'zcf', '-', '-C', filePath, '.']; + filename = path.basename(filePath) + '.tar.gz'; + size = 0; // unknown + } else { + throw new BoxError(BoxError.NOT_FOUND, 'only files or dirs can be downloaded'); + } + + const execId = await createExec(app, { cmd, tty: false }); + const inputStream = await startExec(app, execId, { tty: false }); + + // transforms the docker stream into a normal stream + const stdoutStream = new TransformStream({ + transform: function (chunk, ignoredEncoding, callback) { + this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk; + + for (;;) { + if (this._buffer.length < 8) break; // header is 8 bytes + + const type = this._buffer.readUInt8(0); + const len = this._buffer.readUInt32BE(4); + + if (this._buffer.length < (8 + len)) break; // not enough + + const payload = this._buffer.slice(8, 8 + len); + + this._buffer = this._buffer.slice(8+len); // consumed + + if (type === 1) this.push(payload); + } + + callback(); + } + }); + + inputStream.pipe(stdoutStream); + + return { stream: stdoutStream, filename, size }; +} + +async function uploadFile(app, sourceFilePath, destFilePath) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof sourceFilePath, 'string'); + assert.strictEqual(typeof destFilePath, 'string'); + + // the built-in bash printf understands "%q" but not /usr/bin/printf. + // ' gets replaced with '\'' . 
the first closes the quote and last one starts a new one + const escapedDestFilePath = await shell.bash(`printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { encoding: 'utf8' }); + debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`); + + const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false }); + const destStream = await startExec(app, execId, { tty: false }); + + return new Promise((resolve, reject) => { + const done = once(error => reject(new BoxError(BoxError.FS_ERROR, error.message))); + + const sourceStream = fs.createReadStream(sourceFilePath); + sourceStream.on('error', done); + destStream.on('error', done); + + destStream.on('finish', resolve); + + sourceStream.pipe(destStream); + }); +} + +async function writeConfig(app) { + assert.strictEqual(typeof app, 'object'); + + if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(app, null, 4))) { + throw new BoxError(BoxError.FS_ERROR, 'Error creating config.json: ' + safe.error.message); + } + + const [error, icons] = await safe(getIcons(app.id)); + if (!error && icons.icon) safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png'), icons.icon); +} + +async function loadConfig(app) { + assert.strictEqual(typeof app, 'object'); + + const appConfig = safe.JSON.parse(safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'))); + let data = {}; + if (appConfig) { + data = _.pick(appConfig, ['memoryLimit', 'cpuQuota', 'enableBackup', 'reverseProxyConfig', 'env', 'servicesConfig', 'label', 'tags', 'enableAutomaticUpdate']); + } + + const icon = safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png')); + if (icon) data.icon = icon; + + await update(app.id, data); +} + +function validatePorts(ports, manifest) { + assert.strictEqual(typeof ports, 'object'); + assert.strictEqual(typeof manifest, 'object'); + + // keep the public ports in sync with 
firewall rules in setup/start/cloudron-firewall.sh + // these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1 + // for custom tcp ports + const RESERVED_PORTS = [ + 22, /* ssh */ + 25, /* smtp */ + 80, /* http */ + 143, /* imap */ + 202, /* alternate ssh */ + 222, /* proftd */ + 443, /* https */ + 465, /* smtps */ + 587, /* submission */ + 993, /* imaps */ + 995, /* pop3s */ + 2003, /* graphite (lo) */ + constants.PORT, /* app server (lo) */ + constants.AUTHWALL_PORT, /* protected sites */ + constants.INTERNAL_SMTP_PORT, /* internal smtp port (lo) */ + constants.LDAP_PORT, + 3306, /* mysql (lo) */ + 3478, /* turn,stun */ + 4190, /* managesieve */ + 5349, /* turn,stun TLS */ + 8000, /* ESXi monitoring */ + ]; + + const RESERVED_PORT_RANGES = [ + [constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END] /* turn udp ports */ + ]; + + const ALLOWED_PORTS = [ + 53, // dns 53 is special and adblocker apps can use them + 853 // dns over tls + ]; + + if (!ports) return null; + + const tcpPorts = manifest.tcpPorts || {}; + const udpPorts = manifest.udpPorts || {}; + + for (const portName in ports) { + if (!/^[A-Z0-9_]+$/.test(portName)) return new BoxError(BoxError.BAD_FIELD, `${portName} is not a valid environment variable in ports`); + + const hostPort = ports[portName]; + if (!Number.isInteger(hostPort)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not an integer in ${portName} ports`); + if (RESERVED_PORTS.indexOf(hostPort) !== -1) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`); + if (RESERVED_PORT_RANGES.find(range => (hostPort >= range[0] && hostPort <= range[1]))) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`); + if (ALLOWED_PORTS.indexOf(hostPort) === -1 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} for ${portName} is not in permitted range in ports`); + + // 
it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and ports. missing values implies the service is disabled + const portSpec = tcpPorts[portName] || udpPorts[portName]; + if (!portSpec) return new BoxError(BoxError.BAD_FIELD, `Invalid portBinding ${portName}`); + if (portSpec.readOnly && portSpec.defaultValue !== hostPort) return new BoxError(BoxError.BAD_FIELD, `portBinding ${portName} is readOnly and cannot have a different value that the default`); + if ((hostPort + (portSpec.portCount || 1)) > 65535) return new BoxError(BoxError.BAD_FIELD, `${hostPort}+${portSpec.portCount} for ${portName} exceeds valid port range`); + } + + return null; +} + +function validateDevices(devices) { + for (const key in devices) { + if (key.indexOf('/dev/') !== 0) return new BoxError(BoxError.BAD_FIELD, `"${key}" must start with /dev/`); + } + + return null; +} + async function scheduleTask(appId, installationState, taskId, auditSource) { assert.strictEqual(typeof appId, 'string'); assert.strictEqual(typeof installationState, 'string'); @@ -1221,42 +1753,6 @@ function checkAppState(app, state) { return null; } -async function validateLocations(locations) { - assert(Array.isArray(locations)); - - const domainObjectMap = await domains.getDomainObjectMap(); - - const RESERVED_SUBDOMAINS = [ - constants.SMTP_SUBDOMAIN, - constants.IMAP_SUBDOMAIN - ]; - - const dashboardLocation = await dashboard.getLocation(); - for (const location of locations) { - if (!(location.domain in domainObjectMap)) return new BoxError(BoxError.BAD_FIELD, `No such domain in ${location.type} location`); - - let subdomain = location.subdomain; - if (location.type === Location.TYPE_ALIAS && subdomain.startsWith('*')) { - if (subdomain === '*') continue; - subdomain = subdomain.replace(/^\*\./, ''); // remove *. 
- } - - if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`); - - if (location.fqdn === dashboardLocation.fqdn) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved for dashboard`); - - const error = dns.validateHostname(subdomain, location.domain); - if (error) return new BoxError(BoxError.BAD_FIELD, `Bad ${location.type} location: ${error.message}`); - } - - return null; -} - -async function getCount() { - const result = await database.query('SELECT COUNT(*) AS total FROM apps'); - return result[0].total; -} - async function install(data, auditSource) { assert(data && typeof data === 'object'); assert.strictEqual(typeof auditSource, 'object'); @@ -1429,133 +1925,6 @@ async function install(data, auditSource) { return { id : appId, taskId }; } -async function setAccessRestriction(app, accessRestriction, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof accessRestriction, 'object'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - const error = validateAccessRestriction(accessRestriction); - if (error) throw error; - - await update(appId, { accessRestriction }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, accessRestriction }); -} - -async function setOperators(app, operators, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof operators, 'object'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - const error = validateAccessRestriction(operators); // not a typo. 
same structure for operators and accessRestriction - if (error) throw error; - - await update(appId, { operators }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, operators }); -} - -async function setCrontab(app, crontab, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert(crontab === null || typeof crontab === 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - parseCrontab(crontab); - - await update(appId, { crontab }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, crontab }); -} - -async function setUpstreamUri(app, upstreamUri, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof upstreamUri, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'upstreamUri can only be set for proxy app'); - - const appId = app.id; - const error = validateUpstreamUri(upstreamUri); - if (error) throw error; - - await reverseProxy.writeAppConfigs(Object.assign({}, app, { upstreamUri })); - - await update(appId, { upstreamUri }); - - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, upstreamUri }); -} - -async function setLabel(app, label, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof label, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - const error = validateLabel(label); - if (error) throw error; - - await update(appId, { label }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, label }); -} - -async function setTags(app, tags, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert(Array.isArray(tags)); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - const error = validateTags(tags); - if (error) throw error; - - await update(appId, 
{ tags }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, tags }); -} - -async function setNotes(app, notes, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof notes, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - await update(app.id, { notes }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, notes }); -} - -async function setChecklistItem(app, checklistItemKey, acknowledged, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof checklistItemKey, 'string'); - assert.strictEqual(typeof acknowledged, 'boolean'); - assert.strictEqual(typeof auditSource, 'object'); - - if (!app.checklist[checklistItemKey]) throw new BoxError(BoxError.NOT_FOUND, 'no such checklist item'); - - // nothing changed - if (app.checklist[checklistItemKey].acknowledged === acknowledged) return; - - const checklist = app.checklist; - checklist[checklistItemKey].acknowledged = acknowledged; - checklist[checklistItemKey].changedAt = Date.now(); - checklist[checklistItemKey].changedBy = auditSource.username; - - await update(app.id, { checklist }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, checklist }); -} - -async function setIcon(app, icon, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert(icon === null || typeof icon === 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - - if (icon) { - icon = Buffer.from(icon, 'base64'); - if (icon.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64'); - } - - await update(appId, { icon }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, iconChanged: true }); -} - async function setMemoryLimit(app, memoryLimit, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof memoryLimit, 'number'); @@ -1579,6 +1948,7 @@ async 
function setMemoryLimit(app, memoryLimit, auditSource) { return { taskId }; } +// never fails just prints error async function setCpuQuota(app, cpuQuota, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof cpuQuota, 'number'); @@ -1602,6 +1972,8 @@ async function setCpuQuota(app, cpuQuota, auditSource) { return { taskId }; } +// does a re-configure when called from most states. for install/clone errors, it re-installs with an optional manifest +// re-configure can take a dockerImage but not a manifest because re-configure does not clean up addons async function setMounts(app, mounts, auditSource) { assert.strictEqual(typeof app, 'object'); assert(Array.isArray(mounts)); @@ -1823,78 +2195,6 @@ async function setRedis(app, enableRedis, auditSource) { return { taskId }; } -async function setAutomaticBackup(app, enable, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof enable, 'boolean'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - await update(appId, { enableBackup: enable }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableBackup: enable }); -} - -async function setAutomaticUpdate(app, enable, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof enable, 'boolean'); - assert.strictEqual(typeof auditSource, 'object'); - - const appId = app.id; - await update(appId, { enableAutomaticUpdate: enable }); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableAutomaticUpdate: enable }); -} - -async function setReverseProxyConfig(app, reverseProxyConfig, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof reverseProxyConfig, 'object'); - assert.strictEqual(typeof auditSource, 'object'); - - reverseProxyConfig = Object.assign({ robotsTxt: null, csp: null, hstsPreload: false }, reverseProxyConfig); - - const appId = app.id; - let error = 
validateCsp(reverseProxyConfig.csp); - if (error) throw error; - - error = validateRobotsTxt(reverseProxyConfig.robotsTxt); - if (error) throw error; - - await reverseProxy.writeAppConfigs(Object.assign({}, app, { reverseProxyConfig })); - - await update(appId, { reverseProxyConfig }); - - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, reverseProxyConfig }); -} - -async function getLocation(subdomain, domain) { - assert.strictEqual(typeof subdomain, 'string'); - assert.strictEqual(typeof domain, 'string'); - - const result = await database.query(`SELECT ${LOCATION_FIELDS} FROM locations WHERE subdomain=? AND domain=?`, [ subdomain, domain ]); - if (result.length === 0) return null; - - return new Location(subdomain, domain, result[0].type, safe.JSON.parse(result[0].certificateJson)); -} - -async function setCertificate(app, data, auditSource) { - assert.strictEqual(typeof app, 'object'); - assert(data && typeof data === 'object'); - assert.strictEqual(typeof auditSource, 'object'); - - const { subdomain, domain, cert, key } = data; - - const domainObject = await domains.get(domain); - if (domainObject === null) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found'); - - if (cert && key) await reverseProxy.validateCertificate(subdomain, domain, { cert, key }); - - const certificate = cert && key ? { cert, key } : null; - const result = await database.query('UPDATE locations SET certificateJson=? WHERE location=? AND domain=?', [ certificate ? 
JSON.stringify(certificate) : null, subdomain, domain ]); - if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Location not found'); - - const location = await getLocation(subdomain, domain); // fresh location object with type - await reverseProxy.setUserCertificate(app, location); - await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, subdomain, domain, cert }); -} - async function setLocation(app, data, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof data, 'object'); @@ -2080,69 +2380,6 @@ async function updateApp(app, data, auditSource) { return { taskId }; } -async function getLogPaths(app) { - assert.strictEqual(typeof app, 'object'); - - const appId = app.id; - - const filePaths = []; - filePaths.push(path.join(paths.LOG_DIR, appId, 'apptask.log')); - filePaths.push(path.join(paths.LOG_DIR, appId, 'app.log')); - if (app.manifest.addons && app.manifest.addons.redis) filePaths.push(path.join(paths.LOG_DIR, `redis-${appId}/app.log`)); - - if (app.manifest.logPaths) { - const [error, result] = await safe(docker.inspect(app.containerId)); - if (!error) { - const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; }); - const tmpVolume = result.Mounts.find(function (mount) { return mount.Destination === '/tmp'; }); - const dataVolume = result.Mounts.find(function (mount) { return mount.Destination === '/app/data'; }); - - // note: wild cards are not supported yet in logPaths since that will require shell expansion - for (const logPath of app.manifest.logPaths) { - if (logPath.startsWith('/tmp/')) filePaths.push(`${tmpVolume.Source}/${logPath.slice('/tmp/'.length)}`); - else if (logPath.startsWith('/run/')) filePaths.push(`${runVolume.Source}/${logPath.slice('/run/'.length)}`); - else if (logPath.startsWith('/app/data/')) filePaths.push(`${dataVolume.Source}/${logPath.slice('/app/data/'.length)}`); - } - } - } - - return filePaths; -} - -async 
function getLogs(app, options) { - assert.strictEqual(typeof app, 'object'); - assert(options && typeof options === 'object'); - - assert.strictEqual(typeof options.lines, 'number'); - assert.strictEqual(typeof options.format, 'string'); - assert.strictEqual(typeof options.follow, 'boolean'); - - const appId = app.id; - - const logPaths = await getLogPaths(app); - const cp = logs.tail(logPaths, { lines: options.lines, follow: options.follow, sudo: true }); // need sudo access for paths inside app container (manifest.logPaths) - - const logStream = new logs.LogStream({ format: options.format || 'json', source: appId }); - logStream.on('close', () => cp.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close' - - cp.stdout.pipe(logStream); - - return logStream; -} - -// never fails just prints error -async function appendLogLine(app, line) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof line, 'string'); - - const logFilePath = path.join(paths.LOG_DIR, app.id, 'app.log'); - const isoDate = new Date(new Date().toUTCString()).toISOString(); - - if (!safe.fs.appendFileSync(logFilePath, `${isoDate} ${line}\n`)) console.error(`Could not append log line for app ${app.id} at ${logFilePath}: ${safe.error.message}`); -} - -// does a re-configure when called from most states. 
for install/clone errors, it re-installs with an optional manifest -// re-configure can take a dockerImage but not a manifest because re-configure does not clean up addons async function repair(app, data, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof data, 'object'); // { manifest } @@ -2289,6 +2526,19 @@ async function importApp(app, data, auditSource) { return { taskId }; } +function canBackupApp(app) { + // only backup apps that are installed or specific pending states + + // stopped apps cannot be backed up because addons might be down (redis) + if (app.runState === RSTATE_STOPPED) return false; + + // we used to check the health here but that doesn't work for stopped apps. it's better to just fail + // and inform the user if the backup fails and the app addons have not been setup yet. + return app.installationState === ISTATE_INSTALLED || + app.installationState === ISTATE_PENDING_CONFIGURE || + app.installationState === ISTATE_PENDING_UPDATE; // called from apptask +} + async function exportApp(app, backupSiteId, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof backupSiteId, 'string'); // FIXME: this is not used at all in snapshotOnly mode @@ -2559,115 +2809,7 @@ async function restart(app, auditSource) { return { taskId }; } -async function checkManifest(manifest) { - assert(manifest && typeof manifest === 'object'); - - if (manifest.manifestVersion !== 2) return new BoxError(BoxError.BAD_FIELD, 'Manifest version must be 2'); - - if (!manifest.dockerImage) return new BoxError(BoxError.BAD_FIELD, 'Missing dockerImage'); // dockerImage is optional in manifest - - if (semver.valid(manifest.maxBoxVersion) && semver.gt(constants.VERSION, manifest.maxBoxVersion)) { - return new BoxError(BoxError.BAD_FIELD, 'Box version exceeds Apps maxBoxVersion'); - } - - if (semver.valid(manifest.minBoxVersion) && semver.gt(manifest.minBoxVersion, constants.VERSION)) { - return new 
BoxError(BoxError.BAD_FIELD, 'App version requires a new platform version'); - } - - const error = await services.checkAddonsSupport(manifest.addons || {}); - return error; -} - -async function createExec(app, options) { - assert.strictEqual(typeof app, 'object'); - assert(options && typeof options === 'object'); - - if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'cannot exec on proxy app'); - - const cmd = options.cmd || [ '/bin/bash' ]; - assert(Array.isArray(cmd) && cmd.length > 0); - - if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) { - throw new BoxError(BoxError.BAD_STATE, 'App not installed or running'); - } - - const createOptions = { - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - // A pseudo tty is a terminal which processes can detect (for example, disable colored output) - // Creating a pseudo terminal also assigns a terminal driver which detects control sequences - // When passing binary data, tty must be disabled. In addition, the stdout/stderr becomes a single - // unified stream because of the nature of a tty (see https://github.com/docker/docker/issues/19696) - Tty: options.tty, - Cmd: cmd - }; - - // currently the webterminal and cli sets C.UTF-8 - if (options.lang) createOptions.Env = [ 'LANG=' + options.lang ]; - - if (options.cwd) createOptions.WorkingDir = options.cwd; - - return await docker.createExec(app.containerId, createOptions); -} - -async function startExec(app, execId, options) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof execId, 'string'); - assert(options && typeof options === 'object'); - - if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) { - throw new BoxError(BoxError.BAD_STATE, 'App not installed or running'); - } - - const startOptions = { - Detach: false, - Tty: options.tty, - // hijacking upgrades the docker connection from http to tcp. 
because of this upgrade, - // we can work with half-close connections (not defined in http). this way, the client - // can properly signal that stdin is EOF by closing it's side of the socket. In http, - // the whole connection will be dropped when stdin get EOF. - // https://github.com/apocas/dockerode/commit/b4ae8a03707fad5de893f302e4972c1e758592fe - hijack: true, - stream: true, - stdin: true, - stdout: true, - stderr: true - }; - - const stream = await docker.startExec(execId, startOptions); - - if (options.rows && options.columns) { - // there is a race where resizing too early results in a 404 "no such exec" - // https://git.cloudron.io/cloudron/box/issues/549 - setTimeout(async function () { - await safe(docker.resizeExec(execId, { h: options.rows, w: options.columns }, { debug })); - }, 2000); - } - - return stream; -} - -async function getExec(app, execId) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof execId, 'string'); - - return await docker.getExec(execId); -} - -function canBackupApp(app) { - // only backup apps that are installed or specific pending states - - // stopped apps cannot be backed up because addons might be down (redis) - if (app.runState === RSTATE_STOPPED) return false; - - // we used to check the health here but that doesn't work for stopped apps. it's better to just fail - // and inform the user if the backup fails and the app addons have not been setup yet. 
- return app.installationState === ISTATE_INSTALLED || - app.installationState === ISTATE_PENDING_CONFIGURE || - app.installationState === ISTATE_PENDING_UPDATE; // called from apptask -} - +// auto-restart app tasks after a crash async function backup(app, backupSiteId, auditSource) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof backupSiteId, 'string'); @@ -2700,14 +2842,6 @@ async function backup(app, backupSiteId, auditSource) { return { taskId }; } -async function listBackups(app, page, perPage) { - assert.strictEqual(typeof app, 'object'); - assert(typeof page === 'number' && page > 0); - assert(typeof perPage === 'number' && perPage > 0); - - return await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, page, perPage); -} - async function updateBackup(app, backupId, data) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof backupId, 'string'); @@ -2838,7 +2972,6 @@ async function restartAppsUsingAddons(changedAddons, auditSource) { } } -// auto-restart app tasks after a crash async function schedulePendingTasks(auditSource) { assert.strictEqual(typeof auditSource, 'object'); @@ -2855,139 +2988,6 @@ async function schedulePendingTasks(auditSource) { } } -async function listEventlog(app, page, perPage) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof page, 'number'); - assert.strictEqual(typeof perPage, 'number'); - - const actions = []; - const search = app.id; - return await eventlog.listPaged(actions, search, page, perPage); -} - -async function drainStream(stream) { - return new Promise((resolve, reject) => { - let data = ''; - stream.setEncoding('utf8'); - stream.on('error', (error) => reject(new BoxError.FS_ERROR, error.message)); - stream.on('data', function (d) { data += d; }); - stream.on('end', function () { - resolve(data); - }); - }); -} - -async function downloadFile(app, filePath) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof 
filePath, 'string'); - - const statExecId = await createExec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true }); - const statStream = await startExec(app, statExecId, { tty: true }); - const data = await drainStream(statStream); - - const parts = data.split('-'); - if (parts.length !== 2) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist'); - - const type = parts[0]; - let filename, cmd, size; - - if (type === 'regular file') { - cmd = [ 'cat', filePath ]; - size = parseInt(parts[1], 10); - filename = path.basename(filePath); - if (isNaN(size)) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist'); - } else if (type === 'directory') { - cmd = ['tar', 'zcf', '-', '-C', filePath, '.']; - filename = path.basename(filePath) + '.tar.gz'; - size = 0; // unknown - } else { - throw new BoxError(BoxError.NOT_FOUND, 'only files or dirs can be downloaded'); - } - - const execId = await createExec(app, { cmd, tty: false }); - const inputStream = await startExec(app, execId, { tty: false }); - - // transforms the docker stream into a normal stream - const stdoutStream = new TransformStream({ - transform: function (chunk, ignoredEncoding, callback) { - this._buffer = this._buffer ? 
Buffer.concat([this._buffer, chunk]) : chunk; - - for (;;) { - if (this._buffer.length < 8) break; // header is 8 bytes - - const type = this._buffer.readUInt8(0); - const len = this._buffer.readUInt32BE(4); - - if (this._buffer.length < (8 + len)) break; // not enough - - const payload = this._buffer.slice(8, 8 + len); - - this._buffer = this._buffer.slice(8+len); // consumed - - if (type === 1) this.push(payload); - } - - callback(); - } - }); - - inputStream.pipe(stdoutStream); - - return { stream: stdoutStream, filename, size }; -} - -async function uploadFile(app, sourceFilePath, destFilePath) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof sourceFilePath, 'string'); - assert.strictEqual(typeof destFilePath, 'string'); - - // the built-in bash printf understands "%q" but not /usr/bin/printf. - // ' gets replaced with '\'' . the first closes the quote and last one starts a new one - const escapedDestFilePath = await shell.bash(`printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { encoding: 'utf8' }); - debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`); - - const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false }); - const destStream = await startExec(app, execId, { tty: false }); - - return new Promise((resolve, reject) => { - const done = once(error => reject(new BoxError(BoxError.FS_ERROR, error.message))); - - const sourceStream = fs.createReadStream(sourceFilePath); - sourceStream.on('error', done); - destStream.on('error', done); - - destStream.on('finish', resolve); - - sourceStream.pipe(destStream); - }); -} - -async function writeConfig(app) { - assert.strictEqual(typeof app, 'object'); - - if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(app, null, 4))) { - throw new BoxError(BoxError.FS_ERROR, 'Error creating config.json: ' + safe.error.message); - } - - const [error, icons] = await safe(getIcons(app.id)); 
- if (!error && icons.icon) safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png'), icons.icon); -} - -async function loadConfig(app) { - assert.strictEqual(typeof app, 'object'); - - const appConfig = safe.JSON.parse(safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'))); - let data = {}; - if (appConfig) { - data = _.pick(appConfig, ['memoryLimit', 'cpuQuota', 'enableBackup', 'reverseProxyConfig', 'env', 'servicesConfig', 'label', 'tags', 'enableAutomaticUpdate']); - } - - const icon = safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png')); - if (icon) data.icon = icon; - - await update(app.id, data); -} - export default { canAccess, isOperator, diff --git a/src/appstore.js b/src/appstore.js index 27253bf0a..5d6dd2a92 100644 --- a/src/appstore.js +++ b/src/appstore.js @@ -23,10 +23,6 @@ import volumes from './volumes.js'; const debug = debugModule('box:appstore'); -const _setApiServerOrigin = setApiServerOrigin; -const _unregister = unregister; - - // These are the default options and will be adjusted once a subscription state is obtained // Keep in sync with appstore/routes/cloudrons.js const DEFAULT_FEATURES = { @@ -241,6 +237,31 @@ async function getAppUpdate(app, options) { return updateInfo; } +async function updateCloudron(data) { + assert.strictEqual(typeof data, 'object'); + + const { domain, version } = data; + + const token = await settings.get(settings.APPSTORE_API_TOKEN_KEY); + if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token'); + + const query = { + accessToken: token + }; + + const [error, response] = await safe(superagent.post(`${await getApiServerOrigin()}/api/v1/update_cloudron`) + .query(query) + .send({ domain, version }) + .timeout(60 * 1000) + .ok(() => true)); + + if (error) throw new BoxError(BoxError.NETWORK_ERROR, error); + if (response.status === 401) throw new BoxError(BoxError.LICENSE_ERROR, 'Invalid appstore token'); + if (response.status !== 200) 
throw new BoxError(BoxError.EXTERNAL_ERROR, `Bad response: ${response.status} ${response.text}`); + + debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`); +} + async function registerCloudron3() { const { domain } = await dashboard.getLocation(); const version = constants.VERSION; @@ -271,6 +292,11 @@ async function registerCloudron3() { await getSubscription(); } +async function unregister() { + await settings.set(settings.CLOUDRON_ID_KEY, ''); + await settings.set(settings.APPSTORE_API_TOKEN_KEY, ''); +} + async function unlinkAccount() { debug('unlinkAccount: Unlinking existing account.'); @@ -280,36 +306,6 @@ async function unlinkAccount() { return await registerCloudron3(); } -async function updateCloudron(data) { - assert.strictEqual(typeof data, 'object'); - - const { domain, version } = data; - - const token = await settings.get(settings.APPSTORE_API_TOKEN_KEY); - if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token'); - - const query = { - accessToken: token - }; - - const [error, response] = await safe(superagent.post(`${await getApiServerOrigin()}/api/v1/update_cloudron`) - .query(query) - .send({ domain, version }) - .timeout(60 * 1000) - .ok(() => true)); - - if (error) throw new BoxError(BoxError.NETWORK_ERROR, error); - if (response.status === 401) throw new BoxError(BoxError.LICENSE_ERROR, 'Invalid appstore token'); - if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Bad response: ${response.status} ${response.text}`); - - debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`); -} - -async function unregister() { - await settings.set(settings.CLOUDRON_ID_KEY, ''); - await settings.set(settings.APPSTORE_API_TOKEN_KEY, ''); -} - async function downloadManifest(appStoreId, manifest) { if (!appStoreId && !manifest) throw new BoxError(BoxError.BAD_FIELD, 'Neither manifest nor appStoreId provided'); @@ -398,6 +394,8 @@ async function downloadIcon(appStoreId, version) { 
}); } +const _setApiServerOrigin = setApiServerOrigin; + export default { getFeatures, getApiServerOrigin, @@ -417,5 +415,5 @@ export default { getAppUpdate, getBoxUpdate, _setApiServerOrigin, - _unregister, + _unregister: unregister, }; diff --git a/src/apptask.js b/src/apptask.js index 18dbeae2d..d2cb3eb44 100644 --- a/src/apptask.js +++ b/src/apptask.js @@ -31,11 +31,6 @@ import _ from './underscore.js'; const debug = debugModule('box:apptask'); const shell = shellModule('apptask'); -const _createAppDir = createAppDir; -const _deleteAppDir = deleteAppDir; -const _verifyManifest = verifyManifest; - - const LOGROTATE_CONFIG_EJS = fs.readFileSync(import.meta.dirname + '/logrotate.ejs', { encoding: 'utf8' }), CONFIGURE_LOGROTATE_CMD = path.join(import.meta.dirname, 'scripts/configurelogrotate.sh'); @@ -76,35 +71,6 @@ async function allocateContainerIp(app) { }); } -async function createContainer(app) { - assert.strictEqual(typeof app, 'object'); - assert(!app.containerId); // otherwise, it will trigger volumeFrom - - if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return; - - debug('createContainer: creating container'); - - const container = await docker.createContainer(app); - - await updateApp(app, { containerId: container.id }); - - // re-generate configs that rely on container id - await addLogrotateConfig(app); -} - -async function deleteContainers(app, options) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof options, 'object'); - - debug('deleteContainer: deleting app containers (app, scheduler)'); - - // remove configs that rely on container id - await removeLogrotateConfig(app); - await docker.stopContainers(app.id); - await docker.deleteContainers(app.id, options); - await updateApp(app, { containerId: null }); -} - async function createAppDir(app) { assert.strictEqual(typeof app, 'object'); @@ -158,25 +124,6 @@ async function deleteAppDir(app, options) { } } -async function addLogrotateConfig(app) { - 
assert.strictEqual(typeof app, 'object'); - - const result = await docker.inspect(app.containerId); - - const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; }); - if (!runVolume) throw new BoxError(BoxError.DOCKER_ERROR, 'App does not have /run mounted'); - - // logrotate configs can have arbitrary commands, so the config files must be owned by root - const logrotateConf = ejs.render(LOGROTATE_CONFIG_EJS, { volumePath: runVolume.Source, appId: app.id }); - const tmpFilePath = path.join(os.tmpdir(), app.id + '.logrotate'); - - safe.fs.writeFileSync(tmpFilePath, logrotateConf); - if (safe.error) throw new BoxError(BoxError.FS_ERROR, `Error writing logrotate config: ${safe.error.message}`); - - const [error] = await safe(shell.sudo([ CONFIGURE_LOGROTATE_CMD, 'add', app.id, tmpFilePath ], {})); - if (error) throw new BoxError(BoxError.LOGROTATE_ERROR, `Error adding logrotate config: ${error.message}`); -} - async function removeLogrotateConfig(app) { assert.strictEqual(typeof app, 'object'); @@ -184,6 +131,19 @@ async function removeLogrotateConfig(app) { if (error) throw new BoxError(BoxError.LOGROTATE_ERROR, `Error removing logrotate config: ${error.message}`); } +async function deleteContainers(app, options) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof options, 'object'); + + debug('deleteContainer: deleting app containers (app, scheduler)'); + + // remove configs that rely on container id + await removeLogrotateConfig(app); + await docker.stopContainers(app.id); + await docker.deleteContainers(app.id, options); + await updateApp(app, { containerId: null }); +} + async function cleanupLogs(app) { assert.strictEqual(typeof app, 'object'); @@ -301,6 +261,131 @@ async function startApp(app) { await docker.startContainer(app.id); } +async function startCommand(app, args, progressCallback) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof args, 'object'); + 
assert.strictEqual(typeof progressCallback, 'function'); + + await progressCallback({ percent: 10, message: 'Starting app services' }); + await services.startAppServices(app); + + if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) { + await progressCallback({ percent: 35, message: 'Starting container' }); + await docker.startContainer(app.id); + } + + // stopped apps do not renew certs. currently, we don't do DNS to not overwrite existing user settings + await progressCallback({ percent: 80, message: 'Configuring reverse proxy' }); + await reverseProxy.configureApp(app, AuditSource.APPTASK); + + await progressCallback({ percent: 100, message: 'Done' }); + await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); +} + +async function stopCommand(app, args, progressCallback) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof args, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + // we don't delete the containers. app containers are created with the unless-stopped restart policy. 
there is no danger of apps getting restarted on reboot + await progressCallback({ percent: 20, message: 'Stopping container' }); + await reverseProxy.unconfigureApp(app); // removing nginx configs also means that we can auto-cleanup old certs since they are not referenced + await docker.stopContainers(app.id); + + await progressCallback({ percent: 50, message: 'Stopping app services' }); + await services.stopAppServices(app); + + await progressCallback({ percent: 100, message: 'Done' }); + await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); +} + +async function restartCommand(app, args, progressCallback) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof args, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) { + await progressCallback({ percent: 10, message: 'Starting app services' }); + await services.startAppServices(app); + + await progressCallback({ percent: 20, message: 'Restarting container' }); + await docker.restartContainer(app.id); + } + + // stopped apps do not renew certs. currently, we don't do DNS to not overwrite existing user settings + await progressCallback({ percent: 80, message: 'Configuring reverse proxy' }); + await reverseProxy.configureApp(app, AuditSource.APPTASK); + + await progressCallback({ percent: 100, message: 'Done' }); + await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); +} + +// this command can also be called when the app is stopped. 
do not touch services +async function uninstallCommand(app, args, progressCallback) { + assert.strictEqual(typeof app, 'object'); + assert.strictEqual(typeof args, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + await progressCallback({ percent: 20, message: 'Deleting container' }); + await reverseProxy.unconfigureApp(app); + await deleteContainers(app, {}); + + await progressCallback({ percent: 30, message: 'Teardown addons' }); + await services.teardownAddons(app, app.manifest.addons); + + await progressCallback({ percent: 40, message: 'Cleanup file manager' }); + + await progressCallback({ percent: 50, message: 'Deleting app data directory' }); + await deleteAppDir(app, { removeDirectory: true }); + + await progressCallback({ percent: 60, message: 'Deleting image' }); + await docker.deleteImage(app.manifest.dockerImage); + + await progressCallback({ percent: 70, message: 'Unregistering domains' }); + await dns.unregisterLocations([ { subdomain: app.subdomain, domain: app.domain } ].concat(app.secondaryDomains).concat(app.redirectDomains).concat(app.aliasDomains), progressCallback); + + await progressCallback({ percent: 90, message: 'Cleanup logs' }); + await cleanupLogs(app); + + await progressCallback({ percent: 95, message: 'Remove app from database' }); + await apps.del(app.id); +} + +async function addLogrotateConfig(app) { + assert.strictEqual(typeof app, 'object'); + + const result = await docker.inspect(app.containerId); + + const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; }); + if (!runVolume) throw new BoxError(BoxError.DOCKER_ERROR, 'App does not have /run mounted'); + + // logrotate configs can have arbitrary commands, so the config files must be owned by root + const logrotateConf = ejs.render(LOGROTATE_CONFIG_EJS, { volumePath: runVolume.Source, appId: app.id }); + const tmpFilePath = path.join(os.tmpdir(), app.id + '.logrotate'); + + safe.fs.writeFileSync(tmpFilePath, 
logrotateConf); + if (safe.error) throw new BoxError(BoxError.FS_ERROR, `Error writing logrotate config: ${safe.error.message}`); + + const [error] = await safe(shell.sudo([ CONFIGURE_LOGROTATE_CMD, 'add', app.id, tmpFilePath ], {})); + if (error) throw new BoxError(BoxError.LOGROTATE_ERROR, `Error adding logrotate config: ${error.message}`); +} + +async function createContainer(app) { + assert.strictEqual(typeof app, 'object'); + assert(!app.containerId); // otherwise, it will trigger volumeFrom + + if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return; + + debug('createContainer: creating container'); + + const container = await docker.createContainer(app); + + await updateApp(app, { containerId: container.id }); + + // re-generate configs that rely on container id + await addLogrotateConfig(app); +} + async function installCommand(app, args, progressCallback) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof args, 'object'); @@ -412,7 +497,7 @@ async function installCommand(app, args, progressCallback) { await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); } -// this command can also be called when the app is stopped. do not touch services +// configure is called for an infra update and repair to re-create container, reverseproxy config. it's all "local" async function recreateCommand(app, args, progressCallback) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof args, 'object'); @@ -552,7 +637,6 @@ async function migrateDataDirCommand(app, args, progressCallback) { await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); } -// configure is called for an infra update and repair to re-create container, reverseproxy config. 
it's all "local" async function configureCommand(app, args, progressCallback) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof args, 'object'); @@ -703,95 +787,6 @@ async function updateCommand(app, args, progressCallback) { await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, updateInfo: null, updateTime: new Date() }); } -async function startCommand(app, args, progressCallback) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof args, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - await progressCallback({ percent: 10, message: 'Starting app services' }); - await services.startAppServices(app); - - if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) { - await progressCallback({ percent: 35, message: 'Starting container' }); - await docker.startContainer(app.id); - } - - // stopped apps do not renew certs. currently, we don't do DNS to not overwrite existing user settings - await progressCallback({ percent: 80, message: 'Configuring reverse proxy' }); - await reverseProxy.configureApp(app, AuditSource.APPTASK); - - await progressCallback({ percent: 100, message: 'Done' }); - await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); -} - -async function stopCommand(app, args, progressCallback) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof args, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - // we don't delete the containers. app containers are created with the unless-stopped restart policy. 
there is no danger of apps getting restarted on reboot - await progressCallback({ percent: 20, message: 'Stopping container' }); - await reverseProxy.unconfigureApp(app); // removing nginx configs also means that we can auto-cleanup old certs since they are not referenced - await docker.stopContainers(app.id); - - await progressCallback({ percent: 50, message: 'Stopping app services' }); - await services.stopAppServices(app); - - await progressCallback({ percent: 100, message: 'Done' }); - await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); -} - -async function restartCommand(app, args, progressCallback) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof args, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) { - await progressCallback({ percent: 10, message: 'Starting app services' }); - await services.startAppServices(app); - - await progressCallback({ percent: 20, message: 'Restarting container' }); - await docker.restartContainer(app.id); - } - - // stopped apps do not renew certs. 
currently, we don't do DNS to not overwrite existing user settings - await progressCallback({ percent: 80, message: 'Configuring reverse proxy' }); - await reverseProxy.configureApp(app, AuditSource.APPTASK); - - await progressCallback({ percent: 100, message: 'Done' }); - await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }); -} - -async function uninstallCommand(app, args, progressCallback) { - assert.strictEqual(typeof app, 'object'); - assert.strictEqual(typeof args, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - await progressCallback({ percent: 20, message: 'Deleting container' }); - await reverseProxy.unconfigureApp(app); - await deleteContainers(app, {}); - - await progressCallback({ percent: 30, message: 'Teardown addons' }); - await services.teardownAddons(app, app.manifest.addons); - - await progressCallback({ percent: 40, message: 'Cleanup file manager' }); - - await progressCallback({ percent: 50, message: 'Deleting app data directory' }); - await deleteAppDir(app, { removeDirectory: true }); - - await progressCallback({ percent: 60, message: 'Deleting image' }); - await docker.deleteImage(app.manifest.dockerImage); - - await progressCallback({ percent: 70, message: 'Unregistering domains' }); - await dns.unregisterLocations([ { subdomain: app.subdomain, domain: app.domain } ].concat(app.secondaryDomains).concat(app.redirectDomains).concat(app.aliasDomains), progressCallback); - - await progressCallback({ percent: 90, message: 'Cleanup logs' }); - await cleanupLogs(app); - - await progressCallback({ percent: 95, message: 'Remove app from database' }); - await apps.del(app.id); -} - async function run(appId, args, progressCallback) { assert.strictEqual(typeof appId, 'string'); assert.strictEqual(typeof args, 'object'); @@ -867,9 +862,13 @@ async function run(appId, args, progressCallback) { return result || null; } +const _createAppDir = createAppDir; + +const _deleteAppDir = 
deleteAppDir; + export default { run, _createAppDir, _deleteAppDir, - _verifyManifest, + _verifyManifest: verifyManifest, }; diff --git a/src/backupcleaner.js b/src/backupcleaner.js index 5b05e706b..71392b77b 100644 --- a/src/backupcleaner.js +++ b/src/backupcleaner.js @@ -13,9 +13,6 @@ import safe from 'safetydance'; const debug = debugModule('box:backupcleaner'); -const _applyBackupRetention = applyBackupRetention; - - function applyBackupRetention(allBackups, retention, referencedBackupIds) { assert(Array.isArray(allBackups)); assert.strictEqual(typeof retention, 'object'); @@ -308,6 +305,8 @@ async function run(siteId, progressCallback) { return { removedBoxBackupPaths, removedMailBackupPaths, removedAppBackupPaths, missingBackupPaths }; } +const _applyBackupRetention = applyBackupRetention; + export default { run, _applyBackupRetention, diff --git a/src/backupformat/rsync.js b/src/backupformat/rsync.js index 0985f2028..ae87fa53d 100644 --- a/src/backupformat/rsync.js +++ b/src/backupformat/rsync.js @@ -21,10 +21,6 @@ import util from 'node:util'; const debug = debugModule('box:backupformat/rsync'); const shell = shellModule('backupformat/rsync'); -const _saveFsMetadata = saveFsMetadata; -const _restoreFsMetadata = restoreFsMetadata; - - async function addFile(sourceFile, encryption, uploader, progressCallback) { assert.strictEqual(typeof sourceFile, 'string'); assert.strictEqual(typeof encryption, 'object'); @@ -71,6 +67,89 @@ async function addFile(sourceFile, encryption, uploader, progressCallback) { }; } +async function saveFsMetadata(dataLayout, metadataFile) { + assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + assert.strictEqual(typeof metadataFile, 'string'); + + // contains paths prefixed with './' + const metadata = { + emptyDirs: [], + execFiles: [], + symlinks: [] + }; + + // we assume small number of files. 
spawnSync will raise a ENOBUFS error after maxBuffer + for (const lp of dataLayout.localPaths()) { + const [emptyDirsError, emptyDirs] = await safe(shell.spawn('find', [lp, '-type', 'd', '-empty'], { encoding: 'utf8', maxLines: 80000 })); + if (emptyDirsError && emptyDirsError.stdoutLineCount >= 80000) throw new BoxError(BoxError.FS_ERROR, `Too many empty directories. Run "find ${lp} -type d -empty" to investigate`); + if (emptyDirsError) throw emptyDirsError; + if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed))); + + const [execFilesError, execFiles] = await safe(shell.spawn('find', [lp, '-type', 'f', '-executable'], { encoding: 'utf8', maxLines: 20000 })); + if (execFilesError && execFilesError.stdoutLineCount >= 20000) throw new BoxError(BoxError.FS_ERROR, `Too many executable files. Run "find ${lp} -type f -executable" to investigate`); + if (execFilesError) throw execFilesError; + if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef))); + + const [symlinkFilesError, symlinkFiles] = await safe(shell.spawn('find', [lp, '-type', 'l'], { encoding: 'utf8', maxLines: 20000 })); + if (symlinkFilesError && symlinkFilesError.stdoutLineCount >= 20000) throw new BoxError(BoxError.FS_ERROR, `Too many symlinks. 
Run "find ${lp} -type l" to investigate`); + if (symlinkFilesError) throw symlinkFilesError; + + if (symlinkFiles.length) metadata.symlinks = metadata.symlinks.concat(symlinkFiles.trim().split('\n').map((sl) => { + const site = safe.fs.readlinkSync(sl); + return { path: dataLayout.toRemotePath(sl), site }; + })); + } + + if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 2))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`); +} + +// this is not part of 'snapshotting' because we need root access to traverse +async function restoreFsMetadata(dataLayout, metadataFile) { + assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); + assert.strictEqual(typeof metadataFile, 'string'); + + debug(`Recreating empty directories in ${dataLayout.toString()}`); + + const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8'); + if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message); + const metadata = safe.JSON.parse(metadataJson); + if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message); + + for (const emptyDir of metadata.emptyDirs) { + const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true })); + if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`); + } + + for (const execFile of metadata.execFiles) { + const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8))); + if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`); + } + + for (const symlink of (metadata.symlinks || [])) { + if (!symlink.site) continue; + // the path may not exist if we had a directory full of symlinks + const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { 
recursive: true })); + if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`); + const [symlinkError] = await safe(fs.promises.symlink(symlink.site, dataLayout.toLocalPath(symlink.path), 'file')); + if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`); + } +} + +async function copy(backupSite, fromPath, toPath, progressCallback) { + assert.strictEqual(typeof backupSite, 'object'); + assert.strictEqual(typeof fromPath, 'string'); + assert.strictEqual(typeof toPath, 'string'); + assert.strictEqual(typeof progressCallback, 'function'); + + await backupSites.storageApi(backupSite).copyDir(backupSite.config, backupSite.limits, fromPath, toPath, progressCallback); +} + +function getFileExtension(encryption) { + assert.strictEqual(typeof encryption, 'boolean'); + + return ''; // this also signals to backupcleanear that we are dealing with directories +} + async function sync(backupSite, remotePath, dataLayout, progressCallback) { assert.strictEqual(typeof backupSite, 'object'); assert.strictEqual(typeof remotePath, 'string'); @@ -136,74 +215,6 @@ async function sync(backupSite, remotePath, dataLayout, progressCallback) { }; } -// this is not part of 'snapshotting' because we need root access to traverse -async function saveFsMetadata(dataLayout, metadataFile) { - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); - assert.strictEqual(typeof metadataFile, 'string'); - - // contains paths prefixed with './' - const metadata = { - emptyDirs: [], - execFiles: [], - symlinks: [] - }; - - // we assume small number of files. 
spawnSync will raise a ENOBUFS error after maxBuffer - for (const lp of dataLayout.localPaths()) { - const [emptyDirsError, emptyDirs] = await safe(shell.spawn('find', [lp, '-type', 'd', '-empty'], { encoding: 'utf8', maxLines: 80000 })); - if (emptyDirsError && emptyDirsError.stdoutLineCount >= 80000) throw new BoxError(BoxError.FS_ERROR, `Too many empty directories. Run "find ${lp} -type d -empty" to investigate`); - if (emptyDirsError) throw emptyDirsError; - if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed))); - - const [execFilesError, execFiles] = await safe(shell.spawn('find', [lp, '-type', 'f', '-executable'], { encoding: 'utf8', maxLines: 20000 })); - if (execFilesError && execFilesError.stdoutLineCount >= 20000) throw new BoxError(BoxError.FS_ERROR, `Too many executable files. Run "find ${lp} -type f -executable" to investigate`); - if (execFilesError) throw execFilesError; - if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef))); - - const [symlinkFilesError, symlinkFiles] = await safe(shell.spawn('find', [lp, '-type', 'l'], { encoding: 'utf8', maxLines: 20000 })); - if (symlinkFilesError && symlinkFilesError.stdoutLineCount >= 20000) throw new BoxError(BoxError.FS_ERROR, `Too many symlinks. 
Run "find ${lp} -type l" to investigate`); - if (symlinkFilesError) throw symlinkFilesError; - - if (symlinkFiles.length) metadata.symlinks = metadata.symlinks.concat(symlinkFiles.trim().split('\n').map((sl) => { - const site = safe.fs.readlinkSync(sl); - return { path: dataLayout.toRemotePath(sl), site }; - })); - } - - if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 2))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`); -} - -async function restoreFsMetadata(dataLayout, metadataFile) { - assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout'); - assert.strictEqual(typeof metadataFile, 'string'); - - debug(`Recreating empty directories in ${dataLayout.toString()}`); - - const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8'); - if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message); - const metadata = safe.JSON.parse(metadataJson); - if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message); - - for (const emptyDir of metadata.emptyDirs) { - const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true })); - if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`); - } - - for (const execFile of metadata.execFiles) { - const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8))); - if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`); - } - - for (const symlink of (metadata.symlinks || [])) { - if (!symlink.site) continue; - // the path may not exist if we had a directory full of symlinks - const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true })); - if (mkdirError) throw new BoxError(BoxError.FS_ERROR, 
`unable to symlink (mkdir): ${mkdirError.message}`); - const [symlinkError] = await safe(fs.promises.symlink(symlink.site, dataLayout.toLocalPath(symlink.path), 'file')); - if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`); - } -} - async function downloadDir(backupSite, remotePath, dataLayout, progressCallback) { assert.strictEqual(typeof backupSite, 'object'); assert.strictEqual(typeof remotePath, 'string'); @@ -297,21 +308,6 @@ async function upload(backupSite, remotePath, dataLayout, progressCallback) { return await sync(backupSite, remotePath, dataLayout, progressCallback); // { stats, integrityMap } } -async function copy(backupSite, fromPath, toPath, progressCallback) { - assert.strictEqual(typeof backupSite, 'object'); - assert.strictEqual(typeof fromPath, 'string'); - assert.strictEqual(typeof toPath, 'string'); - assert.strictEqual(typeof progressCallback, 'function'); - - await backupSites.storageApi(backupSite).copyDir(backupSite.config, backupSite.limits, fromPath, toPath, progressCallback); -} - -function getFileExtension(encryption) { - assert.strictEqual(typeof encryption, 'boolean'); - - return ''; // this also signals to backupcleanear that we are dealing with directories -} - async function verify(backupSite, remotePath, integrityMap, progressCallback) { assert.strictEqual(typeof backupSite, 'object'); assert.strictEqual(typeof remotePath, 'string'); @@ -378,6 +374,6 @@ export default { verify, getFileExtension, copy, - _saveFsMetadata, - _restoreFsMetadata, + _saveFsMetadata: saveFsMetadata, + _restoreFsMetadata: restoreFsMetadata, }; diff --git a/src/boxerror.js b/src/boxerror.js index 785a5d404..06c72dec7 100644 --- a/src/boxerror.js +++ b/src/boxerror.js @@ -4,8 +4,6 @@ import assert from 'node:assert'; import { HttpError } from '@cloudron/connect-lastmile'; import util from 'node:util'; -export default BoxError; - function BoxError(reason, errorOrMessage, extra = {}) { 
assert.strictEqual(typeof reason, 'string'); assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string', `string: ${errorOrMessage} type: ${typeof errorOrMessage} json: ${JSON.stringify(errorOrMessage)}`); @@ -111,3 +109,5 @@ BoxError.toHttpError = function (error) { return new HttpError(500, error); } }; + +export default BoxError; diff --git a/src/branding.js b/src/branding.js index dd6654d60..f19ef029f 100644 --- a/src/branding.js +++ b/src/branding.js @@ -65,6 +65,11 @@ async function setCloudronBackground(background) { await settings.setBlob(settings.CLOUDRON_BACKGROUND_KEY, background); } +async function getFooter() { + const value = await settings.get(settings.FOOTER_KEY); + return value || constants.FOOTER; +} + async function renderFooter() { const footer = await getFooter(); const year = new Date().getFullYear(); @@ -73,11 +78,6 @@ async function renderFooter() { .replace(/%VERSION%/g, constants.VERSION); } -async function getFooter() { - const value = await settings.get(settings.FOOTER_KEY); - return value || constants.FOOTER; -} - async function setFooter(footer, auditSource) { assert.strictEqual(typeof footer, 'string'); assert(auditSource && typeof auditSource === 'object'); diff --git a/src/cron.js b/src/cron.js index b4a974e87..95b59d1ea 100644 --- a/src/cron.js +++ b/src/cron.js @@ -86,6 +86,84 @@ function getCronSeed() { return { hour, minute }; } +async function handleBackupScheduleChanged(site) { + assert.strictEqual(typeof site, 'object'); + + const tz = await cloudron.getTimeZone(); + + debug(`handleBackupScheduleChanged: schedule ${site.schedule} (${tz})`); + + if (gJobs.backups.has(site.id)) gJobs.backups.get(site.id).stop(); + gJobs.backups.delete(site.id); + + if (site.schedule === constants.CRON_PATTERN_NEVER) return; + + const job = CronJob.from({ + cronTime: site.schedule, + onTick: async () => { + const t = await backupSites.get(site.id); + if (!t) return; + await safe(backupSites.startBackupTask(t, AuditSource.CRON), 
{ debug }); + }, + start: true, + timeZone: tz + }); + gJobs.backups.set(site.id, job); +} + +async function handleAutoupdatePatternChanged(pattern) { + assert.strictEqual(typeof pattern, 'string'); + + const tz = await cloudron.getTimeZone(); + + debug(`autoupdatePatternChanged: pattern - ${pattern} (${tz})`); + + if (gJobs.autoUpdater) gJobs.autoUpdater.stop(); + gJobs.autoUpdater = null; + + if (pattern === constants.CRON_PATTERN_NEVER) return; + + gJobs.autoUpdater = CronJob.from({ + cronTime: pattern, + onTick: async () => await safe(updater.autoUpdate(AuditSource.CRON), { debug }), + start: true, + timeZone: tz + }); +} + +function handleDynamicDnsChanged(enabled) { + assert.strictEqual(typeof enabled, 'boolean'); + + debug('Dynamic DNS setting changed to %s', enabled); + + if (gJobs.dynamicDns) gJobs.dynamicDns.stop(); + gJobs.dynamicDns = null; + + if (!enabled) return; + + gJobs.dynamicDns = CronJob.from({ + // until we can be smarter about actual IP changes, lets ensure it every 10minutes + cronTime: '00 */10 * * * *', + onTick: async () => { await safe(dyndns.refreshDns(AuditSource.CRON), { debug }); }, + start: true + }); +} + +async function handleExternalLdapChanged(config) { + assert.strictEqual(typeof config, 'object'); + + if (gJobs.externalLdapSyncer) gJobs.externalLdapSyncer.stop(); + gJobs.externalLdapSyncer = null; + + if (config.provider === 'noop') return; + + gJobs.externalLdapSyncer = CronJob.from({ + cronTime: '00 00 */4 * * *', // every 4 hours + onTick: async () => await safe(externalLdap.startSyncer(AuditSource.CRON), { debug }), + start: true + }); +} + async function startJobs() { const { hour, minute } = getCronSeed(); @@ -195,93 +273,6 @@ async function startJobs() { await handleExternalLdapChanged(await externalLdap.getConfig()); } -async function handleBackupScheduleChanged(site) { - assert.strictEqual(typeof site, 'object'); - - const tz = await cloudron.getTimeZone(); - - debug(`handleBackupScheduleChanged: schedule 
${site.schedule} (${tz})`); - - if (gJobs.backups.has(site.id)) gJobs.backups.get(site.id).stop(); - gJobs.backups.delete(site.id); - - if (site.schedule === constants.CRON_PATTERN_NEVER) return; - - const job = CronJob.from({ - cronTime: site.schedule, - onTick: async () => { - const t = await backupSites.get(site.id); - if (!t) return; - await safe(backupSites.startBackupTask(t, AuditSource.CRON), { debug }); - }, - start: true, - timeZone: tz - }); - gJobs.backups.set(site.id, job); -} - -async function handleTimeZoneChanged(tz) { - assert.strictEqual(typeof tz, 'string'); - - debug('handleTimeZoneChanged: recreating all jobs'); - await stopJobs(); - await scheduler.deleteJobs(); // have to re-create with new tz - await startJobs(); -} - -async function handleAutoupdatePatternChanged(pattern) { - assert.strictEqual(typeof pattern, 'string'); - - const tz = await cloudron.getTimeZone(); - - debug(`autoupdatePatternChanged: pattern - ${pattern} (${tz})`); - - if (gJobs.autoUpdater) gJobs.autoUpdater.stop(); - gJobs.autoUpdater = null; - - if (pattern === constants.CRON_PATTERN_NEVER) return; - - gJobs.autoUpdater = CronJob.from({ - cronTime: pattern, - onTick: async () => await safe(updater.autoUpdate(AuditSource.CRON), { debug }), - start: true, - timeZone: tz - }); -} - -function handleDynamicDnsChanged(enabled) { - assert.strictEqual(typeof enabled, 'boolean'); - - debug('Dynamic DNS setting changed to %s', enabled); - - if (gJobs.dynamicDns) gJobs.dynamicDns.stop(); - gJobs.dynamicDns = null; - - if (!enabled) return; - - gJobs.dynamicDns = CronJob.from({ - // until we can be smarter about actual IP changes, lets ensure it every 10minutes - cronTime: '00 */10 * * * *', - onTick: async () => { await safe(dyndns.refreshDns(AuditSource.CRON), { debug }); }, - start: true - }); -} - -async function handleExternalLdapChanged(config) { - assert.strictEqual(typeof config, 'object'); - - if (gJobs.externalLdapSyncer) gJobs.externalLdapSyncer.stop(); - 
gJobs.externalLdapSyncer = null; - - if (config.provider === 'noop') return; - - gJobs.externalLdapSyncer = CronJob.from({ - cronTime: '00 00 */4 * * *', // every 4 hours - onTick: async () => await safe(externalLdap.startSyncer(AuditSource.CRON), { debug }), - start: true - }); -} - async function stopJobs() { for (const jobName in gJobs) { if (!gJobs[jobName]) continue; @@ -295,6 +286,15 @@ async function stopJobs() { } } +async function handleTimeZoneChanged(tz) { + assert.strictEqual(typeof tz, 'string'); + + debug('handleTimeZoneChanged: recreating all jobs'); + await stopJobs(); + await scheduler.deleteJobs(); // have to re-create with new tz + await startJobs(); +} + export default { startJobs, diff --git a/src/dashboard.js b/src/dashboard.js index 711c83924..385f0a255 100644 --- a/src/dashboard.js +++ b/src/dashboard.js @@ -20,9 +20,6 @@ import userDirectory from './user-directory.js'; const debug = debugModule('box:dashboard'); -const _setLocation = setLocation; - - async function getLocation() { const domain = await settings.get(settings.DASHBOARD_DOMAIN_KEY); const subdomain = await settings.get(settings.DASHBOARD_SUBDOMAIN_KEY); @@ -73,6 +70,22 @@ async function getConfig() { }; } +async function prepareLocation(subdomain, domain, auditSource, progressCallback) { + assert.strictEqual(typeof subdomain, 'string'); + assert.strictEqual(typeof domain, 'string'); + assert.strictEqual(typeof auditSource, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + const location = { subdomain, domain }; + const fqdn = dns.fqdn(subdomain, domain); + progressCallback({ percent: 20, message: `Updating DNS of ${fqdn}` }); + await dns.registerLocations([location], { overwriteDns: true }, progressCallback); + progressCallback({ percent: 40, message: `Waiting for DNS of ${fqdn}` }); + await dns.waitForLocations([location], progressCallback); + progressCallback({ percent: 60, message: `Getting certificate of ${fqdn}` }); + await 
reverseProxy.ensureCertificate(location, {}, auditSource); +} + async function startPrepareLocation(domain, auditSource) { assert.strictEqual(typeof domain, 'string'); assert.strictEqual(typeof auditSource, 'object'); @@ -96,22 +109,6 @@ async function startPrepareLocation(domain, auditSource) { return taskId; } -async function prepareLocation(subdomain, domain, auditSource, progressCallback) { - assert.strictEqual(typeof subdomain, 'string'); - assert.strictEqual(typeof domain, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - const location = { subdomain, domain }; - const fqdn = dns.fqdn(subdomain, domain); - progressCallback({ percent: 20, message: `Updating DNS of ${fqdn}` }); - await dns.registerLocations([location], { overwriteDns: true }, progressCallback); - progressCallback({ percent: 40, message: `Waiting for DNS of ${fqdn}` }); - await dns.waitForLocations([location], progressCallback); - progressCallback({ percent: 60, message: `Getting certificate of ${fqdn}` }); - await reverseProxy.ensureCertificate(location, {}, auditSource); -} - async function setupLocation(subdomain, domain, auditSource) { assert.strictEqual(typeof subdomain, 'string'); assert.strictEqual(typeof domain, 'string'); @@ -141,6 +138,8 @@ async function changeLocation(subdomain, domain, auditSource) { await safe(reverseProxy.removeDashboardConfig(oldLocation.subdomain, oldLocation.domain), { debug }); } +const _setLocation = setLocation; + export default { getLocation, clearLocation, diff --git a/src/database.js b/src/database.js index 25a129f80..7f44431b6 100644 --- a/src/database.js +++ b/src/database.js @@ -10,9 +10,6 @@ import shellModule from './shell.js'; const debug = debugModule('box:database'); const shell = shellModule('database'); -const _clear = clear; - - let gConnectionPool = null; const gDatabase = { @@ -23,6 +20,45 @@ const gDatabase = { name: 'box' }; +async function uninitialize() { + if 
(!gConnectionPool) return; + + await safe(gConnectionPool.end(), { debug }); + gConnectionPool = null; + debug('pool closed'); +} + +async function query(...args) { + assert.notStrictEqual(gConnectionPool, null, 'Database connection is already closed'); + + const [error, result] = await safe(gConnectionPool.query(...args)); // this is same as getConnection/query/release + if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null }); + return result[0]; // the promise version returns a tuple of [rows, fields] +} + +async function transaction(queries) { + assert(Array.isArray(queries)); + + const [error, connection] = await safe(gConnectionPool.getConnection()); + if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage }); + + try { + await connection.beginTransaction(); + const results = []; + for (const query of queries) { + const [rows /*, fields */] = await connection.query(query.query, query.args); + results.push(rows); + } + await connection.commit(); + connection.release(); // no await! + return results; + } catch (error) { + await safe(connection.rollback(), { debug }); + connection.release(); // no await! + throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null }); + } +} + async function initialize() { if (gConnectionPool !== null) return; @@ -64,14 +100,6 @@ async function initialize() { }); } -async function uninitialize() { - if (!gConnectionPool) return; - - await safe(gConnectionPool.end(), { debug }); - gConnectionPool = null; - debug('pool closed'); -} - async function clear() { const tables = await query('SELECT table_name FROM information_schema.tables WHERE table_schema = ? 
AND table_name != ?', [ 'box', 'migrations' ]); const queries = [{ query: 'SET FOREIGN_KEY_CHECKS = 0' }]; @@ -81,37 +109,6 @@ async function clear() { await transaction(queries); } -async function query(...args) { - assert.notStrictEqual(gConnectionPool, null, 'Database connection is already closed'); - - const [error, result] = await safe(gConnectionPool.query(...args)); // this is same as getConnection/query/release - if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null }); - return result[0]; // the promise version returns a tuple of [rows, fields] -} - -async function transaction(queries) { - assert(Array.isArray(queries)); - - const [error, connection] = await safe(gConnectionPool.getConnection()); - if (error) throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage }); - - try { - await connection.beginTransaction(); - const results = []; - for (const query of queries) { - const [rows /*, fields */] = await connection.query(query.query, query.args); - results.push(rows); - } - await connection.commit(); - connection.release(); // no await! - return results; - } catch (error) { - await safe(connection.rollback(), { debug }); - connection.release(); // no await! 
- throw new BoxError(BoxError.DATABASE_ERROR, error, { sqlCode: error.code, sqlMessage: error.sqlMessage || null }); - } -} - async function runInTransaction(callback) { assert.strictEqual(typeof callback, 'function'); @@ -161,6 +158,8 @@ async function exportToFile(file) { if (error) throw new BoxError(BoxError.DATABASE_ERROR, error); } +const _clear = clear; + export default { initialize, uninitialize, diff --git a/src/dig.js b/src/dig.js index 16725de60..e43ed5f27 100644 --- a/src/dig.js +++ b/src/dig.js @@ -8,15 +8,6 @@ function _setMockResolve(fn) { _mockResolve = fn; } -function resolveWrapper(hostname, rrtype, options) { - if (_mockResolve) return _mockResolve(hostname, rrtype, options); - return resolve(hostname, rrtype, options); -} - - -// a note on TXT records. It doesn't have quotes ("") at the DNS level. Those quotes -// are added for DNS server software to enclose spaces. Such quotes may also be returned -// by the DNS REST API of some providers async function resolve(hostname, rrtype, options) { assert.strictEqual(typeof hostname, 'string'); assert.strictEqual(typeof rrtype, 'string'); @@ -33,6 +24,15 @@ async function resolve(hostname, rrtype, options) { return result; } + +// a note on TXT records. It doesn't have quotes ("") at the DNS level. Those quotes +// are added for DNS server software to enclose spaces. 
Such quotes may also be returned +// by the DNS REST API of some providers +function resolveWrapper(hostname, rrtype, options) { + if (_mockResolve) return _mockResolve(hostname, rrtype, options); + return resolve(hostname, rrtype, options); +} + export default { resolve: resolveWrapper, _setMockResolve, diff --git a/src/directoryserver.js b/src/directoryserver.js index dd1fd546c..7a168c9de 100644 --- a/src/directoryserver.js +++ b/src/directoryserver.js @@ -56,49 +56,82 @@ async function validateConfig(config) { if (!gotOne) throw new BoxError(BoxError.BAD_FIELD, 'allowlist must at least contain one IP or range'); } -async function applyConfig(config) { - assert.strictEqual(typeof config, 'object'); +async function authorize(req, res, next) { + debug('authorize: ', req.connection.ldap.bindDN.toString()); - // this is done only because it's easier for the shell script and the firewall service to get the value - if (config.enabled) { - if (!safe.fs.writeFileSync(paths.LDAP_ALLOWLIST_FILE, config.allowlist + '\n', 'utf8')) throw new BoxError(BoxError.FS_ERROR, safe.error.message); - } else { - safe.fs.unlinkSync(paths.LDAP_ALLOWLIST_FILE); - } + // this is for connection attempts without previous bind + if (req.connection.ldap.bindDN.equals('cn=anonymous')) return next(new ldap.InsufficientAccessRightsError()); - const [error] = await safe(shell.sudo([ SET_LDAP_ALLOWLIST_CMD ], {})); - if (error) throw new BoxError(BoxError.IPTABLES_ERROR, `Error setting ldap allowlist: ${error.message}`); + // we only allow this one DN to pass + if (!req.connection.ldap.bindDN.equals(constants.USER_DIRECTORY_LDAP_DN)) return next(new ldap.InsufficientAccessRightsError()); - if (!config.enabled) { - await stop(); - return; - } - - if (!gServer) await start(); + return next(); } -async function setConfig(directoryServerConfig, auditSource) { - assert.strictEqual(typeof directoryServerConfig, 'object'); - assert(auditSource && typeof auditSource === 'object'); +async function 
maybeRootDSE(req, res, next) { + debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn.toString()}`); - if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode'); + if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, rootDSE search require base scope + if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError()); - const oldConfig = await getConfig(); - - const config = { - enabled: directoryServerConfig.enabled, - secret: directoryServerConfig.secret, - allowlist: directoryServerConfig.allowlist || '' - }; - - await validateConfig(config); - await settings.setJson(settings.DIRECTORY_SERVER_KEY, config); - await applyConfig(config); - - await eventlog.add(eventlog.ACTION_DIRECTORY_SERVER_CONFIGURE, auditSource, { fromEnabled: oldConfig.enabled, toEnabled: config.enabled }); + res.send({ + dn: '', + attributes: { + objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ], + supportedLDAPVersion: '3', + vendorName: 'Cloudron LDAP', + vendorVersion: '1.0.0', + supportedControl: [ ldap.PagedResultsControl.OID ], + supportedExtension: [] + } + }); + res.end(); } // helper function to deal with pagination +async function userAuth(req, res, next) { + // extract the common name which might have different attribute names + const cnAttributeName = Object.keys(req.dn.rdns[0].attrs)[0]; + const commonName = req.dn.rdns[0].attrs[cnAttributeName].value; + if (!commonName) return next(new ldap.NoSuchObjectError('Missing CN')); + + // totptoken is passed as the "attribute" using the '+' separator in the first RDNS of the request DN + // when totptoken attribute is present, it signals that we must enforce totp check + // totp check is currently requested by the client. 
this is the only way to auth against external cloudron dashboard, external cloudron app and external apps + const TOTPTOKEN_ATTRIBUTE_NAME = 'totptoken'; // This has to be in-sync with externalldap.js + const totpToken = TOTPTOKEN_ATTRIBUTE_NAME in req.dn.rdns[0].attrs ? req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME].value : null; + const skipTotpCheck = !(TOTPTOKEN_ATTRIBUTE_NAME in req.dn.rdns[0].attrs); + + let verifyFunc; + if (cnAttributeName === 'mail') { + verifyFunc = users.verifyWithEmail; + } else if (commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check + verifyFunc = users.verifyWithEmail; + } else if (commonName.indexOf('uid-') === 0) { + verifyFunc = users.verifyWithId; + } else { + verifyFunc = users.verifyWithUsername; + } + + const [error, user] = await safe(verifyFunc(commonName, req.credentials || '', '', { totpToken, skipTotpCheck })); + if (error && error.reason === BoxError.NOT_FOUND) return next(new ldap.NoSuchObjectError(error.message)); + if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(error.message)); + if (error) return next(new ldap.OperationsError(error.message)); + + req.user = user; + + next(); +} + +async function stop() { + if (!gServer) return; + + debug('stopping server'); + + await util.promisify(gServer.close.bind(gServer))(); + gServer = null; +} + function finalSend(results, req, res, next) { const min = 0, max = results.length; let cookie = null; @@ -157,40 +190,7 @@ function finalSend(results, req, res, next) { next(); } -async function authorize(req, res, next) { - debug('authorize: ', req.connection.ldap.bindDN.toString()); - - // this is for connection attempts without previous bind - if (req.connection.ldap.bindDN.equals('cn=anonymous')) return next(new ldap.InsufficientAccessRightsError()); - - // we only allow this one DN to pass - if (!req.connection.ldap.bindDN.equals(constants.USER_DIRECTORY_LDAP_DN)) return next(new 
ldap.InsufficientAccessRightsError()); - - return next(); -} - -// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base -// ldapjs seems to call this handler for everything when search === '' -async function maybeRootDSE(req, res, next) { - debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn.toString()}`); - - if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, rootDSE search require base scope - if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError()); - - res.send({ - dn: '', - attributes: { - objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ], - supportedLDAPVersion: '3', - vendorName: 'Cloudron LDAP', - vendorVersion: '1.0.0', - supportedControl: [ ldap.PagedResultsControl.OID ], - supportedExtension: [] - } - }); - res.end(); -} - +// Will attach req.user if successful async function userSearch(req, res, next) { debug('user search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id); @@ -286,41 +286,6 @@ async function groupSearch(req, res, next) { finalSend(results, req, res, next); } -// Will attach req.user if successful -async function userAuth(req, res, next) { - // extract the common name which might have different attribute names - const cnAttributeName = Object.keys(req.dn.rdns[0].attrs)[0]; - const commonName = req.dn.rdns[0].attrs[cnAttributeName].value; - if (!commonName) return next(new ldap.NoSuchObjectError('Missing CN')); - - // totptoken is passed as the "attribute" using the '+' separator in the first RDNS of the request DN - // when totptoken attribute is present, it signals that we must enforce totp check - // totp check is currently requested by the client. 
this is the only way to auth against external cloudron dashboard, external cloudron app and external apps - const TOTPTOKEN_ATTRIBUTE_NAME = 'totptoken'; // This has to be in-sync with externalldap.js - const totpToken = TOTPTOKEN_ATTRIBUTE_NAME in req.dn.rdns[0].attrs ? req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME].value : null; - const skipTotpCheck = !(TOTPTOKEN_ATTRIBUTE_NAME in req.dn.rdns[0].attrs); - - let verifyFunc; - if (cnAttributeName === 'mail') { - verifyFunc = users.verifyWithEmail; - } else if (commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check - verifyFunc = users.verifyWithEmail; - } else if (commonName.indexOf('uid-') === 0) { - verifyFunc = users.verifyWithId; - } else { - verifyFunc = users.verifyWithUsername; - } - - const [error, user] = await safe(verifyFunc(commonName, req.credentials || '', '', { totpToken, skipTotpCheck })); - if (error && error.reason === BoxError.NOT_FOUND) return next(new ldap.NoSuchObjectError(error.message)); - if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(error.message)); - if (error) return next(new ldap.OperationsError(error.message)); - - req.user = user; - - next(); -} - async function start() { assert(gServer === null, 'Already running'); @@ -383,13 +348,48 @@ async function start() { await util.promisify(gServer.listen.bind(gServer))(constants.USER_DIRECTORY_LDAPS_PORT, '::'); } -async function stop() { - if (!gServer) return; +// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base +// ldapjs seems to call this handler for everything when search === '' +async function applyConfig(config) { + assert.strictEqual(typeof config, 'object'); - debug('stopping server'); + // this is done only because it's easier for the shell script and the firewall service to get the value + if (config.enabled) { + if (!safe.fs.writeFileSync(paths.LDAP_ALLOWLIST_FILE, 
config.allowlist + '\n', 'utf8')) throw new BoxError(BoxError.FS_ERROR, safe.error.message); + } else { + safe.fs.unlinkSync(paths.LDAP_ALLOWLIST_FILE); + } - await util.promisify(gServer.close.bind(gServer))(); - gServer = null; + const [error] = await safe(shell.sudo([ SET_LDAP_ALLOWLIST_CMD ], {})); + if (error) throw new BoxError(BoxError.IPTABLES_ERROR, `Error setting ldap allowlist: ${error.message}`); + + if (!config.enabled) { + await stop(); + return; + } + + if (!gServer) await start(); +} + +async function setConfig(directoryServerConfig, auditSource) { + assert.strictEqual(typeof directoryServerConfig, 'object'); + assert(auditSource && typeof auditSource === 'object'); + + if (constants.DEMO) throw new BoxError(BoxError.BAD_STATE, 'Not allowed in demo mode'); + + const oldConfig = await getConfig(); + + const config = { + enabled: directoryServerConfig.enabled, + secret: directoryServerConfig.secret, + allowlist: directoryServerConfig.allowlist || '' + }; + + await validateConfig(config); + await settings.setJson(settings.DIRECTORY_SERVER_KEY, config); + await applyConfig(config); + + await eventlog.add(eventlog.ACTION_DIRECTORY_SERVER_CONFIGURE, auditSource, { fromEnabled: oldConfig.enabled, toEnabled: config.enabled }); } async function checkCertificate() { diff --git a/src/dns/hetznercloud.js b/src/dns/hetznercloud.js index 263c7789b..aaf92428d 100644 --- a/src/dns/hetznercloud.js +++ b/src/dns/hetznercloud.js @@ -29,6 +29,18 @@ function injectPrivateFields(newConfig, currentConfig) { if (!Object.hasOwn(newConfig, 'token')) newConfig.token = currentConfig.token; } +async function wait(domainObject, subdomain, type, value, options) { + assert.strictEqual(typeof domainObject, 'object'); + assert.strictEqual(typeof subdomain, 'string'); + assert.strictEqual(typeof type, 'string'); + assert.strictEqual(typeof value, 'string'); + assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 } + + const fqdn = dns.fqdn(subdomain, 
domainObject.domain); + + await waitForDns(fqdn, domainObject.zoneName, type, value, options); +} + async function getZone(domainConfig, zoneName) { assert.strictEqual(typeof domainConfig, 'object'); assert.strictEqual(typeof zoneName, 'string'); @@ -91,6 +103,34 @@ async function waitForAction(domainConfig, id) { }); } +async function del(domainObject, location, type, values) { + assert.strictEqual(typeof domainObject, 'object'); + assert.strictEqual(typeof location, 'string'); + assert.strictEqual(typeof type, 'string'); + assert(Array.isArray(values)); + + const domainConfig = domainObject.config, + zoneName = domainObject.zoneName, + name = dns.getName(domainObject, location, type) || '@'; + + const zone = await getZone(domainConfig, zoneName); + const records = await getRecords(domainConfig, zone, name, type); + if (records.length === 0) return; + + const [error, response] = await safe(superagent.del(`${ENDPOINT}/zones/${zone.id}/rrsets/${name}/${type}`) + .set('Authorization', `Bearer ${domainConfig.token}`) + .timeout(30 * 1000) + .retry(5) + .ok(() => true)); + + if (error) throw new BoxError(BoxError.NETWORK_ERROR, error); + if (response.status === 404) return; + if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response)); + if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response)); + + await waitForAction(domainConfig, response.body.action.id); +} + async function upsert(domainObject, location, type, values) { assert.strictEqual(typeof domainObject, 'object'); assert.strictEqual(typeof location, 'string'); @@ -149,46 +189,6 @@ async function get(domainObject, location, type) { return result.map(function (record) { return record.value; }); } -async function del(domainObject, location, type, values) { - assert.strictEqual(typeof domainObject, 'object'); - assert.strictEqual(typeof location, 'string'); - assert.strictEqual(typeof type, 'string'); - 
assert(Array.isArray(values)); - - const domainConfig = domainObject.config, - zoneName = domainObject.zoneName, - name = dns.getName(domainObject, location, type) || '@'; - - const zone = await getZone(domainConfig, zoneName); - const records = await getRecords(domainConfig, zone, name, type); - if (records.length === 0) return; - - const [error, response] = await safe(superagent.del(`${ENDPOINT}/zones/${zone.id}/rrsets/${name}/${type}`) - .set('Authorization', `Bearer ${domainConfig.token}`) - .timeout(30 * 1000) - .retry(5) - .ok(() => true)); - - if (error) throw new BoxError(BoxError.NETWORK_ERROR, error); - if (response.status === 404) return; - if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response)); - if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response)); - - await waitForAction(domainConfig, response.body.action.id); -} - -async function wait(domainObject, subdomain, type, value, options) { - assert.strictEqual(typeof domainObject, 'object'); - assert.strictEqual(typeof subdomain, 'string'); - assert.strictEqual(typeof type, 'string'); - assert.strictEqual(typeof value, 'string'); - assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 } - - const fqdn = dns.fqdn(subdomain, domainObject.domain); - - await waitForDns(fqdn, domainObject.zoneName, type, value, options); -} - async function verifyDomainConfig(domainObject) { assert.strictEqual(typeof domainObject, 'object'); diff --git a/src/dns/waitfordns.js b/src/dns/waitfordns.js index 9d2e00ae1..1bc684a52 100644 --- a/src/dns/waitfordns.js +++ b/src/dns/waitfordns.js @@ -9,8 +9,6 @@ import _ from '../underscore.js'; const debug = debugModule('box:dns/waitfordns'); -export default waitForDns; - async function resolveIp(hostname, type, options) { assert.strictEqual(typeof hostname, 'string'); assert(type === 'A' || type === 'AAAA'); @@ -105,3 +103,5 @@ async function 
waitForDns(hostname, zoneName, type, value, options) { debug(`waitForDns: ${hostname} has propagated`); } + +export default waitForDns; diff --git a/src/docker.js b/src/docker.js index f1461e3fe..487dcc323 100644 --- a/src/docker.js +++ b/src/docker.js @@ -173,41 +173,6 @@ async function buildImage(dockerImage, sourceArchiveFilePath) { }); } -async function downloadImage(manifest) { - assert.strictEqual(typeof manifest, 'object'); - - debug(`downloadImage: ${manifest.dockerImage}`); - - const image = gConnection.getImage(manifest.dockerImage); - - const [error, result] = await safe(image.inspect()); - if (!error && result) return; // image is already present locally - - const parsedManifestRef = parseImageRef(manifest.dockerImage); - - await promiseRetry({ times: 10, interval: 5000, debug, retry: (pullError) => pullError.reason !== BoxError.FS_ERROR }, async () => { - // custom (non appstore) image - if (parsedManifestRef.registry !== null || !parsedManifestRef.fullRepositoryName.startsWith('cloudron/')) return await pullImage(manifest.dockerImage); - - // docker hub only uses first 64 bits for ipv6 addressing. this causes many ipv6 rate limit errors - // https://www.docker.com/blog/beta-ipv6-support-on-docker-hub-registry/ . as a hack, we try ipv4 explicity - let upstreamRef = null, pullError = null; - for (const registry of CLOUDRON_REGISTRIES) { - upstreamRef = `${registry}/${manifest.dockerImage}`; - [pullError] = await safe(pullImage(upstreamRef)); - if (!pullError) break; - } - - if (pullError || !upstreamRef) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull ${manifest.dockerImage} from dockerhub or quay: ${pullError?.message}`); - - // retag the downloaded image to not have the registry name. 
this prevents 'docker run' from redownloading it - debug(`downloadImage: tagging ${upstreamRef} as ${parsedManifestRef.fullRepositoryName}:${parsedManifestRef.tag}`); - await gConnection.getImage(upstreamRef).tag({ repo: parsedManifestRef.fullRepositoryName, tag: parsedManifestRef.tag }); - debug(`downloadImage: untagging ${upstreamRef}`); - await deleteImage(upstreamRef); - }); -} - async function getVolumeMounts(app) { assert.strictEqual(typeof app, 'object'); @@ -281,8 +246,6 @@ async function getMounts(app) { return volumeMounts.concat(addonMounts); } -// This only returns ipv4 addresses -// We dont bind to ipv6 interfaces, public prefix changes and container restarts wont work async function getAddressesForPort53() { const [error, deviceLinks] = await safe(fs.promises.readdir('/sys/class/net')); // https://man7.org/linux/man-pages/man5/sysfs.5.html if (error) return []; @@ -304,6 +267,276 @@ async function getAddressesForPort53() { return addresses; } +// This only returns ipv4 addresses +// We dont bind to ipv6 interfaces, public prefix changes and container restarts wont work +async function startContainer(containerId) { + assert.strictEqual(typeof containerId, 'string'); + + const container = gConnection.getContainer(containerId); + + const [error] = await safe(container.start()); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); + if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable + if (error && error.statusCode !== 304) throw new BoxError(BoxError.DOCKER_ERROR, error); // 304 means already started +} + +async function restartContainer(containerId) { + assert.strictEqual(typeof containerId, 'string'); + + const container = gConnection.getContainer(containerId); + + const [error] = await safe(container.restart()); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Contanier ${containerId} not 
found`); + if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable + if (error && error.statusCode !== 204) throw new BoxError(BoxError.DOCKER_ERROR, error); +} + +async function stopContainer(containerId) { + assert.strictEqual(typeof containerId, 'string'); + + debug(`stopContainer: stopping container ${containerId}`); + + const container = gConnection.getContainer(containerId); + + const options = { + t: 10 // wait for 10 seconds before killing it + }; + + let [error] = await safe(container.stop(options)); + if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + error.message); + + [error] = await safe(container.wait()); + if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + error.message); +} + +async function deleteContainer(containerId) { // id can also be name + assert.strictEqual(typeof containerId, 'string'); + + debug(`deleteContainer: deleting ${containerId}`); + + const container = gConnection.getContainer(containerId); + + const removeOptions = { + force: true, // kill container if it's running + v: true // removes volumes associated with the container (but not host mounts) + }; + + const [error] = await safe(container.remove(removeOptions)); + if (error && error.statusCode === 404) return; + + if (error) { + debug('Error removing container %s : %o', containerId, error); + throw new BoxError(BoxError.DOCKER_ERROR, error); + } +} + +async function deleteContainers(appId, options) { + assert.strictEqual(typeof appId, 'string'); + assert.strictEqual(typeof options, 'object'); + + const labels = [ 'appId=' + appId ]; + if (options.managedOnly) labels.push('isCloudronManaged=true'); + + const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) })); + if (error) 
throw new BoxError(BoxError.DOCKER_ERROR, error); + + for (const container of containers) { + await deleteContainer(container.Id); + } +} + +async function stopContainers(appId) { + assert.strictEqual(typeof appId, 'string'); + + const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: [ 'appId=' + appId ] }) })); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + + for (const container of containers) { + await stopContainer(container.Id); + } +} + +async function deleteImage(imageRef) { + assert.strictEqual(typeof imageRef, 'string'); + + if (!imageRef) return; + if (imageRef.includes('//') || imageRef.startsWith('/')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module (https://github.com/apocas/dockerode/issues/548) + + const removeOptions = { + force: false, // might be shared with another instance of this app + noprune: false // delete untagged parents + }; + + // registry v1 used to pull down all *tags*. this meant that deleting image by tag was not enough (since that + // just removes the tag). we used to remove the image by id. this is not required anymore because aliases are + // not created anymore after https://github.com/docker/docker/pull/10571 + debug(`deleteImage: removing ${imageRef}`); + const [error] = await safe(gConnection.getImage(imageRef.replace(/@sha256:.*/,'')).remove(removeOptions)); // can't have the manifest id. won't remove anythin + if (error && error.statusCode === 400) return; // invalid image format. 
this can happen if user installed with a bad --docker-image + if (error && error.statusCode === 404) return; // not found + if (error && error.statusCode === 409) return; // another container using the image + + if (error) { + debug(`Error removing image ${imageRef} : %o`, error); + throw new BoxError(BoxError.DOCKER_ERROR, error); + } +} + +async function inspect(containerId) { + assert.strictEqual(typeof containerId, 'string'); + + const container = gConnection.getContainer(containerId); + + const [error, result] = await safe(container.inspect()); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + + return result; +} + +async function downloadImage(manifest) { + assert.strictEqual(typeof manifest, 'object'); + + debug(`downloadImage: ${manifest.dockerImage}`); + + const image = gConnection.getImage(manifest.dockerImage); + + const [error, result] = await safe(image.inspect()); + if (!error && result) return; // image is already present locally + + const parsedManifestRef = parseImageRef(manifest.dockerImage); + + await promiseRetry({ times: 10, interval: 5000, debug, retry: (pullError) => pullError.reason !== BoxError.FS_ERROR }, async () => { + // custom (non appstore) image + if (parsedManifestRef.registry !== null || !parsedManifestRef.fullRepositoryName.startsWith('cloudron/')) return await pullImage(manifest.dockerImage); + + // docker hub only uses first 64 bits for ipv6 addressing. this causes many ipv6 rate limit errors + // https://www.docker.com/blog/beta-ipv6-support-on-docker-hub-registry/ . 
as a hack, we try ipv4 explicity + let upstreamRef = null, pullError = null; + for (const registry of CLOUDRON_REGISTRIES) { + upstreamRef = `${registry}/${manifest.dockerImage}`; + [pullError] = await safe(pullImage(upstreamRef)); + if (!pullError) break; + } + + if (pullError || !upstreamRef) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull ${manifest.dockerImage} from dockerhub or quay: ${pullError?.message}`); + + // retag the downloaded image to not have the registry name. this prevents 'docker run' from redownloading it + debug(`downloadImage: tagging ${upstreamRef} as ${parsedManifestRef.fullRepositoryName}:${parsedManifestRef.tag}`); + await gConnection.getImage(upstreamRef).tag({ repo: parsedManifestRef.fullRepositoryName, tag: parsedManifestRef.tag }); + debug(`downloadImage: untagging ${upstreamRef}`); + await deleteImage(upstreamRef); + }); +} + +async function getContainerIp(containerId) { + assert.strictEqual(typeof containerId, 'string'); + + if (constants.TEST) return '127.0.5.5'; + + const result = await inspect(containerId); + + const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null); + if (!ip) throw new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP'); + + return ip; +} + +async function createExec(containerId, options) { + assert.strictEqual(typeof containerId, 'string'); + assert.strictEqual(typeof options, 'object'); + + const container = gConnection.getContainer(containerId); + const [error, exec] = await safe(container.exec(options)); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); + if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + + return exec.id; +} + +async function getExec(execId) { + assert.strictEqual(typeof execId, 'string'); + + const exec = gConnection.getExec(execId); 
+ const [error, result] = await safe(exec.inspect()); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + + return { exitCode: result.ExitCode, running: result.Running }; +} + +async function startExec(execId, options) { + assert.strictEqual(typeof execId, 'string'); + assert.strictEqual(typeof options, 'object'); + + const exec = gConnection.getExec(execId); + const [error, stream] = await safe(exec.start(options)); /* in hijacked mode, stream is a net.socket */ + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + return stream; +} + +async function resizeExec(execId, options) { + assert.strictEqual(typeof execId, 'string'); + assert.strictEqual(typeof options, 'object'); + + const exec = gConnection.getExec(execId); + const [error] = await safe(exec.resize(options)); // { h, w } + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); +} + +async function getEvents(options) { + assert.strictEqual(typeof options, 'object'); + + const [error, stream] = await safe(gConnection.getEvents(options)); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + return stream; +} + +async function getStats(containerId, options) { + assert.strictEqual(typeof containerId, 'string'); + assert.strictEqual(typeof options, 'object'); + + const container = gConnection.getContainer(containerId); + + const [error, result] = await safe(container.stats({ stream: !!options.stream })); + if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); + + return result; +} + +async 
function info() { + const [error, result] = await safe(gConnection.info()); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`); + return result; +} + +async function df(options) { + assert.strictEqual(typeof options, 'object'); + + const [error, result] = await safe(gConnection.df(options)); + if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`); + return result; +} + +async function update(name, memory) { + assert.strictEqual(typeof name, 'string'); + assert.strictEqual(typeof memory, 'number'); + + // scale back db containers, if possible. this is retried because updating memory constraints can fail + // with failed to write to memory.memsw.limit_in_bytes: write /sys/fs/cgroup/memory/docker/xx/memory.memsw.limit_in_bytes: device or resource busy + for (let times = 0; times < 10; times++) { + const [error] = await safe(shell.spawn('docker', ['update', '--memory', memory, '--memory-swap', '-1', name], { encoding: 'utf8' })); + if (!error) return; + await timers.setTimeout(60 * 1000); + } + + throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to update container'); +} + async function createSubcontainer(app, name, cmd, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof name, 'string'); @@ -486,239 +719,6 @@ async function createContainer(app) { return await createSubcontainer(app, app.id /* name */, null /* cmd */, { } /* options */); } -async function startContainer(containerId) { - assert.strictEqual(typeof containerId, 'string'); - - const container = gConnection.getContainer(containerId); - - const [error] = await safe(container.start()); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); - if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable - if (error && error.statusCode !== 304) throw new 
BoxError(BoxError.DOCKER_ERROR, error); // 304 means already started -} - -async function restartContainer(containerId) { - assert.strictEqual(typeof containerId, 'string'); - - const container = gConnection.getContainer(containerId); - - const [error] = await safe(container.restart()); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Contanier ${containerId} not found`); - if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable - if (error && error.statusCode !== 204) throw new BoxError(BoxError.DOCKER_ERROR, error); -} - -async function stopContainer(containerId) { - assert.strictEqual(typeof containerId, 'string'); - - debug(`stopContainer: stopping container ${containerId}`); - - const container = gConnection.getContainer(containerId); - - const options = { - t: 10 // wait for 10 seconds before killing it - }; - - let [error] = await safe(container.stop(options)); - if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + error.message); - - [error] = await safe(container.wait()); - if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + error.message); -} - -async function deleteContainer(containerId) { // id can also be name - assert.strictEqual(typeof containerId, 'string'); - - debug(`deleteContainer: deleting ${containerId}`); - - const container = gConnection.getContainer(containerId); - - const removeOptions = { - force: true, // kill container if it's running - v: true // removes volumes associated with the container (but not host mounts) - }; - - const [error] = await safe(container.remove(removeOptions)); - if (error && error.statusCode === 404) return; - - if (error) { - debug('Error removing container %s : %o', containerId, error); - throw new BoxError(BoxError.DOCKER_ERROR, error); - } 
-} - -async function deleteContainers(appId, options) { - assert.strictEqual(typeof appId, 'string'); - assert.strictEqual(typeof options, 'object'); - - const labels = [ 'appId=' + appId ]; - if (options.managedOnly) labels.push('isCloudronManaged=true'); - - const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) })); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - for (const container of containers) { - await deleteContainer(container.Id); - } -} - -async function stopContainers(appId) { - assert.strictEqual(typeof appId, 'string'); - - const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: [ 'appId=' + appId ] }) })); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - for (const container of containers) { - await stopContainer(container.Id); - } -} - -async function deleteImage(imageRef) { - assert.strictEqual(typeof imageRef, 'string'); - - if (!imageRef) return; - if (imageRef.includes('//') || imageRef.startsWith('/')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module (https://github.com/apocas/dockerode/issues/548) - - const removeOptions = { - force: false, // might be shared with another instance of this app - noprune: false // delete untagged parents - }; - - // registry v1 used to pull down all *tags*. this meant that deleting image by tag was not enough (since that - // just removes the tag). we used to remove the image by id. this is not required anymore because aliases are - // not created anymore after https://github.com/docker/docker/pull/10571 - debug(`deleteImage: removing ${imageRef}`); - const [error] = await safe(gConnection.getImage(imageRef.replace(/@sha256:.*/,'')).remove(removeOptions)); // can't have the manifest id. won't remove anythin - if (error && error.statusCode === 400) return; // invalid image format. 
this can happen if user installed with a bad --docker-image - if (error && error.statusCode === 404) return; // not found - if (error && error.statusCode === 409) return; // another container using the image - - if (error) { - debug(`Error removing image ${imageRef} : %o`, error); - throw new BoxError(BoxError.DOCKER_ERROR, error); - } -} - -async function inspect(containerId) { - assert.strictEqual(typeof containerId, 'string'); - - const container = gConnection.getContainer(containerId); - - const [error, result] = await safe(container.inspect()); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - return result; -} - -async function getContainerIp(containerId) { - assert.strictEqual(typeof containerId, 'string'); - - if (constants.TEST) return '127.0.5.5'; - - const result = await inspect(containerId); - - const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null); - if (!ip) throw new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP'); - - return ip; -} - -async function createExec(containerId, options) { - assert.strictEqual(typeof containerId, 'string'); - assert.strictEqual(typeof options, 'object'); - - const container = gConnection.getContainer(containerId); - const [error, exec] = await safe(container.exec(options)); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); - if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - return exec.id; -} - -async function startExec(execId, options) { - assert.strictEqual(typeof execId, 'string'); - assert.strictEqual(typeof options, 'object'); - - const exec = gConnection.getExec(execId); - const [error, stream] = await 
safe(exec.start(options)); /* in hijacked mode, stream is a net.socket */ - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - return stream; -} - -async function getExec(execId) { - assert.strictEqual(typeof execId, 'string'); - - const exec = gConnection.getExec(execId); - const [error, result] = await safe(exec.inspect()); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - return { exitCode: result.ExitCode, running: result.Running }; -} - -async function resizeExec(execId, options) { - assert.strictEqual(typeof execId, 'string'); - assert.strictEqual(typeof options, 'object'); - - const exec = gConnection.getExec(execId); - const [error] = await safe(exec.resize(options)); // { h, w } - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); -} - -async function getEvents(options) { - assert.strictEqual(typeof options, 'object'); - - const [error, stream] = await safe(gConnection.getEvents(options)); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - return stream; -} - -async function getStats(containerId, options) { - assert.strictEqual(typeof containerId, 'string'); - assert.strictEqual(typeof options, 'object'); - - const container = gConnection.getContainer(containerId); - - const [error, result] = await safe(container.stats({ stream: !!options.stream })); - if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, error); - - return result; -} - -async function info() { - const [error, result] = await safe(gConnection.info()); - if (error) 
throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`); - return result; -} - -async function df(options) { - assert.strictEqual(typeof options, 'object'); - - const [error, result] = await safe(gConnection.df(options)); - if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`); - return result; -} - -async function update(name, memory) { - assert.strictEqual(typeof name, 'string'); - assert.strictEqual(typeof memory, 'number'); - - // scale back db containers, if possible. this is retried because updating memory constraints can fail - // with failed to write to memory.memsw.limit_in_bytes: write /sys/fs/cgroup/memory/docker/xx/memory.memsw.limit_in_bytes: device or resource busy - for (let times = 0; times < 10; times++) { - const [error] = await safe(shell.spawn('docker', ['update', '--memory', memory, '--memory-swap', '-1', name], { encoding: 'utf8' })); - if (!error) return; - await timers.setTimeout(60 * 1000); - } - - throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to update container'); -} - export default { ping, diff --git a/src/groups.js b/src/groups.js index 340a55747..e00b3dffb 100644 --- a/src/groups.js +++ b/src/groups.js @@ -7,9 +7,6 @@ import database from './database.js'; import eventlog from './eventlog.js'; import safe from 'safetydance'; -const _getMembership = getMembership; - - const GROUPS_FIELDS = [ 'id', 'name', 'source' ].join(','); // keep this in sync with validateUsername @@ -290,6 +287,8 @@ async function setAllowedApps(group, appIds, auditSource) { } } +const _getMembership = getMembership; + export default { add, del, diff --git a/src/hat.js b/src/hat.js index e4b03cca3..40b7cf73b 100644 --- a/src/hat.js +++ b/src/hat.js @@ -1,7 +1,7 @@ import crypto from 'node:crypto'; -export default hat; - function hat(bits) { return crypto.randomBytes(bits / 8).toString('hex'); } + +export default hat; diff --git a/src/mail.js b/src/mail.js index 
df58dd61a..0ee2e9051 100644 --- a/src/mail.js +++ b/src/mail.js @@ -30,8 +30,6 @@ const OWNERTYPE_APP = 'app'; const TYPE_MAILBOX = 'mailbox'; const TYPE_LIST = 'list'; const TYPE_ALIAS = 'alias'; -const _delByDomain = delByDomain; -const _updateDomain = updateDomain; const DNS_OPTIONS = { timeout: 20000, tries: 4 }; @@ -831,20 +829,6 @@ async function listMailboxes(page, perPage) { return results; } -async function getStats(domain) { - assert.strictEqual(typeof domain, 'string'); - - const mailboxes = await listMailboxesByDomain(domain, 1, 10000); - const mailingLists = await listMailingListsByDomain(domain, 1, 10000); - - return { - mailboxCount: mailboxes.length, - pop3Count: mailboxes.filter(mb => mb.enablePop3).length, - aliasCount: mailboxes.map(mb => mb.aliases.length).reduce((a, b) => a + b, 0), - mailingListCount: mailingLists.length - }; -} - async function delByDomain(domain) { assert.strictEqual(typeof domain, 'string'); @@ -1061,6 +1045,20 @@ async function listMailingListsByDomain(domain, page, perPage) { return results; } +async function getStats(domain) { + assert.strictEqual(typeof domain, 'string'); + + const mailboxes = await listMailboxesByDomain(domain, 1, 10000); + const mailingLists = await listMailingListsByDomain(domain, 1, 10000); + + return { + mailboxCount: mailboxes.length, + pop3Count: mailboxes.filter(mb => mb.enablePop3).length, + aliasCount: mailboxes.map(mb => mb.aliases.length).reduce((a, b) => a + b, 0), + mailingListCount: mailingLists.length + }; +} + async function getMailingList(name, domain) { assert.strictEqual(typeof name, 'string'); assert.strictEqual(typeof domain, 'string'); @@ -1195,6 +1193,8 @@ async function checkStatus() { } } +const _delByDomain = delByDomain; + export default { getStatus, checkConfiguration, @@ -1237,5 +1237,5 @@ export default { TYPE_LIST, TYPE_ALIAS, _delByDomain, - _updateDomain, + _updateDomain: updateDomain, }; diff --git a/src/notifications.js b/src/notifications.js index 
7c1ccce0c..880a1baa6 100644 --- a/src/notifications.js +++ b/src/notifications.js @@ -29,9 +29,6 @@ const TYPE_REBOOT = 'reboot'; const TYPE_UPDATE_UBUNTU = 'ubuntuUpdate'; const TYPE_BOX_UPDATE = 'boxUpdate'; const TYPE_MANUAL_APP_UPDATE_NEEDED = 'manualAppUpdate'; -const TYPE_DOMAIN_CONFIG_CHECK_FAILED = 'domainConfigCheckFailed'; -const _add = add; - const NOTIFICATION_FIELDS = [ 'id', 'eventId', 'type', 'title', 'message', 'creationTime', 'acknowledged', 'context' ]; @@ -371,6 +368,8 @@ async function onEvent(eventId, action, source, data) { } } +const TYPE_DOMAIN_CONFIG_CHECK_FAILED = 'domainConfigCheckFailed'; + export default { get, update, @@ -396,5 +395,5 @@ export default { TYPE_DOMAIN_CONFIG_CHECK_FAILED, pin, unpin, - _add, + _add: add, }; diff --git a/src/once.js b/src/once.js index cb5718d65..c48be8eff 100644 --- a/src/once.js +++ b/src/once.js @@ -2,8 +2,6 @@ import debugModule from 'debug'; const debug = debugModule('box:once'); -export default once; - // https://github.com/isaacs/once/blob/main/LICENSE (ISC) function once (fn) { const f = function () { @@ -17,3 +15,5 @@ function once (fn) { f.called = false; return f; } + +export default once; diff --git a/src/platform.js b/src/platform.js index 9ab8127a4..066fd516e 100644 --- a/src/platform.js +++ b/src/platform.js @@ -156,41 +156,6 @@ async function startInfra(restoreOptions) { await onInfraReady(true /* infraChanged */); } -async function initialize() { - debug('initialize: start platform'); - - await database.initialize(); - await tasks.stopAllTasks(); // when box code crashes, systemd will clean up the control-group but not the tasks - await locks.releaseAll(); - await backups.clearTasks(); - - // always generate webadmin config since we have no versioning mechanism for the ejs - const dashboardLocation = await dashboard.getLocation(); - if (dashboardLocation.domain) await onDashboardLocationSet(dashboardLocation.subdomain, dashboardLocation.domain); - - // configure nginx to be reachable by 
IP when not activated. for the moment, the IP based redirect exists even after domain is setup - // just in case user forgot or some network error happenned in the middle (then browser refresh takes you to activation page) - // we remove the config as a simple security measure to not expose IP <-> domain - const activated = await users.isActivated(); - if (!activated) { - debug('initialize: not activated. generating IP based redirection config'); - await safe(reverseProxy.writeDefaultConfig({ activated: false }), { debug }); // ok to fail if no disk space - } - - await updater.notifyBoxUpdate(); - - if (await users.isActivated()) safe(onActivated({ skipDnsSetup: false }), { debug }); // run in background -} - -async function uninitialize() { - debug('uninitializing platform'); - - if (await users.isActivated()) await onDeactivated(); - - await tasks.stopAllTasks(); // when box code is stopped/restarted, we get a chance to cleanup all the sudo+tasks - await database.uninitialize(); -} - async function onActivated(restoreOptions) { assert.strictEqual(typeof restoreOptions, 'object'); // { skipDnsSetup } @@ -219,6 +184,15 @@ async function onDeactivated() { await oidcServer.stop(); } +async function uninitialize() { + debug('uninitializing platform'); + + if (await users.isActivated()) await onDeactivated(); + + await tasks.stopAllTasks(); // when box code is stopped/restarted, we get a chance to cleanup all the sudo+tasks + await database.uninitialize(); +} + async function onDashboardLocationSet(subdomain, domain) { assert.strictEqual(typeof subdomain, 'string'); assert.strictEqual(typeof domain, 'string'); @@ -228,6 +202,32 @@ async function onDashboardLocationSet(subdomain, domain) { await oidcServer.start(); } +async function initialize() { + debug('initialize: start platform'); + + await database.initialize(); + await tasks.stopAllTasks(); // when box code crashes, systemd will clean up the control-group but not the tasks + await locks.releaseAll(); + await 
backups.clearTasks(); + + // always generate webadmin config since we have no versioning mechanism for the ejs + const dashboardLocation = await dashboard.getLocation(); + if (dashboardLocation.domain) await onDashboardLocationSet(dashboardLocation.subdomain, dashboardLocation.domain); + + // configure nginx to be reachable by IP when not activated. for the moment, the IP based redirect exists even after domain is setup + // just in case user forgot or some network error happenned in the middle (then browser refresh takes you to activation page) + // we remove the config as a simple security measure to not expose IP <-> domain + const activated = await users.isActivated(); + if (!activated) { + debug('initialize: not activated. generating IP based redirection config'); + await safe(reverseProxy.writeDefaultConfig({ activated: false }), { debug }); // ok to fail if no disk space + } + + await updater.notifyBoxUpdate(); + + if (await users.isActivated()) safe(onActivated({ skipDnsSetup: false }), { debug }); // run in background +} + async function onDashboardLocationChanged(auditSource) { assert.strictEqual(typeof auditSource, 'object'); diff --git a/src/promise-retry.js b/src/promise-retry.js index 5a0dfc4c7..5489a6cb9 100644 --- a/src/promise-retry.js +++ b/src/promise-retry.js @@ -2,8 +2,6 @@ import assert from 'node:assert'; import timers from 'timers/promises'; import util from 'node:util'; -export default promiseRetry; - async function promiseRetry(options, asyncFunction) { assert.strictEqual(typeof options, 'object'); assert(util.types.isAsyncFunction(asyncFunction)); @@ -21,3 +19,5 @@ async function promiseRetry(options, asyncFunction) { } } } + +export default promiseRetry; diff --git a/src/reverseproxy.js b/src/reverseproxy.js index ede4ee306..e3c29068a 100644 --- a/src/reverseproxy.js +++ b/src/reverseproxy.js @@ -67,15 +67,6 @@ async function providerMatches(domainObject, cert) { return !mismatch; } -async function notifyCertChange() { - await 
mailServer.checkCertificate(); - await shell.sudo([ RESTART_SERVICE_CMD, 'box' ], {}); // directory server - const allApps = (await apps.list()).filter(app => app.runState !== apps.RSTATE_STOPPED); - for (const app of allApps) { - if (app.manifest.addons?.tls) await setupTlsAddon(app); - } -} - async function reload() { if (constants.TEST) return; @@ -83,27 +74,6 @@ async function reload() { if (error) throw new BoxError(BoxError.NGINX_ERROR, `Error reloading nginx: ${error.message}`); } -async function setFallbackCertificate(domain, certificate) { - assert.strictEqual(typeof domain, 'string'); - assert(certificate && typeof certificate === 'object'); - - debug(`setFallbackCertificate: setting certs for domain ${domain}`); - if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.cert`), certificate.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); - if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.key`), certificate.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); - - await reload(); - await notifyCertChange(); // if domain uses fallback certs, propagate immediately -} - -async function restoreFallbackCertificates() { - const result = await domains.list(); - - for (const domain of result) { - if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain.domain}.host.cert`), domain.fallbackCertificate.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); - if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain.domain}.host.key`), domain.fallbackCertificate.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); - } -} - function getAppLocationsSync(app) { assert.strictEqual(typeof app, 'object'); @@ -184,7 +154,6 @@ async function getDirectoryServerCertificate() { return await getCertificate(dashboardLocation); } -// write if contents mismatch (thus preserving mtime) function writeFileSync(filePath, data) { assert.strictEqual(typeof filePath, 
'string'); assert.strictEqual(typeof data, 'string'); @@ -195,6 +164,15 @@ function writeFileSync(filePath, data) { return true; } +async function restoreFallbackCertificates() { + const result = await domains.list(); + + for (const domain of result) { + if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain.domain}.host.cert`), domain.fallbackCertificate.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); + if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain.domain}.host.key`), domain.fallbackCertificate.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); + } +} + async function setupTlsAddon(app) { assert.strictEqual(typeof app, 'object'); @@ -233,6 +211,28 @@ async function setupTlsAddon(app) { if (changed || removed) await docker.restartContainer(app.id); } +// write if contents mismatch (thus preserving mtime) +async function notifyCertChange() { + await mailServer.checkCertificate(); + await shell.sudo([ RESTART_SERVICE_CMD, 'box' ], {}); // directory server + const allApps = (await apps.list()).filter(app => app.runState !== apps.RSTATE_STOPPED); + for (const app of allApps) { + if (app.manifest.addons?.tls) await setupTlsAddon(app); + } +} + +async function setFallbackCertificate(domain, certificate) { + assert.strictEqual(typeof domain, 'string'); + assert(certificate && typeof certificate === 'object'); + + debug(`setFallbackCertificate: setting certs for domain ${domain}`); + if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.cert`), certificate.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); + if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.key`), certificate.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message); + + await reload(); + await notifyCertChange(); // if domain uses fallback certs, propagate immediately +} + // writes latest certificate to disk and returns the path async function writeCertificate(location) 
{ assert.strictEqual(typeof location, 'object'); diff --git a/src/services.js b/src/services.js index 0717605a1..c58f78a71 100644 --- a/src/services.js +++ b/src/services.js @@ -51,217 +51,9 @@ const MV_VOLUME_CMD = path.join(import.meta.dirname, 'scripts/mvvolume.sh'); // setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost // teardown is destructive. app data stored with the addon is lost // addons have 1-1 mapping with the manifest -const ADDONS = { - turn: { - setup: setupTurn, - teardown: teardownTurn, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - email: { - setup: setupEmail, - teardown: teardownEmail, - backup: NOOP, - restore: setupEmail, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - ldap: { - setup: setupLdap, - teardown: teardownLdap, - backup: NOOP, - restore: setupLdap, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - localstorage: { - setup: setupLocalStorage, - teardown: teardownLocalStorage, - backup: backupLocalStorage, // no backup because it's already inside app data - restore: restoreLocalStorage, - getDynamicEnvironment: NOOP, - clear: clearLocalStorage, - }, - mongodb: { - setup: setupMongoDb, - teardown: teardownMongoDb, - backup: backupMongoDb, - restore: restoreMongoDb, - getDynamicEnvironment: NOOP, - clear: clearMongodb, - }, - mysql: { - setup: setupMySql, - teardown: teardownMySql, - backup: backupMySql, - restore: restoreMySql, - getDynamicEnvironment: NOOP, - clear: clearMySql, - }, - postgresql: { - setup: setupPostgreSql, - teardown: teardownPostgreSql, - backup: backupPostgreSql, - restore: restorePostgreSql, - getDynamicEnvironment: NOOP, - clear: clearPostgreSql, - }, - proxyAuth: { - setup: setupProxyAuth, - teardown: teardownProxyAuth, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP - }, - recvmail: { - setup: setupRecvMail, - teardown: teardownRecvMail, - backup: NOOP, - restore: 
setupRecvMail, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - redis: { - setup: setupRedis, - teardown: teardownRedis, - backup: backupRedis, - restore: restoreRedis, - getDynamicEnvironment: NOOP, - clear: clearRedis, - }, - sendmail: { - setup: setupSendMail, - teardown: teardownSendMail, - backup: NOOP, - restore: setupSendMail, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - scheduler: { - setup: NOOP, - teardown: NOOP, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - docker: { - setup: setupDocker, - teardown: teardownDocker, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - tls: { - setup: setupTls, - teardown: teardownTls, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - oauth: { // kept for backward compatibility. keep teardown for uninstall to work - setup: NOOP, - teardown: teardownOauth, - backup: NOOP, - restore: NOOP, - getDynamicEnvironment: NOOP, - clear: NOOP, - }, - oidc: { - setup: setupOidc, - teardown: teardownOidc, - backup: NOOP, - restore: setupOidc, - getDynamicEnvironment: getDynamicEnvironmentOidc, - clear: NOOP, - } -}; - // Lazily initialized to avoid circular dependency TDZ issues at module load time // (docker, mailServer, sftp may not be fully initialized when this module first evaluates) let _services; -function SERVICES() { - if (_services) return _services; - _services = { - turn: { - name: 'TURN', - status: statusTurn, - restart: docker.restartContainer.bind(null, 'turn'), - defaultMemoryLimit: 256 * 1024 * 1024 - }, - mail: { - name: 'Mail', - status: containerStatus.bind(null, 'mail'), - restart: mailServer.restart, - defaultMemoryLimit: mailServer.DEFAULT_MEMORY_LIMIT - }, - mongodb: { - name: 'MongoDB', - status: statusMongodb, - restart: restartMongodb, - defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 - }, - mysql: { - name: 'MySQL', - status: 
containerStatus.bind(null, 'mysql'), - restart: docker.restartContainer.bind(null, 'mysql'), - defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 - }, - postgresql: { - name: 'PostgreSQL', - status: containerStatus.bind(null, 'postgresql'), - restart: docker.restartContainer.bind(null, 'postgresql'), - defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 - }, - docker: { - name: 'Docker', - status: statusDocker, - restart: restartDocker, - defaultMemoryLimit: 0 - }, - unbound: { - name: 'Unbound', - status: statusUnbound, - restart: restartUnbound, - defaultMemoryLimit: 0 - }, - sftp: { - name: 'Filemanager', - status: containerStatus.bind(null, 'sftp'), - restart: docker.restartContainer.bind(null, 'sftp'), - defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT - }, - graphite: { - name: 'Metrics', - status: statusGraphite, - restart: restartGraphite, - defaultMemoryLimit: 256 * 1024 * 1024 - }, - nginx: { - name: 'Nginx', - status: statusNginx, - restart: restartNginx, - defaultMemoryLimit: 0 - } - }; - return _services; -} - -const APP_SERVICES = { - redis: { - status: (instance, done) => containerStatus(`redis-${instance}`, done), - start: (instance, done) => docker.startContainer(`redis-${instance}`, done), - stop: (instance, done) => docker.stopContainer(`redis-${instance}`, done), - restart: (instance, done) => docker.restartContainer(`redis-${instance}`, done), - defaultMemoryLimit: 256 * 1024 * 1024 - } -}; - function requiresUpgrade(existingImageRef, currentImageRef) { const etag = docker.parseImageRef(existingImageRef), ctag = docker.parseImageRef(currentImageRef); @@ -337,17 +129,6 @@ async function containerStatus(containerName) { }; } -async function listServices() { - const serviceIds = Object.keys(SERVICES()).map(k => { return { id: k, name: SERVICES()[k].name }; }); - - const result = await apps.list(); - for (const app of result) { - if (app.manifest.addons?.redis && 
app.enableRedis) serviceIds.push({ id: `redis:${app.id}`, name: `Redis ${app.id}`}); - } - - return serviceIds; -} - async function getConfig() { return await settings.getJson(settings.SERVICES_CONFIG_KEY) || {}; } @@ -367,192 +148,16 @@ async function getServiceConfig(id) { return app.servicesConfig[name] || {}; } -async function getServiceStatus(id) { - assert.strictEqual(typeof id, 'string'); - - const [name, instance ] = id.split(':'); - let containerStatusFunc, service; - - if (instance) { - service = APP_SERVICES()[name]; - if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - containerStatusFunc = service.status.bind(null, instance); - } else if (SERVICES()[name]) { - service = SERVICES()[name]; - containerStatusFunc = service.status; - } else { - throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); +const APP_SERVICES = { + redis: { + status: (instance, done) => containerStatus(`redis-${instance}`, done), + start: (instance, done) => docker.startContainer(`redis-${instance}`, done), + stop: (instance, done) => docker.stopContainer(`redis-${instance}`, done), + restart: (instance, done) => docker.restartContainer(`redis-${instance}`, done), + defaultMemoryLimit: 256 * 1024 * 1024 } +}; - const result = { - name: name, - status: null, - memoryUsed: 0, - memoryPercent: 0, - error: null, - healthcheck: null, - config: {} - }; - - const status = await containerStatusFunc(); - result.status = status.status; - result.memoryUsed = status.memoryUsed; - result.memoryPercent = status.memoryPercent; - result.defaultMemoryLimit = service.defaultMemoryLimit; - result.error = status.error || null; - result.healthcheck = status.healthcheck || null; - - result.config = await getServiceConfig(id); - - if (!result.config.memoryLimit && service.defaultMemoryLimit) { - result.config.memoryLimit = service.defaultMemoryLimit; - } - - return result; -} - -async function configureService(id, data, auditSource) { - assert.strictEqual(typeof id, 
'string'); - assert.strictEqual(typeof data, 'object'); - assert.strictEqual(typeof auditSource, 'object'); - - const [name, instance ] = id.split(':'); - let needsRebuild = false; - - if (instance) { - if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - - const app = await apps.get(instance); - if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found'); - - const servicesConfig = app.servicesConfig; - needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; - servicesConfig[name] = data; - - await apps.update(instance, { servicesConfig }); - } else if (SERVICES()[name]) { - const servicesConfig = await getConfig(); - needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there - - servicesConfig[name] = data; - - await settings.setJson(settings.SERVICES_CONFIG_KEY, servicesConfig); - } else { - throw new BoxError(BoxError.NOT_FOUND, 'No such service'); - } - - debug(`configureService: ${id} rebuild=${needsRebuild}`); - - // do this in background - if (needsRebuild) { - safe(rebuildService(id, auditSource), { debug }); - } else { - safe(applyMemoryLimit(id), { debug }); - } - - await eventlog.add(eventlog.ACTION_SERVICE_CONFIGURE, auditSource, { id, data }); -} - -async function getServiceLogs(id, options) { - assert.strictEqual(typeof id, 'string'); - assert(options && typeof options === 'object'); - - const [name, instance ] = id.split(':'); - - if (instance) { - if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - } else if (!SERVICES()[name]) { - throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - } - - debug(`getServiceLogs: getting logs for ${name}`); - - let cp; - - if (name === 'docker' || name === 'unbound') { - cp = logs.journalctl(name, options); - } else if (name === 'nginx') { - cp = logs.tail(['/var/log/nginx/access.log', '/var/log/nginx/error.log'], { lines: 
options.lines, follow: options.follow }); - } else { - const containerName = APP_SERVICES()[name] ? `${name}-${instance}` : name; - cp = logs.tail([path.join(paths.LOG_DIR, containerName, 'app.log')], { lines: options.lines, follow: options.follow }); - } - - const logStream = new logs.LogStream({ format: options.format || 'json', source: name }); - logStream.on('close', () => cp.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close' - - cp.stdout.pipe(logStream); - - return logStream; -} - -async function rebuildService(id, auditSource) { - assert.strictEqual(typeof id, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - if (constants.TEST && !process.env.TEST_CREATE_INFRA) return; - - // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged - // passing an infra version of 'none' will not attempt to purge existing data - const [name, instance] = id.split(':'); - - switch (name) { - case 'turn': - await startTurn({ version: 'none' }); - break; - case 'mongodb': - await startMongodb({ version: 'none' }); - break; - case 'postgresql': - await startPostgresql({ version: 'none' }); - break; - case 'mysql': - await startMysql({ version: 'none' }); - break; - case 'sftp': - await sftp.start({ version: 'none' }); - break; - case 'graphite': - await startGraphite({ version: 'none' }); - break; - case 'mail': - await mailServer.start({ version: 'none' }); - break; - case 'redis': { - await safe(shell.spawn('docker', ['rm', '-f', `redis-${instance}`], { encoding: 'utf8' })); // ignore error - const app = await apps.get(instance); - if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container - break; - } - default: - // nothing to rebuild for now. - } - - safe(applyMemoryLimit(id), { debug }); // do this in background. 
ok to fail - - await eventlog.add(eventlog.ACTION_SERVICE_REBUILD, auditSource, { id }); -} - -async function restartService(id, auditSource) { - assert.strictEqual(typeof id, 'string'); - assert.strictEqual(typeof auditSource, 'object'); - - const [name, instance ] = id.split(':'); - - if (instance) { - if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - - await APP_SERVICES()[name].restart(instance); - } else if (SERVICES()[name]) { - await SERVICES()[name].restart(); - } else { - throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - } - - await eventlog.add(eventlog.ACTION_SERVICE_RESTART, auditSource, { id }); -} - -// in the future, we can refcount and lazy start global services async function startAppServices(app) { assert.strictEqual(typeof app, 'object'); @@ -566,7 +171,6 @@ async function startAppServices(app) { } } -// in the future, we can refcount and stop global services as well async function stopAppServices(app) { assert.strictEqual(typeof app, 'object'); @@ -598,94 +202,39 @@ async function waitForContainer(containerName, tokenEnvName) { }); } -async function setupAddons(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - if (!addons) return; - - debug('setupAddons: Setting up %j', Object.keys(addons)); - - for (const addon of Object.keys(addons)) { - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); - - debug(`setupAddons: setting up addon ${addon} with options ${JSON.stringify(addons[addon])}`); - - await ADDONS[addon].setup(app, addons[addon]); - } -} - -async function teardownAddons(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - if (!addons) return; - - debug('teardownAddons: Tearing down %j', Object.keys(addons)); - - for (const addon of Object.keys(addons)) { - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such 
addon: ${addon}`); - - debug(`teardownAddons: Tearing down addon ${addon} with options ${JSON.stringify(addons[addon])}`); - - await ADDONS[addon].teardown(app, addons[addon]); - } -} - -async function backupAddons(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - if (!addons) return; - - debug('backupAddons: backing up %j', Object.keys(addons)); - - for (const addon of Object.keys(addons)) { - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); - - await ADDONS[addon].backup(app, addons[addon]); - } -} - -async function clearAddons(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - if (!addons) return; - - debug('clearAddons: clearing %j', Object.keys(addons)); - - for (const addon of Object.keys(addons)) { - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); - - await ADDONS[addon].clear(app, addons[addon]); - } -} - -async function restoreAddons(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - if (!addons) return; - - debug('restoreAddons: restoring %j', Object.keys(addons)); - - for (const addon of Object.keys(addons)) { - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); - - await ADDONS[addon].restore(app, addons[addon]); - } -} - -async function importAppDatabase(app, addon) { - assert.strictEqual(typeof app, 'object'); +async function exportDatabase(addon) { assert.strictEqual(typeof addon, 'string'); - if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + debug(`exportDatabase: exporting ${addon}`); - await ADDONS[addon].setup(app, app.manifest.addons[addon]); - await ADDONS[addon].clear(app, app.manifest.addons[addon]); // clear in case we crashed in a restore - await ADDONS[addon].restore(app, 
app.manifest.addons[addon]); + if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) { + debug(`exportDatabase: already exported addon ${addon} in previous run`); + return; + } + + const allApps = await apps.list(); + + for (const app of allApps) { + if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon + if (app.installationState === apps.ISTATE_ERROR) continue; // missing db causes crash in old app addon containers + + debug(`exportDatabase: exporting addon ${addon} of app ${app.id}`); + + // eslint-disable-next-line no-use-before-define -- circular: ADDONS references setup fns, setup fns call exportDatabase + const [error] = await safe(ADDONS[addon].backup(app, app.manifest.addons[addon])); + if (error) { + debug(`exportDatabase: error exporting ${addon} of app ${app.id}. %o`, error); + // for errored apps, we can ignore if export had an error + if (app.installationState === apps.ISTATE_ERROR) continue; + throw error; + } + } + + safe.fs.writeFileSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8'); + if (safe.error) throw BoxError(BoxError.FS_ERROR, 'Error writing export checkpoint file'); + // note: after this point, we are restart safe. it's ok if the box code crashes at this point + await shell.spawn('docker', ['rm', '-f', addon], { encoding: 'utf8' }); // what if db writes something when quitting ... 
+ await shell.sudo([ RMADDONDIR_CMD, addon ], {}); // ready to start afresh } async function importDatabase(addon) { @@ -712,141 +261,6 @@ async function importDatabase(addon) { safe.fs.unlinkSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`)); // clean up for future migrations } -async function exportDatabase(addon) { - assert.strictEqual(typeof addon, 'string'); - - debug(`exportDatabase: exporting ${addon}`); - - if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) { - debug(`exportDatabase: already exported addon ${addon} in previous run`); - return; - } - - const allApps = await apps.list(); - - for (const app of allApps) { - if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon - if (app.installationState === apps.ISTATE_ERROR) continue; // missing db causes crash in old app addon containers - - debug(`exportDatabase: exporting addon ${addon} of app ${app.id}`); - - const [error] = await safe(ADDONS[addon].backup(app, app.manifest.addons[addon])); - if (error) { - debug(`exportDatabase: error exporting ${addon} of app ${app.id}. %o`, error); - // for errored apps, we can ignore if export had an error - if (app.installationState === apps.ISTATE_ERROR) continue; - throw error; - } - } - - safe.fs.writeFileSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8'); - if (safe.error) throw BoxError(BoxError.FS_ERROR, 'Error writing export checkpoint file'); - // note: after this point, we are restart safe. it's ok if the box code crashes at this point - await shell.spawn('docker', ['rm', '-f', addon], { encoding: 'utf8' }); // what if db writes something when quitting ... 
- await shell.sudo([ RMADDONDIR_CMD, addon ], {}); // ready to start afresh -} - -async function applyMemoryLimit(id) { - assert.strictEqual(typeof id, 'string'); - - const [name, instance] = id.split(':'); - let containerName, memoryLimit; - const serviceConfig = await getServiceConfig(id); - - if (instance) { - if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); - - containerName = `${name}-${instance}`; - memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES()[name].defaultMemoryLimit; - } else if (SERVICES()[name]) { - if (name === 'mongodb' && !await hasAVX()) { - debug('applyMemoryLimit: skipping mongodb because CPU does not have AVX'); - return; - } - - containerName = name; - memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES()[name].defaultMemoryLimit; - } else { - throw new BoxError(BoxError.NOT_FOUND, 'No such service'); - } - - debug(`applyMemoryLimit: ${containerName} ${JSON.stringify(serviceConfig)}`); - - await docker.update(containerName, memoryLimit); -} - -async function startServices(existingInfra, progressCallback) { - assert.strictEqual(typeof existingInfra, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - // name must match the image name in infra - const services = [ - { name: 'mail', label: 'Mail', fn: mailServer.start }, // start this first to reduce email downtime - { name: 'turn', label: 'TURN', fn: startTurn }, - { name: 'mysql', label: 'MySQL', fn: startMysql }, - { name: 'postgresql', label: 'PostgreSQL', fn: startPostgresql }, - { name: 'mongodb', label: 'MongoDB', fn: startMongodb }, - { name: 'redis', label: 'Redis', fn: startRedis }, - { name: 'graphite', label: 'Graphite', fn: startGraphite }, - { name: 'sftp', label: 'Filemanager',fn: sftp.start }, - ]; - - const fullRebuild = existingInfra.version !== infra.version; - if (!fullRebuild && !existingInfra.images) throw new 
BoxError(BoxError.INTERNAL_ERROR, 'existing infra images is corrupt'); - - for (const {name, label, fn} of services) { - if (!fullRebuild && infra.images[name] === existingInfra.images[name]) continue; - - progressCallback({ message: `Starting ${label} service`}); - await fn(existingInfra); - } - - // we always start db containers with unlimited memory. we then scale them down per configuration - for (const id of [ 'mysql', 'postgresql', 'mongodb' ]) { - safe(applyMemoryLimit(id), { debug }); // no waiting. and it's ok if applying service configs fails - } -} - -async function getEnvironment(app) { - assert.strictEqual(typeof app, 'object'); - - // contains values for environment from addonConfigs db - const result = await addonConfigs.getByAppId(app.id); - - // convert result to object to ensure unique env names if we overwrite static ones from the previously stored value in addonconfigs - let env = {}; - result.forEach(e => { env[e.name] = e.value; }); - - // get environment configs which are dynamic e.g generated based on dashboard domain and are not stored in db - for (const addon in (app.manifest.addons || {})) { - const configs = await ADDONS[addon].getDynamicEnvironment(app, {}); - if (configs) env = { ...env, ...configs }; - } - - return Object.keys(env).map(function (e) { return e + '=' + env[e]; }); -} - -function getContainerNamesSync(app, addons) { - assert.strictEqual(typeof app, 'object'); - assert(!addons || typeof addons === 'object'); - - let names = []; - - if (!addons) return names; - - for (const addon in addons) { - switch (addon) { - case 'scheduler': - // names here depend on how scheduler.js creates containers - names = names.concat(Object.keys(addons.scheduler).map(function (taskName) { return app.id + '-' + taskName; })); - break; - default: break; - } - } - - return names; -} - async function setupLocalStorage(app, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof options, 'object'); @@ -972,54 +386,6 @@ async 
function setupTurn(app, options) { await addonConfigs.set(app.id, 'turn', env); } -async function startTurn(existingInfra) { - assert.strictEqual(typeof existingInfra, 'object'); - - const serviceConfig = await getServiceConfig('turn'); - const image = infra.images.turn; - const memoryLimit = serviceConfig.memoryLimit || SERVICES()['turn'].defaultMemoryLimit; - const { fqdn:realm } = await dashboard.getLocation(); - - let turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET); - if (!turnSecret) { - debug('startTurn: generating turn secret'); - turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter - await blobs.setString(blobs.ADDON_TURN_SECRET, turnSecret); - } - - const readOnly = !serviceConfig.recoveryMode ? '--read-only' : ''; - const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : ''; - const verboseLogs = serviceConfig.recoveryMode ? 'true' : ''; - - // docker's userland proxy spins a process for every mapped port. this makes exposing port ranges unviable - // despite --userland-proxy=false, docker create a firewall rule for each port in a range. 
this takes over 20s to start/stop containers - // constants.TURN_PORT, constants.TURN_TLS_PORT, constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END are exposed automatically in host mode - // https://github.com/moby/moby/issues/8356 and https://github.com/moby/moby/issues/14856 https://github.com/moby/moby/issues/36214 - const runCmd = `docker run --restart=unless-stopped -d --name=turn \ - --hostname turn \ - --net host \ - --log-driver syslog \ - --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \ - --log-opt syslog-format=rfc5424 \ - --log-opt tag=turn \ - -m ${memoryLimit} \ - --memory-swap -1 \ - -e CLOUDRON_TURN_SECRET=${turnSecret} \ - -e CLOUDRON_REALM=${realm} \ - -e CLOUDRON_VERBOSE_LOGS=${verboseLogs} \ - --label isCloudronManaged=true \ - ${readOnly} -v /tmp -v /run ${image} ${cmd}`; - - debug('startTurn: stopping and deleting previous turn container'); - await docker.stopContainer('turn'); - await docker.deleteContainer('turn'); - - debug('startTurn: starting turn container'); - await shell.bash(runCmd, { encoding: 'utf8' }); - - if (existingInfra.version !== 'none' && existingInfra.images.turn !== image) await docker.deleteImage(existingInfra.images.turn); -} - async function teardownTurn(app, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof options, 'object'); @@ -1793,34 +1159,6 @@ async function teardownDocker(app, options) { await addonConfigs.unset(app.id, 'docker'); } -async function startRedis(existingInfra) { - assert.strictEqual(typeof existingInfra, 'object'); - - const image = infra.images.redis; - const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis, image); - - const allApps = await apps.list(); - - for (const app of allApps) { - if (!app.manifest.addons || !('redis' in app.manifest.addons)) continue; // app doesn't use the addon - - const redisName = `redis-${app.id}`; - - if (upgrading) await backupRedis(app, {}); - - debug(`startRedis: stopping 
and deleting previous redis container ${redisName}`); - await docker.stopContainer(redisName); - await docker.deleteContainer(redisName); - - debug(`startRedis: starting redis container ${redisName}`); - await setupRedis(app, app.manifest.addons.redis); // starts the container - } - - if (upgrading) await importDatabase('redis'); - - if (existingInfra.version !== 'none' && existingInfra.images.redis !== image) await docker.deleteImage(existingInfra.images.redis); -} - async function setupRedis(app, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof options, 'object'); @@ -1930,6 +1268,34 @@ async function backupRedis(app, options) { if (error) throw new BoxError(BoxError.ADDONS_ERROR, `Error backing up Redis: ${error.message}`); } +async function startRedis(existingInfra) { + assert.strictEqual(typeof existingInfra, 'object'); + + const image = infra.images.redis; + const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis, image); + + const allApps = await apps.list(); + + for (const app of allApps) { + if (!app.manifest.addons || !('redis' in app.manifest.addons)) continue; // app doesn't use the addon + + const redisName = `redis-${app.id}`; + + if (upgrading) await backupRedis(app, {}); + + debug(`startRedis: stopping and deleting previous redis container ${redisName}`); + await docker.stopContainer(redisName); + await docker.deleteContainer(redisName); + + debug(`startRedis: starting redis container ${redisName}`); + await setupRedis(app, app.manifest.addons.redis); // starts the container + } + + if (upgrading) await importDatabase('redis'); + + if (existingInfra.version !== 'none' && existingInfra.images.redis !== image) await docker.deleteImage(existingInfra.images.redis); +} + async function restoreRedis(app, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof options, 'object'); @@ -2046,6 +1412,378 @@ async function restartGraphite() { await 
docker.restartContainer('graphite'); } +function SERVICES() { + if (_services) return _services; + _services = { + turn: { + name: 'TURN', + status: statusTurn, + restart: docker.restartContainer.bind(null, 'turn'), + defaultMemoryLimit: 256 * 1024 * 1024 + }, + mail: { + name: 'Mail', + status: containerStatus.bind(null, 'mail'), + restart: mailServer.restart, + defaultMemoryLimit: mailServer.DEFAULT_MEMORY_LIMIT + }, + mongodb: { + name: 'MongoDB', + status: statusMongodb, + restart: restartMongodb, + defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 + }, + mysql: { + name: 'MySQL', + status: containerStatus.bind(null, 'mysql'), + restart: docker.restartContainer.bind(null, 'mysql'), + defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 + }, + postgresql: { + name: 'PostgreSQL', + status: containerStatus.bind(null, 'postgresql'), + restart: docker.restartContainer.bind(null, 'postgresql'), + defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 + }, + docker: { + name: 'Docker', + status: statusDocker, + restart: restartDocker, + defaultMemoryLimit: 0 + }, + unbound: { + name: 'Unbound', + status: statusUnbound, + restart: restartUnbound, + defaultMemoryLimit: 0 + }, + sftp: { + name: 'Filemanager', + status: containerStatus.bind(null, 'sftp'), + restart: docker.restartContainer.bind(null, 'sftp'), + defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT + }, + graphite: { + name: 'Metrics', + status: statusGraphite, + restart: restartGraphite, + defaultMemoryLimit: 256 * 1024 * 1024 + }, + nginx: { + name: 'Nginx', + status: statusNginx, + restart: restartNginx, + defaultMemoryLimit: 0 + } + }; + return _services; +} + +async function listServices() { + const serviceIds = Object.keys(SERVICES()).map(k => { return { id: k, name: SERVICES()[k].name }; }); + + const result = await apps.list(); + for (const app of result) { + if (app.manifest.addons?.redis && 
app.enableRedis) serviceIds.push({ id: `redis:${app.id}`, name: `Redis ${app.id}`}); + } + + return serviceIds; +} + +async function getServiceStatus(id) { + assert.strictEqual(typeof id, 'string'); + + const [name, instance ] = id.split(':'); + let containerStatusFunc, service; + + if (instance) { + service = APP_SERVICES()[name]; + if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + containerStatusFunc = service.status.bind(null, instance); + } else if (SERVICES()[name]) { + service = SERVICES()[name]; + containerStatusFunc = service.status; + } else { + throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + } + + const result = { + name: name, + status: null, + memoryUsed: 0, + memoryPercent: 0, + error: null, + healthcheck: null, + config: {} + }; + + const status = await containerStatusFunc(); + result.status = status.status; + result.memoryUsed = status.memoryUsed; + result.memoryPercent = status.memoryPercent; + result.defaultMemoryLimit = service.defaultMemoryLimit; + result.error = status.error || null; + result.healthcheck = status.healthcheck || null; + + result.config = await getServiceConfig(id); + + if (!result.config.memoryLimit && service.defaultMemoryLimit) { + result.config.memoryLimit = service.defaultMemoryLimit; + } + + return result; +} + +async function getServiceLogs(id, options) { + assert.strictEqual(typeof id, 'string'); + assert(options && typeof options === 'object'); + + const [name, instance ] = id.split(':'); + + if (instance) { + if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + } else if (!SERVICES()[name]) { + throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + } + + debug(`getServiceLogs: getting logs for ${name}`); + + let cp; + + if (name === 'docker' || name === 'unbound') { + cp = logs.journalctl(name, options); + } else if (name === 'nginx') { + cp = logs.tail(['/var/log/nginx/access.log', '/var/log/nginx/error.log'], { lines: options.lines, 
follow: options.follow }); + } else { + const containerName = APP_SERVICES()[name] ? `${name}-${instance}` : name; + cp = logs.tail([path.join(paths.LOG_DIR, containerName, 'app.log')], { lines: options.lines, follow: options.follow }); + } + + const logStream = new logs.LogStream({ format: options.format || 'json', source: name }); + logStream.on('close', () => cp.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close' + + cp.stdout.pipe(logStream); + + return logStream; +} + +async function restartService(id, auditSource) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof auditSource, 'object'); + + const [name, instance ] = id.split(':'); + + if (instance) { + if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + + await APP_SERVICES()[name].restart(instance); + } else if (SERVICES()[name]) { + await SERVICES()[name].restart(); + } else { + throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + } + + await eventlog.add(eventlog.ACTION_SERVICE_RESTART, auditSource, { id }); +} + +async function applyMemoryLimit(id) { + assert.strictEqual(typeof id, 'string'); + + const [name, instance] = id.split(':'); + let containerName, memoryLimit; + const serviceConfig = await getServiceConfig(id); + + if (instance) { + if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + + containerName = `${name}-${instance}`; + memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES()[name].defaultMemoryLimit; + } else if (SERVICES()[name]) { + if (name === 'mongodb' && !await hasAVX()) { + debug('applyMemoryLimit: skipping mongodb because CPU does not have AVX'); + return; + } + + containerName = name; + memoryLimit = serviceConfig && serviceConfig.memoryLimit ? 
serviceConfig.memoryLimit : SERVICES()[name].defaultMemoryLimit; + } else { + throw new BoxError(BoxError.NOT_FOUND, 'No such service'); + } + + debug(`applyMemoryLimit: ${containerName} ${JSON.stringify(serviceConfig)}`); + + await docker.update(containerName, memoryLimit); +} + +async function startTurn(existingInfra) { + assert.strictEqual(typeof existingInfra, 'object'); + + const serviceConfig = await getServiceConfig('turn'); + const image = infra.images.turn; + const memoryLimit = serviceConfig.memoryLimit || SERVICES()['turn'].defaultMemoryLimit; + const { fqdn:realm } = await dashboard.getLocation(); + + let turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET); + if (!turnSecret) { + debug('startTurn: generating turn secret'); + turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter + await blobs.setString(blobs.ADDON_TURN_SECRET, turnSecret); + } + + const readOnly = !serviceConfig.recoveryMode ? '--read-only' : ''; + const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : ''; + const verboseLogs = serviceConfig.recoveryMode ? 'true' : ''; + + // docker's userland proxy spins a process for every mapped port. this makes exposing port ranges unviable + // despite --userland-proxy=false, docker create a firewall rule for each port in a range. 
this takes over 20s to start/stop containers + // constants.TURN_PORT, constants.TURN_TLS_PORT, constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END are exposed automatically in host mode + // https://github.com/moby/moby/issues/8356 and https://github.com/moby/moby/issues/14856 https://github.com/moby/moby/issues/36214 + const runCmd = `docker run --restart=unless-stopped -d --name=turn \ + --hostname turn \ + --net host \ + --log-driver syslog \ + --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \ + --log-opt syslog-format=rfc5424 \ + --log-opt tag=turn \ + -m ${memoryLimit} \ + --memory-swap -1 \ + -e CLOUDRON_TURN_SECRET=${turnSecret} \ + -e CLOUDRON_REALM=${realm} \ + -e CLOUDRON_VERBOSE_LOGS=${verboseLogs} \ + --label isCloudronManaged=true \ + ${readOnly} -v /tmp -v /run ${image} ${cmd}`; + + debug('startTurn: stopping and deleting previous turn container'); + await docker.stopContainer('turn'); + await docker.deleteContainer('turn'); + + debug('startTurn: starting turn container'); + await shell.bash(runCmd, { encoding: 'utf8' }); + + if (existingInfra.version !== 'none' && existingInfra.images.turn !== image) await docker.deleteImage(existingInfra.images.turn); +} + +async function rebuildService(id, auditSource) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof auditSource, 'object'); + + if (constants.TEST && !process.env.TEST_CREATE_INFRA) return; + + // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged + // passing an infra version of 'none' will not attempt to purge existing data + const [name, instance] = id.split(':'); + + switch (name) { + case 'turn': + await startTurn({ version: 'none' }); + break; + case 'mongodb': + await startMongodb({ version: 'none' }); + break; + case 'postgresql': + await startPostgresql({ version: 'none' }); + break; + case 'mysql': + await startMysql({ version: 'none' }); + break; + case 'sftp': + await sftp.start({ 
version: 'none' }); + break; + case 'graphite': + await startGraphite({ version: 'none' }); + break; + case 'mail': + await mailServer.start({ version: 'none' }); + break; + case 'redis': { + await safe(shell.spawn('docker', ['rm', '-f', `redis-${instance}`], { encoding: 'utf8' })); // ignore error + const app = await apps.get(instance); + if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container + break; + } + default: + // nothing to rebuild for now. + } + + safe(applyMemoryLimit(id), { debug }); // do this in background. ok to fail + + await eventlog.add(eventlog.ACTION_SERVICE_REBUILD, auditSource, { id }); +} + +async function configureService(id, data, auditSource) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof data, 'object'); + assert.strictEqual(typeof auditSource, 'object'); + + const [name, instance ] = id.split(':'); + let needsRebuild = false; + + if (instance) { + if (!APP_SERVICES()[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found'); + + const app = await apps.get(instance); + if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found'); + + const servicesConfig = app.servicesConfig; + needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; + servicesConfig[name] = data; + + await apps.update(instance, { servicesConfig }); + } else if (SERVICES()[name]) { + const servicesConfig = await getConfig(); + needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there + + servicesConfig[name] = data; + + await settings.setJson(settings.SERVICES_CONFIG_KEY, servicesConfig); + } else { + throw new BoxError(BoxError.NOT_FOUND, 'No such service'); + } + + debug(`configureService: ${id} rebuild=${needsRebuild}`); + + // do this in background + if (needsRebuild) { + safe(rebuildService(id, auditSource), { debug }); + } else { + safe(applyMemoryLimit(id), { debug }); + } + + await 
eventlog.add(eventlog.ACTION_SERVICE_CONFIGURE, auditSource, { id, data }); +} + +async function startServices(existingInfra, progressCallback) { + assert.strictEqual(typeof existingInfra, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + // name must match the image name in infra + const services = [ + { name: 'mail', label: 'Mail', fn: mailServer.start }, // start this first to reduce email downtime + { name: 'turn', label: 'TURN', fn: startTurn }, + { name: 'mysql', label: 'MySQL', fn: startMysql }, + { name: 'postgresql', label: 'PostgreSQL', fn: startPostgresql }, + { name: 'mongodb', label: 'MongoDB', fn: startMongodb }, + { name: 'redis', label: 'Redis', fn: startRedis }, + { name: 'graphite', label: 'Graphite', fn: startGraphite }, + { name: 'sftp', label: 'Filemanager',fn: sftp.start }, + ]; + + const fullRebuild = existingInfra.version !== infra.version; + if (!fullRebuild && !existingInfra.images) throw new BoxError(BoxError.INTERNAL_ERROR, 'existing infra images is corrupt'); + + for (const {name, label, fn} of services) { + if (!fullRebuild && infra.images[name] === existingInfra.images[name]) continue; + + progressCallback({ message: `Starting ${label} service`}); + await fn(existingInfra); + } + + // we always start db containers with unlimited memory. we then scale them down per configuration + for (const id of [ 'mysql', 'postgresql', 'mongodb' ]) { + safe(applyMemoryLimit(id), { debug }); // no waiting. 
and it's ok if applying service configs fails + } +} + async function teardownOauth(app, options) { assert.strictEqual(typeof app, 'object'); assert.strictEqual(typeof options, 'object'); @@ -2148,6 +1886,257 @@ async function moveDataDir(app, targetVolumeId, targetVolumePrefix) { } } +const ADDONS = { + turn: { + setup: setupTurn, + teardown: teardownTurn, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + email: { + setup: setupEmail, + teardown: teardownEmail, + backup: NOOP, + restore: setupEmail, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + ldap: { + setup: setupLdap, + teardown: teardownLdap, + backup: NOOP, + restore: setupLdap, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + localstorage: { + setup: setupLocalStorage, + teardown: teardownLocalStorage, + backup: backupLocalStorage, // no backup because it's already inside app data + restore: restoreLocalStorage, + getDynamicEnvironment: NOOP, + clear: clearLocalStorage, + }, + mongodb: { + setup: setupMongoDb, + teardown: teardownMongoDb, + backup: backupMongoDb, + restore: restoreMongoDb, + getDynamicEnvironment: NOOP, + clear: clearMongodb, + }, + mysql: { + setup: setupMySql, + teardown: teardownMySql, + backup: backupMySql, + restore: restoreMySql, + getDynamicEnvironment: NOOP, + clear: clearMySql, + }, + postgresql: { + setup: setupPostgreSql, + teardown: teardownPostgreSql, + backup: backupPostgreSql, + restore: restorePostgreSql, + getDynamicEnvironment: NOOP, + clear: clearPostgreSql, + }, + proxyAuth: { + setup: setupProxyAuth, + teardown: teardownProxyAuth, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP + }, + recvmail: { + setup: setupRecvMail, + teardown: teardownRecvMail, + backup: NOOP, + restore: setupRecvMail, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + redis: { + setup: setupRedis, + teardown: teardownRedis, + backup: backupRedis, + restore: restoreRedis, + getDynamicEnvironment: NOOP, + clear: 
clearRedis, + }, + sendmail: { + setup: setupSendMail, + teardown: teardownSendMail, + backup: NOOP, + restore: setupSendMail, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + scheduler: { + setup: NOOP, + teardown: NOOP, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + docker: { + setup: setupDocker, + teardown: teardownDocker, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + tls: { + setup: setupTls, + teardown: teardownTls, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + oauth: { // kept for backward compatibility. keep teardown for uninstall to work + setup: NOOP, + teardown: teardownOauth, + backup: NOOP, + restore: NOOP, + getDynamicEnvironment: NOOP, + clear: NOOP, + }, + oidc: { + setup: setupOidc, + teardown: teardownOidc, + backup: NOOP, + restore: setupOidc, + getDynamicEnvironment: getDynamicEnvironmentOidc, + clear: NOOP, + } +}; + +async function setupAddons(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + if (!addons) return; + + debug('setupAddons: Setting up %j', Object.keys(addons)); + + for (const addon of Object.keys(addons)) { + if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + + debug(`setupAddons: setting up addon ${addon} with options ${JSON.stringify(addons[addon])}`); + + await ADDONS[addon].setup(app, addons[addon]); + } +} + +async function teardownAddons(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + if (!addons) return; + + debug('teardownAddons: Tearing down %j', Object.keys(addons)); + + for (const addon of Object.keys(addons)) { + if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + + debug(`teardownAddons: Tearing down addon ${addon} with options ${JSON.stringify(addons[addon])}`); + + await 
ADDONS[addon].teardown(app, addons[addon]); + } +} + +async function backupAddons(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + if (!addons) return; + + debug('backupAddons: backing up %j', Object.keys(addons)); + + for (const addon of Object.keys(addons)) { + if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + + await ADDONS[addon].backup(app, addons[addon]); + } +} + +async function clearAddons(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + if (!addons) return; + + debug('clearAddons: clearing %j', Object.keys(addons)); + + for (const addon of Object.keys(addons)) { + if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + + await ADDONS[addon].clear(app, addons[addon]); + } +} + +// in the future, we can refcount and lazy start global services +async function restoreAddons(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + if (!addons) return; + + debug('restoreAddons: restoring %j', Object.keys(addons)); + + for (const addon of Object.keys(addons)) { + if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`); + + await ADDONS[addon].restore(app, addons[addon]); + } +} + +async function getEnvironment(app) { + assert.strictEqual(typeof app, 'object'); + + // contains values for environment from addonConfigs db + const result = await addonConfigs.getByAppId(app.id); + + // convert result to object to ensure unique env names if we overwrite static ones from the previously stored value in addonconfigs + let env = {}; + result.forEach(e => { env[e.name] = e.value; }); + + // get environment configs which are dynamic e.g generated based on dashboard domain and are not stored in db + for (const addon in (app.manifest.addons || {})) { + const configs = await 
ADDONS[addon].getDynamicEnvironment(app, {}); + if (configs) env = { ...env, ...configs }; + } + + return Object.keys(env).map(function (e) { return e + '=' + env[e]; }); +} + +function getContainerNamesSync(app, addons) { + assert.strictEqual(typeof app, 'object'); + assert(!addons || typeof addons === 'object'); + + let names = []; + + if (!addons) return names; + + for (const addon in addons) { + switch (addon) { + case 'scheduler': + // names here depend on how scheduler.js creates containers + names = names.concat(Object.keys(addons.scheduler).map(function (taskName) { return app.id + '-' + taskName; })); + break; + default: break; + } + } + + return names; +} + export default { getServiceConfig, diff --git a/src/settings.js b/src/settings.js index f0b45d79f..a740de55a 100644 --- a/src/settings.js +++ b/src/settings.js @@ -29,10 +29,6 @@ const REVERSE_PROXY_CONFIG_KEY = 'reverseproxy_config'; const SERVICES_CONFIG_KEY = 'services_config'; const TIME_ZONE_KEY = 'time_zone'; const TRUSTED_IPS_KEY = 'trusted_ips_key'; -const WEB_SERVER_ORIGIN_KEY = 'web_server_origin'; -const _clear = clear; -const _set = set; - const SETTINGS_FIELDS = [ 'name', 'value' ].join(','); const SETTINGS_BLOB_FIELDS = [ 'name', 'valueBlob' ].join(','); @@ -86,6 +82,10 @@ async function clear() { await database.query('DELETE FROM settings'); } +const WEB_SERVER_ORIGIN_KEY = 'web_server_origin'; + +const _clear = clear; + export default { get, set, @@ -122,5 +122,5 @@ export default { TRUSTED_IPS_KEY, WEB_SERVER_ORIGIN_KEY, _clear, - _set, + _set: set, }; diff --git a/src/shell.js b/src/shell.js index 9dc8a9c42..4e5b45d4f 100644 --- a/src/shell.js +++ b/src/shell.js @@ -8,21 +8,6 @@ import _ from './underscore.js'; const debug = debugModule('box:shell'); -export default shell; - -function shell(tag) { - assert.strictEqual(typeof tag, 'string'); - - return { - bash: bash.bind(null, tag), - spawn: spawn.bind(null, tag), - sudo: sudo.bind(null, tag), - }; -} - -const SUDO = '/usr/bin/sudo'; 
-const KILL_CHILD_CMD = path.join(import.meta.dirname, 'scripts/kill-child.sh'); - function lineCount(buffer) { assert(Buffer.isBuffer(buffer)); @@ -36,6 +21,9 @@ function lineCount(buffer) { return count; } +const SUDO = '/usr/bin/sudo'; +const KILL_CHILD_CMD = path.join(import.meta.dirname, 'scripts/kill-child.sh'); + function spawn(tag, file, args, options) { assert.strictEqual(typeof tag, 'string'); assert.strictEqual(typeof file, 'string'); @@ -159,3 +147,15 @@ async function sudo(tag, args, options) { const spawnArgs = [ ...sudoArgs, ...args ]; return await spawn(tag, SUDO, spawnArgs, options); } + +function shell(tag) { + assert.strictEqual(typeof tag, 'string'); + + return { + bash: bash.bind(null, tag), + spawn: spawn.bind(null, tag), + sudo: sudo.bind(null, tag), + }; +} + +export default shell; diff --git a/src/storage/s3.js b/src/storage/s3.js index 91b5a8f16..4844d626b 100644 --- a/src/storage/s3.js +++ b/src/storage/s3.js @@ -19,9 +19,6 @@ import _ from '../underscore.js'; const debug = debugModule('box:storage/s3'); -const _chunk = chunk; - - function S3_NOT_FOUND(error) { return error instanceof NoSuchKey || error instanceof NoSuchBucket; } @@ -353,6 +350,181 @@ function encodeCopySource(bucket, path) { return `${bucket}/${output}`; } +async function remove(config, remotePath) { + assert.strictEqual(typeof config, 'object'); + assert.strictEqual(typeof remotePath, 'string'); + + const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); + + const fullRemotePath = path.join(config.prefix, remotePath); + + const deleteParams = { + Bucket: config.bucket, + Key: fullRemotePath + }; + + // deleteObject does not return error if key is not found + const [error] = await safe(s3.deleteObject(deleteParams)); + if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${fullRemotePath}. 
${formatError(error)}`); +} + +function chunk(array, size) { + assert(Array.isArray(array)); + assert.strictEqual(typeof size, 'number'); + + const length = array.length; + if (!length) return []; + let index = 0, resIndex = 0; + const result = Array(Math.ceil(length / size)); + + for (; index < length; index += size) { + result[resIndex++] = array.slice(index, index+size); + } + + return result; +} + +async function removeDir(config, limits, remotePathPrefix, progressCallback) { + assert.strictEqual(typeof config, 'object'); + assert.strictEqual(typeof limits, 'object'); + assert.strictEqual(typeof remotePathPrefix, 'string'); + assert.strictEqual(typeof progressCallback, 'function'); + + // only use this client for DeleteObjects call. It forces md5 checksum and for anything else, it might crash + const deleteObjectsS3Client = createS3Client(config, { retryStrategy: RETRY_STRATEGY, deleteObjects: true }); + + let total = 0; + let marker = null; + while (true) { + const batch = await listDir(config, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix + + const entries = batch.entries; + total += entries.length; + + const chunkSize = limits.deleteConcurrency || (config._provider !== 'digitalocean-spaces' ? 
1000 : 100); // throttle objects in each request + const chunks = chunk(entries, chunkSize); + + await async.eachSeries(chunks, async function deleteFiles(objects) { + const deleteParams = { + Bucket: config.bucket, + Delete: { + Objects: objects.map(function (o) { return { Key: path.join(config.prefix, o.path) }; }) + } + }; + + const fullFirstPath = path.join(config.prefix, objects[0].path), fullLastPath = path.join(config.prefix, objects[objects.length-1].path); + progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` }); + + // deleteObjects does not return error if key is not found + const [error] = await safe(deleteObjectsS3Client.deleteObjects(deleteParams)); + if (error) { + progressCallback({ message: `Unable to remove from ${fullFirstPath} to ${fullLastPath} ${error.message || error.Code}` }); + throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove from ${fullFirstPath} to ${fullLastPath}. error: ${error.message}`); + } + }); + + if (!batch.marker) break; + marker = batch.marker; + } + + progressCallback({ message: `Removed ${total} files` }); +} + +async function cleanup(config, progressCallback) { + assert.strictEqual(typeof config, 'object'); + assert.strictEqual(typeof progressCallback, 'function'); + + const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); + + const uploads = await s3.listMultipartUploads({ Bucket: config.bucket, Prefix: config.prefix }); + progressCallback({ message: `Cleaning up any aborted multi-part uploads. 
count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` }); + if (!uploads.Uploads) return; + + for (const upload of uploads.Uploads) { + if (Date.now() - new Date(upload.Initiated) < 3 * 24 * 60 * 60 * 1000) continue; // 3 days ago + progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` }); + await safe(s3.abortMultipartUpload({ Bucket: config.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error + } +} + +async function verifyConfig({ id, provider, config }) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof provider, 'string'); + assert.strictEqual(typeof config, 'object'); + + if (typeof config.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a string'); + if (typeof config.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a string'); + + if (typeof config.bucket !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'bucket must be a string'); + // the node module seems to incorrectly accept bucket name with '/' + if (config.bucket.includes('/')) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "/"'); + + // names must be lowercase and start with a letter or number. 
can contain dashes + if (config.bucket.includes('_') || config.bucket.match(/[A-Z]/)) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "_" or capitals'); + + if (typeof config.prefix !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string'); + if ('signatureVersion' in config && typeof config.signatureVersion !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'signatureVersion must be a string'); + if ('endpoint' in config) { + if (typeof config.endpoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'endpoint must be a string'); + if (!config.endpoint.startsWith('http://') && !config.endpoint.startsWith('https://')) throw new BoxError(BoxError.BAD_FIELD, 'endpoint must start with http:// or https://'); + } + + if ('region' in config && typeof config.region !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'region must be a string'); + + if ('acceptSelfSignedCerts' in config && typeof config.acceptSelfSignedCerts !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'acceptSelfSignedCerts must be a boolean'); + if ('s3ForcePathStyle' in config && typeof config.s3ForcePathStyle !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 's3ForcePathStyle must be a boolean'); + + const putParams = { + Bucket: config.bucket, + Key: path.join(config.prefix, 'snapshot/cloudron-testfile'), + Body: 'testcontent' + }; + + const s3 = createS3Client(config, {}); + const [putError] = await safe(s3.putObject(putParams)); + if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. ${formatError(putError)}`); + + const listParams = { + Bucket: config.bucket, + Prefix: path.join(config.prefix, 'snapshot'), + MaxKeys: 1 + }; + + const [listError] = await safe(s3.listObjectsV2(listParams)); + if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. 
${formatError(listError)}`); + + const delParams = { + Bucket: config.bucket, + Key: path.join(config.prefix, 'snapshot/cloudron-testfile') + }; + + const [delError] = await safe(s3.deleteObject(delParams)); + if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`); + + const newConfig = _.pick(config, ['accessKeyId', 'secretAccessKey', 'bucket', 'prefix', 'signatureVersion', 'endpoint', 'region', 'acceptSelfSignedCerts', 's3ForcePathStyle' ]); + return { _provider: provider, ...newConfig }; +} + +async function setup(config) { + assert.strictEqual(typeof config, 'object'); +} + +// no-op: the S3 storage backend requires no teardown; the bucket and its contents are left untouched +async function teardown(config) { + assert.strictEqual(typeof config, 'object'); +} + +function removePrivateFields(config) { + delete config.secretAccessKey; + delete config._provider; + return config; +} + +function injectPrivateFields(newConfig, currentConfig) { + if (!Object.hasOwn(newConfig, 'secretAccessKey')) newConfig.secretAccessKey = currentConfig.secretAccessKey; + newConfig._provider = currentConfig._provider; +} + async function copyInternal(config, fullFromPath, fullToPath, fileSize, progressCallback) { assert.strictEqual(typeof config, 'object'); assert.strictEqual(typeof fullFromPath, 'string'); @@ -497,180 +669,7 @@ async function copyDir(config, limits, fromPath, toPath, progressCallback) { progressCallback({ message: `Copied ${total} files` }); } -async function remove(config, remotePath) { - assert.strictEqual(typeof config, 'object'); - assert.strictEqual(typeof remotePath, 'string'); - - const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); - - const fullRemotePath = path.join(config.prefix, remotePath); - - const deleteParams = { - Bucket: config.bucket, - Key: fullRemotePath - }; - - // deleteObject does not return error if key is not found - const [error] =
await safe(s3.deleteObject(deleteParams)); - if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove ${fullRemotePath}. ${formatError(error)}`); -} - -function chunk(array, size) { - assert(Array.isArray(array)); - assert.strictEqual(typeof size, 'number'); - - const length = array.length; - if (!length) return []; - let index = 0, resIndex = 0; - const result = Array(Math.ceil(length / size)); - - for (; index < length; index += size) { - result[resIndex++] = array.slice(index, index+size); - } - - return result; -} - -async function removeDir(config, limits, remotePathPrefix, progressCallback) { - assert.strictEqual(typeof config, 'object'); - assert.strictEqual(typeof limits, 'object'); - assert.strictEqual(typeof remotePathPrefix, 'string'); - assert.strictEqual(typeof progressCallback, 'function'); - - // only use this client for DeleteObjects call. It forces md5 checksum and for anything else, it might crash - const deleteObjectsS3Client = createS3Client(config, { retryStrategy: RETRY_STRATEGY, deleteObjects: true }); - - let total = 0; - let marker = null; - while (true) { - const batch = await listDir(config, remotePathPrefix, 1000, marker); // returns entries relative to (root) prefix - - const entries = batch.entries; - total += entries.length; - - const chunkSize = limits.deleteConcurrency || (config._provider !== 'digitalocean-spaces' ? 
1000 : 100); // throttle objects in each request - const chunks = chunk(entries, chunkSize); - - await async.eachSeries(chunks, async function deleteFiles(objects) { - const deleteParams = { - Bucket: config.bucket, - Delete: { - Objects: objects.map(function (o) { return { Key: path.join(config.prefix, o.path) }; }) - } - }; - - const fullFirstPath = path.join(config.prefix, objects[0].path), fullLastPath = path.join(config.prefix, objects[objects.length-1].path); - progressCallback({ message: `Removing ${objects.length} files from ${fullFirstPath} to ${fullLastPath}` }); - - // deleteObjects does not return error if key is not found - const [error] = await safe(deleteObjectsS3Client.deleteObjects(deleteParams)); - if (error) { - progressCallback({ message: `Unable to remove from ${fullFirstPath} to ${fullLastPath} ${error.message || error.Code}` }); - throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to remove from ${fullFirstPath} to ${fullLastPath}. error: ${error.message}`); - } - }); - - if (!batch.marker) break; - marker = batch.marker; - } - - progressCallback({ message: `Removed ${total} files` }); -} - -// often, the AbortIncompleteMultipartUpload lifecycle rule is not added to the bucket resulting in large bucket sizes over time -async function cleanup(config, progressCallback) { - assert.strictEqual(typeof config, 'object'); - assert.strictEqual(typeof progressCallback, 'function'); - - const s3 = createS3Client(config, { retryStrategy: RETRY_STRATEGY }); - - const uploads = await s3.listMultipartUploads({ Bucket: config.bucket, Prefix: config.prefix }); - progressCallback({ message: `Cleaning up any aborted multi-part uploads. 
count:${uploads.Uploads?.length || 0} truncated:${uploads.IsTruncated}` }); - if (!uploads.Uploads) return; - - for (const upload of uploads.Uploads) { - if (Date.now() - new Date(upload.Initiated) < 3 * 24 * 60 * 60 * 1000) continue; // 3 days ago - progressCallback({ message: `Cleaning up multi-part upload uploadId:${upload.UploadId} key:${upload.Key}` }); - await safe(s3.abortMultipartUpload({ Bucket: config.bucket, Key: upload.Key, UploadId: upload.UploadId }), { debug }); // ignore error - } -} - -async function verifyConfig({ id, provider, config }) { - assert.strictEqual(typeof id, 'string'); - assert.strictEqual(typeof provider, 'string'); - assert.strictEqual(typeof config, 'object'); - - if (typeof config.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a string'); - if (typeof config.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a string'); - - if (typeof config.bucket !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'bucket must be a string'); - // the node module seems to incorrectly accept bucket name with '/' - if (config.bucket.includes('/')) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "/"'); - - // names must be lowercase and start with a letter or number. 
can contain dashes - if (config.bucket.includes('_') || config.bucket.match(/[A-Z]/)) throw new BoxError(BoxError.BAD_FIELD, 'bucket name cannot contain "_" or capitals'); - - if (typeof config.prefix !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'prefix must be a string'); - if ('signatureVersion' in config && typeof config.signatureVersion !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'signatureVersion must be a string'); - if ('endpoint' in config) { - if (typeof config.endpoint !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'endpoint must be a string'); - if (!config.endpoint.startsWith('http://') && !config.endpoint.startsWith('https://')) throw new BoxError(BoxError.BAD_FIELD, 'endpoint must start with http:// or https://'); - } - - if ('region' in config && typeof config.region !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'region must be a string'); - - if ('acceptSelfSignedCerts' in config && typeof config.acceptSelfSignedCerts !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 'acceptSelfSignedCerts must be a boolean'); - if ('s3ForcePathStyle' in config && typeof config.s3ForcePathStyle !== 'boolean') throw new BoxError(BoxError.BAD_FIELD, 's3ForcePathStyle must be a boolean'); - - const putParams = { - Bucket: config.bucket, - Key: path.join(config.prefix, 'snapshot/cloudron-testfile'), - Body: 'testcontent' - }; - - const s3 = createS3Client(config, {}); - const [putError] = await safe(s3.putObject(putParams)); - if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. ${formatError(putError)}`); - - const listParams = { - Bucket: config.bucket, - Prefix: path.join(config.prefix, 'snapshot'), - MaxKeys: 1 - }; - - const [listError] = await safe(s3.listObjectsV2(listParams)); - if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. 
${formatError(listError)}`); - - const delParams = { - Bucket: config.bucket, - Key: path.join(config.prefix, 'snapshot/cloudron-testfile') - }; - - const [delError] = await safe(s3.deleteObject(delParams)); - if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. ${formatError(delError)}`); - - const newConfig = _.pick(config, ['accessKeyId', 'secretAccessKey', 'bucket', 'prefix', 'signatureVersion', 'endpoint', 'region', 'acceptSelfSignedCerts', 's3ForcePathStyle' ]); - return { _provider: provider, ...newConfig }; -} - -async function setup(config) { - assert.strictEqual(typeof config, 'object'); -} - -async function teardown(config) { - assert.strictEqual(typeof config, 'object'); -} - -function removePrivateFields(config) { - delete config.secretAccessKey; - delete config._provider; - return config; -} - -function injectPrivateFields(newConfig, currentConfig) { - if (!Object.hasOwn(newConfig, 'secretAccessKey')) newConfig.secretAccessKey = currentConfig.secretAccessKey; - newConfig._provider = currentConfig._provider; -} +const _chunk = chunk; export default { setup, diff --git a/src/volumes.js b/src/volumes.js index 86c9a4e78..f6eb31f26 100644 --- a/src/volumes.js +++ b/src/volumes.js @@ -12,9 +12,6 @@ import services from './services.js'; const debug = debugModule('box:volumes'); -const _validateHostPath = validateHostPath; - - const VOLUMES_FIELDS = [ 'id', 'name', 'hostPath', 'creationTime', 'mountType', 'mountOptionsJson' ].join(','); function postProcess(result) { @@ -206,6 +203,8 @@ async function mountAll() { } } +const _validateHostPath = validateHostPath; + export default { add, get,