3154 lines
131 KiB
JavaScript
3154 lines
131 KiB
JavaScript
import appTaskManager from './apptaskmanager.js';
|
|
import appstore from './appstore.js';
|
|
import archives from './archives.js';
|
|
import assert from 'node:assert';
|
|
import backups from './backups.js';
|
|
import backupSites from './backupsites.js';
|
|
import BoxError from './boxerror.js';
|
|
import constants from './constants.js';
|
|
import crypto from 'node:crypto';
|
|
import { CronTime } from 'cron';
|
|
import dashboard from './dashboard.js';
|
|
import database from './database.js';
|
|
import debugModule from 'debug';
|
|
import dns from './dns.js';
|
|
import docker from './docker.js';
|
|
import domains from './domains.js';
|
|
import eventlog from './eventlog.js';
|
|
import fs from 'node:fs';
|
|
import fileUtils from './file-utils.js';
|
|
import Location from './location.js';
|
|
import locks from './locks.js';
|
|
import logs from './logs.js';
|
|
import mail from './mail.js';
|
|
import manifestFormat from '@cloudron/manifest-format';
|
|
import mysql from 'mysql2';
|
|
import notifications from './notifications.js';
|
|
import once from './once.js';
|
|
import path from 'node:path';
|
|
import paths from './paths.js';
|
|
import { PassThrough } from 'node:stream';
|
|
import reverseProxy from './reverseproxy.js';
|
|
import safe from 'safetydance';
|
|
import semver from 'semver';
|
|
import services from './services.js';
|
|
import shellModule from './shell.js';
|
|
import tasks from './tasks.js';
|
|
import { Transform as TransformStream } from 'node:stream';
|
|
import users from './users.js';
|
|
import util from 'node:util';
|
|
import volumes from './volumes.js';
|
|
import _ from './underscore.js';
|
|
|
|
const debug = debugModule('box:apps');
const shell = shellModule('apps');

// port binding protocol types (appPortBindings.type)
const PORT_TYPE_TCP = 'tcp';
const PORT_TYPE_UDP = 'udp';

// installation state values (apps.installationState column)
const ISTATE_PENDING_INSTALL = 'pending_install';
const ISTATE_PENDING_CLONE = 'pending_clone';
const ISTATE_PENDING_CONFIGURE = 'pending_configure';
const ISTATE_PENDING_RECREATE_CONTAINER = 'pending_recreate_container';
const ISTATE_PENDING_LOCATION_CHANGE = 'pending_location_change';
const ISTATE_PENDING_SERVICES_CHANGE = 'pending_services_change';
const ISTATE_PENDING_DATA_DIR_MIGRATION = 'pending_data_dir_migration';
const ISTATE_PENDING_RESIZE = 'pending_resize';
const ISTATE_PENDING_DEBUG = 'pending_debug';
const ISTATE_PENDING_UNINSTALL = 'pending_uninstall';
const ISTATE_PENDING_RESTORE = 'pending_restore';
const ISTATE_PENDING_IMPORT = 'pending_import';
const ISTATE_PENDING_UPDATE = 'pending_update';
const ISTATE_PENDING_START = 'pending_start';
const ISTATE_PENDING_STOP = 'pending_stop';
const ISTATE_PENDING_RESTART = 'pending_restart';
const ISTATE_ERROR = 'error';
const ISTATE_INSTALLED = 'installed';

// run state values (apps.runState column)
const RSTATE_RUNNING = 'running';
const RSTATE_STOPPED = 'stopped';

// access levels as returned by accessLevel() below; NONE is falsy by design
const ACCESS_LEVEL_ADMIN = 'admin';
const ACCESS_LEVEL_OPERATOR = 'operator';
const ACCESS_LEVEL_USER = 'user';
const ACCESS_LEVEL_NONE = '';

// NOTE: when adding fields here, update the clone and unarchive logic as well
const APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.versionsUrl', 'apps.installationState', 'apps.errorJson', 'apps.runState',
    'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuQuota',
    'apps.label', 'apps.notes', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson', 'apps.operatorsJson',
    'apps.sso', 'apps.devicesJson', 'apps.debugModeJson', 'apps.enableBackup', 'apps.proxyAuth', 'apps.containerIp', 'apps.crontab',
    'apps.creationTime', 'apps.updateTime', 'apps.enableAutomaticUpdate', 'apps.upstreamUri', 'apps.checklistJson', 'apps.updateInfoJson',
    'apps.enableMailbox', 'apps.mailboxDisplayName', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableInbox', 'apps.inboxName', 'apps.inboxDomain',
    'apps.enableTurn', 'apps.enableRedis', 'apps.storageVolumeId', 'apps.storageVolumePrefix', 'apps.ts', 'apps.healthTime', '(apps.icon IS NOT NULL) AS hasIcon', '(apps.packageIcon IS NOT NULL) AS hasPackageIcon' ].join(',');

// const PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId', 'count' ].join(',');
const LOCATION_FIELDS = [ 'appId', 'subdomain', 'domain', 'type', 'certificateJson' ];

// shell script used for storage volume checks (scripts/checkvolume.sh)
const CHECKVOLUME_CMD = path.join(import.meta.dirname, 'scripts/checkvolume.sh');
|
|
|
|
// ports is a map of envvar -> hostPort
|
|
// translates REST API ports (envvar -> hostPort) into database-shaped bindings
// (envvar -> { hostPort, type, count }). A port name present in manifest.tcpPorts
// is tcp; anything else is treated as udp.
function translateToPortBindings(ports, manifest) {
    assert.strictEqual(typeof ports, 'object');
    assert.strictEqual(typeof manifest, 'object');

    const portBindings = {};

    if (!ports) return portBindings;

    const tcpPorts = manifest.tcpPorts || {};
    const udpPorts = manifest.udpPorts || {}; // guard: the original crashed when manifest had no udpPorts section

    for (const portName in ports) {
        const isTcp = portName in tcpPorts;
        const portType = isTcp ? PORT_TYPE_TCP : PORT_TYPE_UDP;
        // portCount is optional in the manifest, so this can be undefined
        const portCount = isTcp ? tcpPorts[portName].portCount : udpPorts[portName]?.portCount;
        portBindings[portName] = { hostPort: ports[portName], type: portType, count: portCount || 1 };
    }

    return portBindings;
}
|
|
|
|
// validates that secondaryDomains provides exactly the env var names required by the manifest's httpPorts
|
// checks that the secondaryDomains map and manifest.httpPorts cover exactly the
// same env var names. Returns a BoxError on mismatch, null when valid.
function validateSecondaryDomains(secondaryDomains, manifest) {
    assert.strictEqual(typeof secondaryDomains, 'object');
    assert.strictEqual(typeof manifest, 'object');

    const httpPorts = manifest.httpPorts || {};

    // each httpPort is required
    const missing = Object.keys(httpPorts).find((envName) => !(envName in secondaryDomains));
    if (missing) return new BoxError(BoxError.BAD_FIELD, `secondaryDomain ${missing} is required`);

    // no extra domains beyond what the manifest declares
    const unknown = Object.keys(secondaryDomains).find((envName) => !(envName in httpPorts));
    if (unknown) return new BoxError(BoxError.BAD_FIELD, `secondaryDomain ${unknown} is not listed in manifest`);

    return null;
}
|
|
|
|
// flattens the secondaryDomains map (envvar -> { domain, subdomain }) into an
// array of { domain, subdomain, environmentVariable } records.
function translateSecondaryDomains(secondaryDomains) {
    assert(secondaryDomains && typeof secondaryDomains === 'object');

    return Object.entries(secondaryDomains).map(([envName, sd]) => ({
        domain: sd.domain,
        subdomain: sd.subdomain,
        environmentVariable: envName
    }));
}
|
|
|
|
// parses a crontab string into an array of { schedule, command } entries.
// Blank lines and '#' comments are skipped. '@'-prefixed extension patterns are
// mapped per crontab(5); standard 5-field schedules are validated via CronTime.
// Throws BoxError.BAD_FIELD on any malformed line.
function parseCrontab(crontab) {
    assert(crontab === null || typeof crontab === 'string');

    // https://www.man7.org/linux/man-pages/man5/crontab.5.html#EXTENSIONS
    const KNOWN_EXTENSIONS = {
        '@service' : '@service', // runs once
        '@reboot' : '@service',
        '@yearly' : '0 0 1 1 *',
        '@annually' : '0 0 1 1 *',
        '@monthly' : '0 0 1 * *',
        '@weekly' : '0 0 * * 0',
        '@daily' : '0 0 * * *',
        '@hourly' : '0 * * * *',
    };

    const entries = [];
    if (!crontab) return entries;

    crontab.split('\n').forEach((rawLine, i) => {
        const line = rawLine.trim();
        if (!line || line.startsWith('#')) return; // skip blanks and comments

        if (line.startsWith('@')) {
            const parts = /^(@\S+)\s+(.+)$/.exec(line);
            if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
            const [, extension, command] = parts;
            if (!KNOWN_EXTENSIONS[extension]) throw new BoxError(BoxError.BAD_FIELD, `Unknown extension pattern at line ${i+1}`);
            entries.push({ schedule: KNOWN_EXTENSIONS[extension], command });
            return;
        }

        const parts = /^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)$/.exec(line);
        if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);

        const schedule = parts.slice(1, 6).join(' ');
        const command = parts[6];

        try {
            new CronTime('00 ' + schedule); // second is disallowed
        } catch (error) {
            throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern at line ${i+1}: ${error.message}`);
        }

        if (command.length === 0) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern. Command must not be empty at line ${i+1}`); // not possible with the regexp we have

        entries.push({ schedule, command });
    });

    return entries;
}
|
|
|
|
// combines the manifest's scheduler addon config with the user's crontab
// entries. Returns null when neither exists.
// NOTE: when the manifest has a scheduler section, the crontab entries are
// added onto that same object (keeps the original in-place behavior).
function getSchedulerConfig(app) {
    assert.strictEqual(typeof app, 'object');

    let schedulerConfig = app.manifest.addons?.scheduler || null;

    const crontabEntries = parseCrontab(app.crontab);
    if (crontabEntries.length === 0) return schedulerConfig;

    if (!schedulerConfig) schedulerConfig = {};

    // put a '.' because it is not a valid name for schedule name in manifestformat
    crontabEntries.forEach((entry, idx) => { schedulerConfig[`crontab.${idx}`] = entry; });

    return schedulerConfig;
}
|
|
|
|
// validates an accessRestriction object ({ users: [string], groups: [string] }
// or null). Returns a BoxError for bad shapes, null when valid.
function validateAccessRestriction(accessRestriction) {
    assert.strictEqual(typeof accessRestriction, 'object');

    if (accessRestriction === null) return null;

    const { users, groups } = accessRestriction;

    if (users) {
        if (!Array.isArray(users)) return new BoxError(BoxError.BAD_FIELD, 'users array property required');
        if (users.some((e) => typeof e !== 'string')) return new BoxError(BoxError.BAD_FIELD, 'All users have to be strings');
    }

    if (groups) {
        if (!Array.isArray(groups)) return new BoxError(BoxError.BAD_FIELD, 'groups array property required');
        if (groups.some((e) => typeof e !== 'string')) return new BoxError(BoxError.BAD_FIELD, 'All groups have to be strings');
    }

    // TODO: maybe validate if the users and groups actually exist
    return null;
}
|
|
|
|
// NOTE: validateAccessRestriction (above) appears to double as the operators validator — confirm at call sites
|
// cpuQuota is a percentage; must fall in [1, 100]
function validateCpuQuota(cpuQuota) {
    assert.strictEqual(typeof cpuQuota, 'number');

    const inRange = cpuQuota >= 1 && cpuQuota <= 100;
    return inRange ? null : new BoxError(BoxError.BAD_FIELD, 'cpuQuota has to be between 1 and 100');
}
|
|
|
|
// validates a debugMode object ({ cmd: array|null, readonlyRootfs: boolean } or null)
function validateDebugMode(debugMode) {
    assert.strictEqual(typeof debugMode, 'object');

    if (debugMode === null) return null;

    const badCmd = 'cmd' in debugMode && debugMode.cmd !== null && !Array.isArray(debugMode.cmd);
    if (badCmd) return new BoxError(BoxError.BAD_FIELD, 'debugMode.cmd must be an array or null' );

    const badRootfs = 'readonlyRootfs' in debugMode && typeof debugMode.readonlyRootfs !== 'boolean';
    if (badRootfs) return new BoxError(BoxError.BAD_FIELD, 'debugMode.readonlyRootfs must be a boolean' );

    return null;
}
|
|
|
|
// validates a robots.txt override (string or null)
function validateRobotsTxt(robotsTxt) {
    if (robotsTxt === null) return null;

    // this is the nginx limit on inline strings. if we really hit this, we have to generate a file
    const tooLong = robotsTxt.length > 4096;
    if (tooLong) return new BoxError(BoxError.BAD_FIELD, 'robotsTxt must be less than 4096');

    // TODO: validate the robots file? we escape the string when templating the nginx config right now

    return null;
}
|
|
|
|
// validates a Content-Security-Policy override (string or null)
function validateCsp(csp) {
    if (csp === null) return null;

    // 4096 is the nginx inline string limit
    if (csp.length > 4096) return new BoxError(BoxError.BAD_FIELD, 'CSP must be less than 4096');
    // double quotes would break out of the quoted nginx directive value
    if (csp.includes('"')) return new BoxError(BoxError.BAD_FIELD, 'CSP cannot contain double quotes'); // fixed grammar: "contains" -> "contain"

    return null;
}
|
|
|
|
// validates an upstream URI: must be a bare http(s) origin with no path,
// query or fragment. Returns BoxError when invalid, null when valid.
function validateUpstreamUri(upstreamUri) {
    assert.strictEqual(typeof upstreamUri, 'string');

    if (!upstreamUri) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot be empty');

    const parsed = safe(() => new URL(upstreamUri));
    if (!parsed) return new BoxError(BoxError.BAD_FIELD, `upstreamUri is invalid: ${safe.error.message}`);

    const isHttpScheme = parsed.protocol === 'http:' || parsed.protocol === 'https:';
    if (!isHttpScheme) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri has an unsupported scheme');
    if (parsed.search || parsed.hash) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have search or hash');
    if (parsed.pathname !== '/') return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

    // we use the uri in a named location @wellknown-upstream. nginx does not support having paths in it
    if (upstreamUri.endsWith('/')) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

    return null;
}
|
|
|
|
// validates the user-visible app label (string or null, max 128 chars)
function validateLabel(label) {
    if (label === null) return null;

    return label.length > 128 ? new BoxError(BoxError.BAD_FIELD, 'label must be less than 128') : null;
}
|
|
|
|
// validates user-supplied tags: at most 64 tags, each non-empty and at most 128 chars
function validateTags(tags) {
    if (tags.length > 64) return new BoxError(BoxError.BAD_FIELD, 'Can only set up to 64 tags');

    if (tags.some(tag => tag.length === 0)) return new BoxError(BoxError.BAD_FIELD, 'tag cannot be empty'); // was loose '==' comparison
    if (tags.some(tag => tag.length > 128)) return new BoxError(BoxError.BAD_FIELD, 'tag must be less than 128');

    return null;
}
|
|
|
|
// validates custom env vars: POSIX-conformant names, keys at most 512 chars
function validateEnv(env) {
    for (const key of Object.keys(env)) {
        if (key.length > 512) return new BoxError(BoxError.BAD_FIELD, 'Max env var key length is 512');
        // http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
        if (!/^[a-zA-Z_][a-zA-Z0-9_]*$/.test(key)) return new BoxError(BoxError.BAD_FIELD, `"${key}" is not a valid environment variable`);
    }

    return null;
}
|
|
|
|
// maps a MySQL "Duplicate entry ... for key ..." error message onto a
// user-friendly BoxError by matching the duplicate value against the app's
// requested locations and port bindings. Falls back to a generic
// ALREADY_EXISTS (or DATABASE_ERROR if the message is unrecognized).
function getDuplicateErrorDetails(errorMessage, locations, portBindings) {
    assert.strictEqual(typeof errorMessage, 'string');
    assert(Array.isArray(locations));
    assert.strictEqual(typeof portBindings, 'object');

    const match = errorMessage.match(/Duplicate entry '(.*)' for key '(.*)'/);
    if (!match) {
        debug('Unexpected SQL error message.', errorMessage);
        return new BoxError(BoxError.DATABASE_ERROR, errorMessage);
    }

    // check if a location conflicts (the unique key value is "<subdomain>-<domain>" or bare domain)
    if (match[2] === 'locations.subdomain') {
        for (let i = 0; i < locations.length; i++) {
            const { subdomain, domain, type } = locations[i];
            if (match[1] !== (subdomain ? `${subdomain}-${domain}` : domain)) continue;

            return new BoxError(BoxError.ALREADY_EXISTS, `${type} location '${dns.fqdn(subdomain, domain)}' is in use`);
        }
    }

    // check if a requested host port conflicts
    for (const portName in portBindings) {
        if (portBindings[portName].hostPort === parseInt(match[1], 10)) return new BoxError(BoxError.ALREADY_EXISTS, `Port ${match[1]} is in use`); // was parseInt without radix
    }

    if (match[2] === 'apps_storageVolume') {
        return new BoxError(BoxError.BAD_FIELD, `Storage directory ${match[1]} is in use`);
    }

    return new BoxError(BoxError.ALREADY_EXISTS, `${match[2]} '${match[1]}' is in use`);
}
|
|
|
|
// effective memory limit for an app: explicit app setting wins over the
// manifest value; -1 means unrestricted (returned as 0); anything below the
// platform default is clamped up to the default.
function getMemoryLimit(app) {
    assert.strictEqual(typeof app, 'object');

    const configured = app.memoryLimit || app.manifest.memoryLimit || 0;

    if (configured === -1) return 0; // unrestricted

    // ensure we never go below minimum (in case we change the default)
    if (configured === 0 || configured < constants.DEFAULT_MEMORY_LIMIT) return constants.DEFAULT_MEMORY_LIMIT;

    return configured;
}
|
|
|
|
// converts a raw apps SQL row (JSON text columns plus aggregated join columns)
// into the in-memory app object used by the rest of the codebase.
// NOTE: mutates `result` in place — parses each *Json column into its object
// form and deletes the raw column.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    assert(result.manifestJson === null || typeof result.manifestJson === 'string');
    result.manifest = safe.JSON.parse(result.manifestJson);
    delete result.manifestJson;

    assert(result.tagsJson === null || typeof result.tagsJson === 'string');
    result.tags = safe.JSON.parse(result.tagsJson) || [];
    delete result.tagsJson;

    assert(result.checklistJson === null || typeof result.checklistJson === 'string');
    result.checklist = safe.JSON.parse(result.checklistJson) || {};
    delete result.checklistJson;

    assert(result.updateInfoJson === null || typeof result.updateInfoJson === 'string');
    result.updateInfo = safe.JSON.parse(result.updateInfoJson) || null;
    delete result.updateInfoJson;

    assert(result.reverseProxyConfigJson === null || typeof result.reverseProxyConfigJson === 'string');
    result.reverseProxyConfig = safe.JSON.parse(result.reverseProxyConfigJson) || {};
    delete result.reverseProxyConfigJson;

    assert(result.hostPorts === null || typeof result.hostPorts === 'string');
    assert(result.environmentVariables === null || typeof result.environmentVariables === 'string');

    // hostPorts/environmentVariables/portTypes/portCounts are comma-joined,
    // index-aligned lists from the appPortBindings join
    result.portBindings = {};
    const hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
    const environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');
    const portTypes = result.portTypes === null ? [ ] : result.portTypes.split(',');
    const portCounts = result.portCounts === null ? [ ] : result.portCounts.split(',');

    delete result.hostPorts;
    delete result.environmentVariables;
    delete result.portTypes;
    delete result.portCounts;

    for (let i = 0; i < environmentVariables.length; i++) {
        result.portBindings[environmentVariables[i]] = { hostPort: parseInt(hostPorts[i], 10), type: portTypes[i], count: parseInt(portCounts[i], 10) };
    }

    assert(result.accessRestrictionJson === null || typeof result.accessRestrictionJson === 'string');
    result.accessRestriction = safe.JSON.parse(result.accessRestrictionJson);
    if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = []; // normalize: users list is always present
    delete result.accessRestrictionJson;

    result.operators = safe.JSON.parse(result.operatorsJson);
    if (result.operators && !result.operators.users) result.operators.users = []; // normalize: users list is always present
    delete result.operatorsJson;

    // SQL boolean columns come back as 0/1; coerce to real booleans
    result.sso = !!result.sso;
    result.enableBackup = !!result.enableBackup;
    result.enableAutomaticUpdate = !!result.enableAutomaticUpdate;
    result.enableMailbox = !!result.enableMailbox;
    result.enableInbox = !!result.enableInbox;
    result.proxyAuth = !!result.proxyAuth;
    result.hasIcon = !!result.hasIcon;
    result.hasPackageIcon = !!result.hasPackageIcon;

    assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
    result.debugMode = safe.JSON.parse(result.debugModeJson);
    delete result.debugModeJson;

    assert(result.servicesConfigJson === null || typeof result.servicesConfigJson === 'string');
    result.servicesConfig = safe.JSON.parse(result.servicesConfigJson) || {};
    delete result.servicesConfigJson;

    // the locations join arrives as parallel JSON arrays, one entry per location row
    const subdomains = JSON.parse(result.subdomains),
        parsedDomains = JSON.parse(result.domains),
        subdomainTypes = JSON.parse(result.subdomainTypes),
        subdomainEnvironmentVariables = JSON.parse(result.subdomainEnvironmentVariables),
        subdomainCertificateJsons = JSON.parse(result.subdomainCertificateJsons);

    delete result.subdomains;
    delete result.domains;
    delete result.subdomainTypes;
    delete result.subdomainEnvironmentVariables;
    delete result.subdomainCertificateJsons;

    // split locations by type: the primary becomes subdomain/domain/certificate
    // on the app itself, the rest are collected into per-type arrays
    result.secondaryDomains = [];
    result.redirectDomains = [];
    result.aliasDomains = [];
    for (let i = 0; i < subdomainTypes.length; i++) {
        const subdomain = subdomains[i], domain = parsedDomains[i], certificate = safe.JSON.parse(subdomainCertificateJsons[i]);

        if (subdomainTypes[i] === Location.TYPE_PRIMARY) {
            result.subdomain = subdomain;
            result.domain = domain;
            result.certificate = certificate;
        } else if (subdomainTypes[i] === Location.TYPE_SECONDARY) {
            result.secondaryDomains.push({ domain, subdomain, certificate, environmentVariable: subdomainEnvironmentVariables[i] });
        } else if (subdomainTypes[i] === Location.TYPE_REDIRECT) {
            result.redirectDomains.push({ domain, subdomain, certificate });
        } else if (subdomainTypes[i] === Location.TYPE_ALIAS) {
            result.aliasDomains.push({ domain, subdomain, certificate });
        }
    }

    const envNames = JSON.parse(result.envNames), envValues = JSON.parse(result.envValues);
    delete result.envNames;
    delete result.envValues;
    result.env = {};
    for (let i = 0; i < envNames.length; i++) { // NOTE: envNames is [ null ] when env of an app is empty
        if (envNames[i]) result.env[envNames[i]] = envValues[i];
    }

    const volumeIds = JSON.parse(result.volumeIds);
    delete result.volumeIds;
    const volumeReadOnlys = JSON.parse(result.volumeReadOnlys);
    delete result.volumeReadOnlys;

    result.mounts = volumeIds[0] === null ? [] : volumeIds.map((v, idx) => { return { volumeId: v, readOnly: !!volumeReadOnlys[idx] }; }); // NOTE: volumeIds is [null] when volumes of an app is empty

    result.error = safe.JSON.parse(result.errorJson);
    delete result.errorJson;

    result.taskId = result.taskId ? String(result.taskId) : null; // task ids are exposed as strings
    result.devices = result.devicesJson ? JSON.parse(result.devicesJson) : {};
    delete result.devicesJson;
}
|
|
|
|
// true when the user's role compares at or above ROLE_ADMIN
function isAdmin(user) {
    assert.strictEqual(typeof user, 'object');

    const comparison = users.compareRoles(user.role, users.ROLE_ADMIN);
    return comparison >= 0;
}
|
|
|
|
// true when the user is listed (directly or via group) in app.operators,
// or is an admin. An app without operators is operated by admins only.
function isOperator(app, user) {
    assert.strictEqual(typeof app, 'object'); // IMPORTANT: can also be applink
    assert.strictEqual(typeof user, 'object');

    const operators = app.operators;
    if (!operators) return isAdmin(user);

    if (operators.users.includes(user.id)) return true;
    if (!operators.groups) return isAdmin(user);

    const inOperatorGroup = operators.groups.some((gid) => Array.isArray(user.groupIds) && user.groupIds.includes(gid));
    return inOperatorGroup || isAdmin(user);
}
|
|
|
|
// true when the app has no access restriction, or the user is listed
// (directly or via group), or the user is at least an operator.
function canAccess(app, user) {
    assert.strictEqual(typeof app, 'object'); // IMPORTANT: can also be applink
    assert.strictEqual(typeof user, 'object');

    const restriction = app.accessRestriction;
    if (restriction === null) return true;

    if (restriction.users.includes(user.id)) return true;
    if (!restriction.groups) return isOperator(app, user);

    const inAllowedGroup = restriction.groups.some((gid) => Array.isArray(user.groupIds) && user.groupIds.includes(gid));
    return inAllowedGroup || isOperator(app, user);
}
|
|
|
|
// resolves the highest access level the user has for this app
function accessLevel(app, user) {
    if (isAdmin(user)) return ACCESS_LEVEL_ADMIN;
    if (isOperator(app, user)) return ACCESS_LEVEL_OPERATOR;
    if (canAccess(app, user)) return ACCESS_LEVEL_USER;
    return ACCESS_LEVEL_NONE;
}
|
|
|
|
// returns a copy of the app containing only the fields visible at the given
// access level. Private certificate keys are always stripped from the result.
function pickFields(app, level) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof level, 'string');

    if (level === ACCESS_LEVEL_NONE) return null; // cannot happen!

    const USER_FIELDS = [
        'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction',
        'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso', 'subdomain', 'domain', 'fqdn', 'certificate',
        'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'upstreamUri'
    ];

    const OPERATOR_FIELDS = [ // admin or operator see the same set
        'id', 'appStoreId', 'versionsUrl', 'installationState', 'error', 'runState', 'health', 'taskId',
        'subdomain', 'domain', 'fqdn', 'certificate', 'crontab', 'upstreamUri',
        'accessRestriction', 'manifest', 'portBindings', 'iconUrl', 'memoryLimit', 'cpuQuota', 'operators',
        'sso', 'debugMode', 'reverseProxyConfig', 'enableBackup', 'creationTime', 'updateTime', 'ts', 'tags',
        'label', 'notes', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'devices', 'env', 'enableAutomaticUpdate',
        'storageVolumeId', 'storageVolumePrefix', 'mounts', 'enableTurn', 'enableRedis', 'checklist',
        'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain', 'updateInfo'
    ];

    const result = _.pick(app, level === ACCESS_LEVEL_USER ? USER_FIELDS : OPERATOR_FIELDS);

    // remove private certificate key
    const stripPrivateKey = (entry) => { if (entry.certificate) delete entry.certificate.key; };
    stripPrivateKey(result);
    result.secondaryDomains.forEach(stripPrivateKey);
    result.aliasDomains.forEach(stripPrivateKey);
    result.redirectDomains.forEach(stripPrivateKey);

    return result;
}
|
|
|
|
// throws BoxError.CONFLICT when any requested port range [hostPort, hostPort+count-1]
// overlaps an existing binding of the same protocol type.
// options.appId, when set, excludes that app's own bindings (reconfiguration case).
async function checkForPortBindingConflict(portBindings, options) {
    assert.strictEqual(typeof portBindings, 'object');
    assert.strictEqual(typeof options, 'object');

    const existingPortBindings = options.appId
        ? await database.query('SELECT * FROM appPortBindings WHERE appId != ?', [ options.appId ])
        : await database.query('SELECT * FROM appPortBindings', []);

    if (existingPortBindings.length === 0) return;

    const existingTcp = existingPortBindings.filter((p) => p.type === 'tcp');
    const existingUdp = existingPortBindings.filter((p) => p.type === 'udp');

    for (const portName of Object.keys(portBindings)) {
        const portBinding = portBindings[portName];
        const candidates = portBinding.type === 'tcp' ? existingTcp : existingUdp;

        const requestedStart = portBinding.hostPort;
        const requestedEnd = portBinding.hostPort + portBinding.count - 1;

        // ranges [a1,a2] and [b1,b2] overlap iff a1 <= b2 && b1 <= a2
        const conflict = candidates.find((epb) => {
            const existingStart = epb.hostPort;
            const existingEnd = epb.hostPort + epb.count - 1;
            return existingStart <= requestedEnd && requestedStart <= existingEnd;
        });

        if (conflict) throw new BoxError(BoxError.CONFLICT, `Conflicting ${portBinding.type} port ${portBinding.hostPort}`);
    }
}
|
|
|
|
// inserts a new app: the apps row, its primary location, port bindings, env
// vars and any secondary/redirect/alias locations — all in one transaction.
// `data` carries the optional fields; defaults are applied below.
// Throws ALREADY_EXISTS on duplicates, NOT_FOUND when the domain is unknown,
// DATABASE_ERROR otherwise.
async function add(id, appStoreId, versionsUrl, manifest, subdomain, domain, portBindings, data) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof appStoreId, 'string');
    assert.strictEqual(typeof versionsUrl, 'string');
    assert(manifest && typeof manifest === 'object');
    assert.strictEqual(typeof manifest.version, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert(portBindings && typeof portBindings === 'object');
    assert(data && typeof data === 'object');

    // serialize object fields to their JSON column form and apply defaults
    const manifestJson = JSON.stringify(manifest),
        accessRestrictionJson = data.accessRestriction ? JSON.stringify(data.accessRestriction) : null,
        operatorsJson = data.operators ? JSON.stringify(data.operators) : null,
        memoryLimit = data.memoryLimit || 0,
        cpuQuota = data.cpuQuota || 100,
        installationState = data.installationState,
        runState = data.runState,
        sso = 'sso' in data ? data.sso : null,
        debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null,
        devicesJson = data.devices ? JSON.stringify(data.devices) : null,
        env = data.env || {},
        label = data.label || null,
        tagsJson = data.tags ? JSON.stringify(data.tags) : null,
        checklistJson = data.checklist ? JSON.stringify(data.checklist) : null,
        mailboxName = data.mailboxName || null,
        mailboxDomain = data.mailboxDomain || null,
        mailboxDisplayName = data.mailboxDisplayName || '',
        reverseProxyConfigJson = data.reverseProxyConfig ? JSON.stringify(data.reverseProxyConfig) : null,
        servicesConfigJson = data.servicesConfig ? JSON.stringify(data.servicesConfig) : null,
        enableMailbox = data.enableMailbox || false,
        upstreamUri = data.upstreamUri || '',
        enableTurn = 'enableTurn' in data ? data.enableTurn : true,
        icon = data.icon || null,
        notes = data.notes || null,
        crontab = data.crontab || null,
        enableBackup = 'enableBackup' in data ? data.enableBackup : true,
        enableAutomaticUpdate = 'enableAutomaticUpdate' in data ? data.enableAutomaticUpdate : true;

    // when redis is optional, do not enable it by default. it's mostly used for caching in those setups
    const enableRedis = 'enableRedis' in data ? data.enableRedis : !manifest.addons?.redis?.optional;

    // fail early on range overlaps before starting the transaction
    await checkForPortBindingConflict(portBindings, { appId: null });

    const queries = [];

    // main apps row
    queries.push({
        query: 'INSERT INTO apps (id, appStoreId, versionsUrl, manifestJson, installationState, runState, accessRestrictionJson, operatorsJson, memoryLimit, cpuQuota, '
            + 'sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, checklistJson, servicesConfigJson, icon, '
            + 'enableMailbox, mailboxDisplayName, upstreamUri, enableTurn, enableRedis, devicesJson, notes, crontab, enableBackup, enableAutomaticUpdate) '
            + ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        args: [ id, appStoreId, versionsUrl, manifestJson, installationState, runState, accessRestrictionJson, operatorsJson, memoryLimit, cpuQuota,
            sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, checklistJson, servicesConfigJson, icon,
            enableMailbox, mailboxDisplayName, upstreamUri, enableTurn, enableRedis, devicesJson, notes, crontab,
            enableBackup, enableAutomaticUpdate
        ]
    });

    // the primary location
    queries.push({
        query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
        args: [ id, domain, subdomain, Location.TYPE_PRIMARY ]
    });

    // one row per port binding
    Object.keys(portBindings).forEach(function (portEnv) {
        queries.push({
            query: 'INSERT INTO appPortBindings (environmentVariable, hostPort, type, appId, count) VALUES (?, ?, ?, ?, ?)',
            args: [ portEnv, portBindings[portEnv].hostPort, portBindings[portEnv].type, id, portBindings[portEnv].count ]
        });
    });

    // one row per custom env var
    Object.keys(env).forEach(function (name) {
        queries.push({
            query: 'INSERT INTO appEnvVars (appId, name, value) VALUES (?, ?, ?)',
            args: [ id, name, env[name] ]
        });
    });

    // secondary locations carry the env var that exposes them to the app
    if (data.secondaryDomains) {
        data.secondaryDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type, environmentVariable) VALUES (?, ?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_SECONDARY, d.environmentVariable ]
            });
        });
    }

    if (data.redirectDomains) {
        data.redirectDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_REDIRECT ]
            });
        });
    }

    if (data.aliasDomains) {
        data.aliasDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_ALIAS ]
            });
        });
    }

    // run everything atomically; map well-known SQL errors to BoxError codes
    const [error] = await safe(database.transaction(queries));
    if (error && error.sqlCode === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, error.message);
    if (error && error.sqlCode === 'ER_NO_REFERENCED_ROW_2') throw new BoxError(BoxError.NOT_FOUND, 'no such domain');
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
}
|
|
|
|
// fetches the raw icon columns for an app; null when no such app exists
|
async function getIcons(id) {
    assert.strictEqual(typeof id, 'string');

    const results = await database.query('SELECT icon, packageIcon FROM apps WHERE id = ?', [ id ]);
    if (results.length === 0) return null;

    const [ row ] = results;
    return { icon: row.icon, packageIcon: row.packageIcon };
}
|
|
|
|
// resolves the effective icon: the custom icon unless options.original is set, otherwise the package icon
|
async function getIcon(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const icons = await getIcons(app.id);
    if (!icons) throw new BoxError(BoxError.NOT_FOUND, 'No such app');

    // the custom icon wins unless the caller explicitly wants the original
    if (!options.original && icons.icon) return icons.icon;

    return icons.packageIcon || null;
}
|
|
|
|
// Core persistence helper: writes the given 'app' fields for app 'id' inside a
// single transaction. 'constraints' is extra SQL appended to the final UPDATE's
// WHERE clause (e.g. 'AND taskId IS NULL') so callers can do conditional updates.
// Throws ALREADY_EXISTS on unique key conflicts, NOT_FOUND when the last statement
// affected no row, DATABASE_ERROR for any other SQL failure.
async function updateWithConstraints(id, app, constraints) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof constraints, 'string');
    assert(!('portBindings' in app) || typeof app.portBindings === 'object');
    assert(!('accessRestriction' in app) || typeof app.accessRestriction === 'object' || app.accessRestriction === '');
    assert(!('secondaryDomains' in app) || Array.isArray(app.secondaryDomains));
    assert(!('redirectDomains' in app) || Array.isArray(app.redirectDomains));
    assert(!('aliasDomains' in app) || Array.isArray(app.aliasDomains));
    assert(!('tags' in app) || Array.isArray(app.tags));
    assert(!('checklist' in app) || typeof app.checklist === 'object');
    assert(!('env' in app) || typeof app.env === 'object');

    const queries = [ ];

    if ('portBindings' in app) {
        const portBindings = app.portBindings;

        // fail early when a requested host port clashes with another app's binding
        await checkForPortBindingConflict(portBindings, { appId: id });

        // replace entries by app id
        queries.push({ query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] });
        Object.keys(portBindings).forEach(function (env) {
            const values = [ portBindings[env].hostPort, portBindings[env].type, env, id, portBindings[env].count ];
            queries.push({ query: 'INSERT INTO appPortBindings (hostPort, type, environmentVariable, appId, count) VALUES(?, ?, ?, ?, ?)', args: values });
        });
    }

    if ('env' in app) {
        // env vars are replaced wholesale (delete then re-insert)
        queries.push({ query: 'DELETE FROM appEnvVars WHERE appId = ?', args: [ id ] });

        Object.keys(app.env).forEach(function (name) {
            queries.push({
                query: 'INSERT INTO appEnvVars (appId, name, value) VALUES (?, ?, ?)',
                args: [ id, name, app.env[name] ]
            });
        });
    }

    if ('subdomain' in app && 'domain' in app) { // must be updated together as they are unique together
        queries.push({ query: 'DELETE FROM locations WHERE appId = ?', args: [ id ]}); // all locations of an app must be updated together
        queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, app.domain, app.subdomain, Location.TYPE_PRIMARY ]});

        if ('secondaryDomains' in app) {
            app.secondaryDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type, environmentVariable) VALUES (?, ?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_SECONDARY, d.environmentVariable ]});
            });
        }

        if ('redirectDomains' in app) {
            app.redirectDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_REDIRECT ]});
            });
        }

        if ('aliasDomains' in app) {
            app.aliasDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_ALIAS ]});
            });
        }
    }

    if ('mounts' in app) {
        // mounts are also replaced wholesale
        queries.push({ query: 'DELETE FROM appMounts WHERE appId = ?', args: [ id ]});
        app.mounts.forEach(function (m) {
            queries.push({ query: 'INSERT INTO appMounts (appId, volumeId, readOnly) VALUES (?, ?, ?)', args: [ id, m.volumeId, m.readOnly ]});
        });
    }

    // remaining fields map to columns of the apps table. object-valued fields are
    // JSON-encoded into '<field>Json' columns; fields already handled above are skipped
    const fields = [ ], values = [ ];
    for (const p in app) {
        if (p === 'manifest' || p === 'tags' || p === 'checklist' || p === 'accessRestriction' || p === 'devices' || p === 'debugMode' || p === 'error'
            || p === 'reverseProxyConfig' || p === 'servicesConfig' || p === 'operators' || p === 'updateInfo') {
            fields.push(`${p}Json = ?`);
            values.push(JSON.stringify(app[p]));
        } else if (p !== 'portBindings' && p !== 'subdomain' && p !== 'domain' && p !== 'secondaryDomains' && p !== 'redirectDomains' && p !== 'aliasDomains' && p !== 'env' && p !== 'mounts') {
            fields.push(p + ' = ?');
            values.push(app[p]);
        }
    }

    if (values.length !== 0) {
        values.push(id);
        // note: 'constraints' is appended verbatim - callers pass trusted internal strings only
        queries.push({ query: 'UPDATE apps SET ' + fields.join(', ') + ' WHERE id = ? ' + constraints, args: values });
    }

    const [error, results] = await safe(database.transaction(queries));
    if (error && error.sqlCode === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, error.message);
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
    // the last statement (usually the constrained UPDATE) must have matched exactly one row
    if (results[results.length - 1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'App not found');
}
|
|
|
|
// Persists app fields, bumping the 'ts' column.
// ts is useful as a versioning mechanism (for example, icon changed). the timestamp
// is updated explicitly in code instead of the db so that health/healthTime can be
// updated (via setHealth) without changing ts.
async function update(id, app) {
    app.ts = new Date(); // note: mutates the caller-supplied object
    await updateWithConstraints(id, app, '');
}
|
|
|
|
// Validates a user-supplied memory limit against the manifest's minimum.
// Returns null when acceptable, a BoxError(BAD_FIELD) otherwise.
function validateMemoryLimit(manifest, memoryLimit) {
    assert.strictEqual(typeof manifest, 'object');
    assert.strictEqual(typeof memoryLimit, 'number');

    // 0 means "unset": the manifest value applies and no user value is committed.
    // this lets an app update change the manifest value and have it take effect.
    if (memoryLimit === 0) return null;

    // -1 is a special value that indicates unlimited memory
    if (memoryLimit === -1) return null;

    // max is not checked because docker allows any value for --memory
    const minimum = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT;
    if (memoryLimit < minimum) return new BoxError(BoxError.BAD_FIELD, 'memoryLimit too small');

    return null;
}
|
|
|
|
// Decides whether the app may be updated automatically to updateInfo.manifest.
// Returns { code: boolean, reason: string } where reason explains why a manual
// update is required. Guard order matters: the first failing check wins.
function canAutoupdateAppSync(app, updateInfo) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof updateInfo, 'object');

    const newManifest = updateInfo.manifest;

    if (!app.enableAutomaticUpdate) return { code: false, reason: 'Automatic updates for the app is disabled' };

    // for invalid subscriptions the appstore does not return a dockerImage
    if (!newManifest.dockerImage) return { code: false, reason: 'Invalid or Expired subscription' };

    // only manual update allowed for unstable updates
    if (updateInfo.unstable) return { code: false, reason: 'Update is marked as unstable' };

    // for community apps, it's a warning sign when the repo changes (for example: versions domain gets hijacked)
    const newRepo = docker.parseImageRef(newManifest.dockerImage).fullRepositoryName;
    const currentRepo = docker.parseImageRef(app.manifest.dockerImage).fullRepositoryName;
    if (newRepo !== currentRepo) return { code: false, reason: 'Package docker image repository has changed' };

    // major changes are blocking (0.x versions are exempt)
    const currentMajor = semver.major(app.manifest.version);
    if (currentMajor !== 0 && currentMajor !== semver.major(newManifest.version)) {
        return { code: false, reason: 'Major package version requires review of breaking changes' };
    }

    if (app.runState === RSTATE_STOPPED) return { code: false, reason: 'Stopped apps cannot run migration scripts' };

    const newTcpPorts = newManifest.tcpPorts || {};
    const newUdpPorts = newManifest.udpPorts || {};

    // app.portBindings is never null; every bound port must still exist in the update
    for (const portName of Object.keys(app.portBindings)) {
        if (!(portName in newTcpPorts) && !(portName in newUdpPorts)) return { code: false, reason: `${portName} port was in use but new update removes it` };
    }

    // it's fine if one or more (unused) port keys got removed
    return { code: true, reason: '' };
}
|
|
|
|
// Stores the auto-update verdict on the updateInfo object itself
// (isAutoUpdatable / manualUpdateReason).
function attachUpdateInfoProperties(app, updateInfo) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof updateInfo, 'object');

    const verdict = canAutoupdateAppSync(app, updateInfo);
    updateInfo.isAutoUpdatable = verdict.code;
    updateInfo.manualUpdateReason = verdict.reason;
}
|
|
|
|
// Attaches computed properties (iconUrl, fqdn on the app and every location,
// auto-update info) to a freshly loaded app object.
function attachProperties(app, domainObjectMap) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof domainObjectMap, 'object');

    // icon endpoint only exists when a custom or package icon is stored
    app.iconUrl = (app.hasIcon || app.hasPackageIcon) ? `/api/v1/apps/${app.id}/icon` : null;

    app.fqdn = dns.fqdn(app.subdomain, app.domain);

    const setFqdn = function (location) { location.fqdn = dns.fqdn(location.subdomain, location.domain); };
    app.secondaryDomains.forEach(setFqdn);
    app.redirectDomains.forEach(setFqdn);
    app.aliasDomains.forEach(setFqdn);

    if (app.updateInfo) attachUpdateInfoProperties(app, app.updateInfo);
}
|
|
|
|
// Persists health state; goes through updateWithConstraints directly (not update())
// so the app's 'ts' version timestamp is not bumped.
async function setHealth(appId, health, healthTime) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof health, 'string');
    assert(util.types.isDate(healthTime));

    await updateWithConstraints(appId, { health, healthTime }, '');
}
|
|
|
|
// Updates task-related columns of an app, always bumping 'ts'.
//
// options.requireNullTaskId: when true, the update only applies while no task is
// assigned (taskId IS NULL) - used to atomically claim an app for a task.
// options.requiredState: when set (and requireNullTaskId is true), additionally
// require a specific installationState.
async function setTask(appId, values, options) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof values, 'object');
    assert.strictEqual(typeof options, 'object');

    values.ts = new Date();

    if (!options.requireNullTaskId) return await updateWithConstraints(appId, values, '');

    if (options.requiredState === null) {
        await updateWithConstraints(appId, values, 'AND taskId IS NULL');
    } else {
        // NOTE(review): requiredState is interpolated into the SQL constraint string;
        // callers must pass trusted internal state constants only
        await updateWithConstraints(appId, values, `AND taskId IS NULL AND installationState = "${options.requiredState}"`);
    }
}
|
|
|
|
// Deletes an app in a single transaction: its locations, port bindings, env vars,
// app passwords, mounts and any reference to it in backupSites contents, then the
// app row itself.
// Throws NOT_FOUND when no app with this id exists.
async function del(id) {
    assert.strictEqual(typeof id, 'string');

    const queries = [
        { query: 'DELETE FROM locations WHERE appId = ?', args: [ id ] },
        { query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] },
        { query: 'DELETE FROM appEnvVars WHERE appId = ?', args: [ id ] },
        { query: 'DELETE FROM appPasswords WHERE identifier = ?', args: [ id ] },
        { query: 'DELETE FROM appMounts WHERE appId = ?', args: [ id ] },
        // remove this app id from any backup site contents listing it
        { query: `UPDATE backupSites SET contentsJson = JSON_REMOVE(contentsJson, JSON_UNQUOTE(JSON_SEARCH(contentsJson, 'one', ?, NULL, '$.*[*]'))) WHERE contentsJson LIKE ${mysql.escape('%"' + id + '"%')}`, args: [ id ] },
        { query: 'DELETE FROM apps WHERE id = ?', args: [ id ] }
    ];

    const results = await database.transaction(queries);
    // the final query deletes the app row; index by list length instead of a
    // hardcoded position so adding a query cannot silently break this check
    if (results[queries.length - 1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'App not found');
}
|
|
|
|
// Removes all apps and their dependent rows. Tables are cleared sequentially in
// the same order as before (dependents first, apps last).
async function clear() {
    const tables = [ 'locations', 'appPortBindings', 'appAddonConfigs', 'appEnvVars', 'apps' ];
    for (const table of tables) {
        await database.query(`DELETE FROM ${table}`);
    }
}
|
|
|
|
// each query simply join apps table with another table by id. we then join the full result together
// port bindings per app, aggregated into comma-separated strings (numbers cast to CHAR for GROUP_CONCAT)
const PB_QUERY = 'SELECT id, GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes, GROUP_CONCAT(CAST(appPortBindings.count AS CHAR(6))) AS portCounts FROM apps LEFT JOIN appPortBindings ON apps.id = appPortBindings.appId GROUP BY apps.id';
// env vars per app, aggregated into parallel JSON arrays (names and values line up by index)
const ENV_QUERY = 'SELECT id, JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues FROM apps LEFT JOIN appEnvVars ON apps.id = appEnvVars.appId GROUP BY apps.id';
// all locations (primary/secondary/redirect/alias) per app as parallel JSON arrays
const SUBDOMAIN_QUERY = 'SELECT id, JSON_ARRAYAGG(locations.subdomain) AS subdomains, JSON_ARRAYAGG(locations.domain) AS domains, JSON_ARRAYAGG(locations.type) AS subdomainTypes, JSON_ARRAYAGG(locations.environmentVariable) AS subdomainEnvironmentVariables, JSON_ARRAYAGG(locations.certificateJson) AS subdomainCertificateJsons FROM apps LEFT JOIN locations ON apps.id = locations.appId GROUP BY apps.id';
// volume mounts per app as parallel JSON arrays
const MOUNTS_QUERY = 'SELECT id, JSON_ARRAYAGG(appMounts.volumeId) AS volumeIds, JSON_ARRAYAGG(appMounts.readOnly) AS volumeReadOnlys FROM apps LEFT JOIN appMounts ON apps.id = appMounts.appId GROUP BY apps.id';
// the main app query: apps joined with the pre-aggregated subqueries above.
// APPS_FIELDS_PREFIXED is declared earlier in this file.
const APPS_QUERY = `SELECT ${APPS_FIELDS_PREFIXED}, hostPorts, environmentVariables, portTypes, portCounts, envNames, envValues, subdomains, domains, subdomainTypes, subdomainEnvironmentVariables, subdomainCertificateJsons, volumeIds, volumeReadOnlys FROM apps`
    + ` LEFT JOIN (${PB_QUERY}) AS q1 on q1.id = apps.id`
    + ` LEFT JOIN (${ENV_QUERY}) AS q2 on q2.id = apps.id`
    + ` LEFT JOIN (${SUBDOMAIN_QUERY}) AS q3 on q3.id = apps.id`
    + ` LEFT JOIN (${MOUNTS_QUERY}) AS q4 on q4.id = apps.id`;
|
|
|
|
// Loads a single app (ports, env, locations and mounts joined in) by id.
// Returns the fully post-processed app object or null when it does not exist.
async function get(id) {
    assert.strictEqual(typeof id, 'string');

    const domainObjectMap = await domains.getDomainObjectMap();

    const rows = await database.query(`${APPS_QUERY} WHERE apps.id = ?`, [ id ]);
    if (rows.length === 0) return null;

    const app = rows[0];
    postProcess(app);
    attachProperties(app, domainObjectMap);
    return app;
}
|
|
|
|
// Returns the host directory backing the app's localstorage addon, or null when
// the app does not use that addon.
async function getStorageDir(app) {
    assert.strictEqual(typeof app, 'object');

    if (!app.manifest.addons?.localstorage) return null;

    // default location when no dedicated storage volume is configured
    if (!app.storageVolumeId) return path.join(paths.APPS_DATA_DIR, app.id, 'data');

    const volume = await volumes.get(app.storageVolumeId);
    if (!volume) throw new BoxError(BoxError.NOT_FOUND, 'Volume not found'); // not possible

    return path.join(volume.hostPath, app.storageVolumePrefix);
}
|
|
|
|
// Validates that (volumeId, prefix) can serve as the app's new data directory.
// Throws BoxError(BAD_FIELD/BAD_STATE) describing the first problem; returns null on success.
async function checkStorage(app, volumeId, prefix) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof volumeId, 'string');
    assert.strictEqual(typeof prefix, 'string');

    const volume = await volumes.get(volumeId);
    if (volume === null) throw new BoxError(BoxError.BAD_FIELD, 'Storage volume not found');

    // lack of file perms makes these unsupported
    if (volume.mountType === 'cifs' || volume.mountType === 'sshfs') throw new BoxError(BoxError.BAD_FIELD, `${volume.mountType} volumes cannot be used as data directory`);

    const status = await volumes.getStatus(volume);
    if (status.state !== 'active') throw new BoxError(BoxError.BAD_FIELD, 'Volume is not active');

    // prefix must be a clean relative path inside the volume
    if (path.isAbsolute(prefix)) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" must be a relative path`);
    if (prefix.endsWith('/')) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" contains trailing slash`);
    if (prefix !== '' && path.normalize(prefix) !== prefix) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" is not a normalized path`);

    const sourceDir = await getStorageDir(app);
    if (sourceDir === null) throw new BoxError(BoxError.BAD_STATE, 'App does not use localstorage addon');

    const targetDir = path.join(volume.hostPath, prefix);
    const rel = path.relative(sourceDir, targetDir);
    // the target must either be outside the current data dir (rel starts with '../')
    // or at most one level below it
    if (!rel.startsWith('../') && rel.split('/').length > 1) throw new BoxError(BoxError.BAD_FIELD, 'Only one level subdirectory moves are supported');

    // CHECKVOLUME_CMD exit codes handled below: 2 => target not empty, 3 => target does not support chown
    const [error] = await safe(shell.sudo([ CHECKVOLUME_CMD, targetDir, sourceDir ], {}));
    if (error && error.code === 2) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} is not empty`);
    if (error && error.code === 3) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} does not support chown`);

    return null;
}
|
|
|
|
// Looks up an app by its container IP address.
// Returns the fully post-processed app object or null when no app matches.
async function getByIpAddress(ip) {
    assert.strictEqual(typeof ip, 'string');

    const domainObjectMap = await domains.getDomainObjectMap();

    const rows = await database.query(`${APPS_QUERY} WHERE apps.containerIp = ?`, [ ip ]);
    if (rows.length === 0) return null;

    const app = rows[0];
    postProcess(app);
    attachProperties(app, domainObjectMap);
    return app;
}
|
|
|
|
// Lists all apps (ordered by id) with computed properties attached.
async function list() {
    const domainObjectMap = await domains.getDomainObjectMap();

    const apps = await database.query(`${APPS_QUERY} ORDER BY apps.id`, [ ]);
    for (const app of apps) {
        postProcess(app);
        attachProperties(app, domainObjectMap);
    }
    return apps;
}
|
|
|
|
// returns the app whose primary fqdn matches, if any (undefined otherwise)
|
|
async function getByFqdn(fqdn) {
    assert.strictEqual(typeof fqdn, 'string');

    const allApps = await list();
    return allApps.find((a) => a.fqdn === fqdn); // undefined when nothing matches
}
|
|
|
|
// Lists only the apps the given user is allowed to access.
async function listByUser(user) {
    assert.strictEqual(typeof user, 'object');

    const allApps = await list();
    return allApps.filter(function (app) { return canAccess(app, user); });
}
|
|
|
|
// Returns the task currently assigned to the app, or null when none is assigned.
async function getTask(app) {
    assert.strictEqual(typeof app, 'object');

    return app.taskId ? await tasks.get(app.taskId) : null;
}
|
|
|
|
// Derives a mailbox name for an app: the subdomain when present, otherwise a
// lowercase alphanumeric slug of the manifest title, otherwise a generic
// fallback. Always suffixed with '.app'.
function mailboxNameForSubdomain(subdomain, manifest) {
    if (subdomain) return `${subdomain}.app`;

    if (manifest.title) {
        const slug = manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '');
        return `${slug}.app`;
    }

    return 'noreply.app';
}
|
|
|
|
// Post-task hook: performs follow-up work after an app task ends.
// 'error' is null/undefined on success. Silently returns when the app or task no
// longer exists (e.g. the app was deleted while the task ran).
async function onTaskFinished(error, appId, installationState, taskId, auditSource) {
    assert(!error || typeof error === 'object');
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof taskId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const success = !error;
    const errorMessage = error?.message || null;

    const app = await get(appId);
    const task = await tasks.get(taskId);
    if (!app || !task) return;

    switch (installationState) {
    case ISTATE_PENDING_DATA_DIR_MIGRATION:
        // data dir moved; rebuild sftp so it picks up the new location (best effort)
        if (success) await safe(services.rebuildService('sftp', auditSource), { debug });
        break;

    case ISTATE_PENDING_UPDATE: {
        // on success the app row already has the new manifest, so the old one comes
        // from the task args; on failure it is the other way around
        const fromManifest = success ? task.args[1].updateConfig.manifest : app.manifest;
        const toManifest = success ? app.manifest : task.args[1].updateConfig.manifest;

        await eventlog.add(eventlog.ACTION_APP_UPDATE_FINISH, auditSource, { app, toManifest, fromManifest, success, errorMessage });
        await notifications.unpin(notifications.TYPE_MANUAL_APP_UPDATE_NEEDED, { context: app.id });
        break;
    }
    }
}
|
|
|
|
// Returns the total number of apps.
async function getCount() {
    const [ row ] = await database.query('SELECT COUNT(*) AS total FROM apps');
    return row.total;
}
|
|
|
|
// Validates, persists and audits a new access restriction for the app.
async function setAccessRestriction(app, accessRestriction, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof accessRestriction, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const validationError = validateAccessRestriction(accessRestriction);
    if (validationError) throw validationError;

    await update(app.id, { accessRestriction });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, accessRestriction });
}
|
|
|
|
// Validates, persists and audits the app's operators list.
async function setOperators(app, operators, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof operators, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    // operators share the accessRestriction structure, hence the same validator
    const validationError = validateAccessRestriction(operators);
    if (validationError) throw validationError;

    await update(app.id, { operators });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, operators });
}
|
|
|
|
// Validates, persists and audits the app's crontab (null clears it).
async function setCrontab(app, crontab, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(crontab === null || typeof crontab === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    parseCrontab(crontab); // validation only; the parsed result is unused here

    await update(app.id, { crontab });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, crontab });
}
|
|
|
|
// Sets the upstream uri of the proxy app: validates it, regenerates the reverse
// proxy config with the new value, then persists and audits it.
async function setUpstreamUri(app, upstreamUri, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof upstreamUri, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'upstreamUri can only be set for proxy app');

    const validationError = validateUpstreamUri(upstreamUri);
    if (validationError) throw validationError;

    // write the proxy config with the new uri before persisting it
    await reverseProxy.writeAppConfigs({ ...app, upstreamUri });

    await update(app.id, { upstreamUri });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, upstreamUri });
}
|
|
|
|
// Validates, persists and audits the app's display label.
async function setLabel(app, label, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof label, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const validationError = validateLabel(label);
    if (validationError) throw validationError;

    await update(app.id, { label });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, label });
}
|
|
|
|
// Validates, persists and audits the app's tag list.
async function setTags(app, tags, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(Array.isArray(tags));
    assert.strictEqual(typeof auditSource, 'object');

    const validationError = validateTags(tags);
    if (validationError) throw validationError;

    await update(app.id, { tags });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, tags });
}
|
|
|
|
// Persists and audits the app's free-form notes (no validation needed).
async function setNotes(app, notes, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof notes, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { notes });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, notes });
}
|
|
|
|
// Acknowledges (or un-acknowledges) a single checklist item, recording when and
// by whom it was changed. No-op when the value is unchanged.
async function setChecklistItem(app, checklistItemKey, acknowledged, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof checklistItemKey, 'string');
    assert.strictEqual(typeof acknowledged, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const item = app.checklist[checklistItemKey];
    if (!item) throw new BoxError(BoxError.NOT_FOUND, 'no such checklist item');

    if (item.acknowledged === acknowledged) return; // nothing changed

    item.acknowledged = acknowledged;
    item.changedAt = Date.now();
    item.changedBy = auditSource.username;

    await update(app.id, { checklist: app.checklist });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, checklist: app.checklist });
}
|
|
|
|
// Sets (base64 string) or clears (null) the app's custom icon.
async function setIcon(app, icon, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(icon === null || typeof icon === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    // a non-null icon arrives base64 encoded; store the decoded bytes
    if (icon) {
        icon = Buffer.from(icon, 'base64');
        if (icon.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
    }

    await update(app.id, { icon });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, iconChanged: true });
}
|
|
|
|
// Enables/disables automatic backups for the app and audits the change.
async function setAutomaticBackup(app, enable, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enable, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { enableBackup: enable });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableBackup: enable });
}
|
|
|
|
// Enables/disables automatic updates for the app and audits the change.
async function setAutomaticUpdate(app, enable, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enable, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { enableAutomaticUpdate: enable });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableAutomaticUpdate: enable });
}
|
|
|
|
// Validates the reverse proxy settings (csp, robots.txt, hsts preload), writes
// the nginx config with the new values, then persists and audits them.
async function setReverseProxyConfig(app, reverseProxyConfig, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof reverseProxyConfig, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    // fill in defaults for any omitted fields
    reverseProxyConfig = { robotsTxt: null, csp: null, hstsPreload: false, ...reverseProxyConfig };

    const cspError = validateCsp(reverseProxyConfig.csp);
    if (cspError) throw cspError;

    const robotsError = validateRobotsTxt(reverseProxyConfig.robotsTxt);
    if (robotsError) throw robotsError;

    // write the proxy config with the new settings before persisting them
    await reverseProxy.writeAppConfigs({ ...app, reverseProxyConfig });

    await update(app.id, { reverseProxyConfig });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, reverseProxyConfig });
}
|
|
|
|
// Loads the location row for (subdomain, domain) and wraps it in a Location.
// Returns null when no such location exists.
async function getLocation(subdomain, domain) {
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

    const rows = await database.query(`SELECT ${LOCATION_FIELDS} FROM locations WHERE subdomain=? AND domain=?`, [ subdomain, domain ]);
    if (rows.length === 0) return null;

    const [ row ] = rows;
    return new Location(subdomain, domain, row.type, safe.JSON.parse(row.certificateJson));
}
|
|
|
|
// Validates an array of Location objects against existing domains, reserved
// subdomains and the dashboard's location.
// Returns a BoxError(BAD_FIELD) for the first invalid location, or null when all are valid.
async function validateLocations(locations) {
    assert(Array.isArray(locations));

    const domainObjectMap = await domains.getDomainObjectMap();

    const RESERVED_SUBDOMAINS = [
        constants.SMTP_SUBDOMAIN,
        constants.IMAP_SUBDOMAIN
    ];

    const dashboardLocation = await dashboard.getLocation();
    for (const location of locations) {
        if (!(location.domain in domainObjectMap)) return new BoxError(BoxError.BAD_FIELD, `No such domain in ${location.type} location`);

        let subdomain = location.subdomain;
        // wildcard aliases: a bare '*' is always accepted; '*.foo' is validated as 'foo'
        if (location.type === Location.TYPE_ALIAS && subdomain.startsWith('*')) {
            if (subdomain === '*') continue;
            subdomain = subdomain.replace(/^\*\./, ''); // remove *.
        }

        if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);

        // apps may not take over the dashboard's fqdn
        if (location.fqdn === dashboardLocation.fqdn) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved for dashboard`);

        const error = dns.validateHostname(subdomain, location.domain);
        if (error) return new BoxError(BoxError.BAD_FIELD, `Bad ${location.type} location: ${error.message}`);
    }

    return null;
}
|
|
|
|
// Sets (cert+key) or clears (null cert/key) the user certificate of one of the
// app's locations, then pushes it to the reverse proxy and audits the change.
// Throws NOT_FOUND when the domain or location does not exist.
async function setCertificate(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const { subdomain, domain, cert, key } = data;

    const domainObject = await domains.get(domain);
    if (domainObject === null) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

    if (cert && key) await reverseProxy.validateCertificate(subdomain, domain, { cert, key });

    // a missing cert or key clears the stored certificate
    const certificate = cert && key ? { cert, key } : null;
    // fix: the locations table keys rows by (subdomain, domain); the previous query
    // referenced a non-existent 'location' column (every other query in this file
    // uses 'subdomain')
    const result = await database.query('UPDATE locations SET certificateJson=? WHERE subdomain=? AND domain=?', [ certificate ? JSON.stringify(certificate) : null, subdomain, domain ]);
    if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Location not found');

    const location = await getLocation(subdomain, domain); // fresh location object with type
    await reverseProxy.setUserCertificate(app, location);
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, subdomain, domain, cert });
}
|
|
|
|
// Collects host file paths of all logs for an app: the apptask log, the app log,
// the redis addon log (when the addon is used) and any manifest.logPaths resolved
// through the container's /tmp, /run and /app/data mounts.
async function getLogPaths(app) {
    assert.strictEqual(typeof app, 'object');

    const appId = app.id;

    const filePaths = [];
    filePaths.push(path.join(paths.LOG_DIR, appId, 'apptask.log'));
    filePaths.push(path.join(paths.LOG_DIR, appId, 'app.log'));
    if (app.manifest.addons && app.manifest.addons.redis) filePaths.push(path.join(paths.LOG_DIR, `redis-${appId}/app.log`));

    if (app.manifest.logPaths) {
        // resolve container paths to host paths via the container's mount table;
        // best effort - when inspect fails, manifest logPaths are silently skipped
        const [error, result] = await safe(docker.inspect(app.containerId));
        if (!error) {
            const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; });
            const tmpVolume = result.Mounts.find(function (mount) { return mount.Destination === '/tmp'; });
            const dataVolume = result.Mounts.find(function (mount) { return mount.Destination === '/app/data'; });

            // note: wild cards are not supported yet in logPaths since that will require shell expansion
            // NOTE(review): assumes the /tmp, /run and /app/data mounts always exist on an app
            // container - a missing mount would make the matching branch below throw; confirm
            for (const logPath of app.manifest.logPaths) {
                if (logPath.startsWith('/tmp/')) filePaths.push(`${tmpVolume.Source}/${logPath.slice('/tmp/'.length)}`);
                else if (logPath.startsWith('/run/')) filePaths.push(`${runVolume.Source}/${logPath.slice('/run/'.length)}`);
                else if (logPath.startsWith('/app/data/')) filePaths.push(`${dataVolume.Source}/${logPath.slice('/app/data/'.length)}`);
            }
        }
    }

    return filePaths;
}
|
|
|
|
// Streams the app's logs. Returns a LogStream the caller must destroy() when done;
// destroying it terminates the underlying tail process.
async function getLogs(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert(options && typeof options === 'object');

    assert.strictEqual(typeof options.lines, 'number');
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    const logPaths = await getLogPaths(app);
    // need sudo access for paths inside app container (manifest.logPaths)
    const tailProcess = logs.tail(logPaths, { lines: options.lines, follow: options.follow, sudo: true });

    const logStream = new logs.LogStream({ format: options.format || 'json', source: app.id });
    // the caller has to call destroy() on logStream. destroy() of Transform emits 'close'
    logStream.on('close', function () { tailProcess.terminate(); });

    tailProcess.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// Appends a single timestamped line to the app's log file (best effort - failures
// are logged to the console, not thrown).
// The timestamp is second-precision UTC ISO. Previously this truncated milliseconds
// by round-tripping through toUTCString()/Date parsing of a non-ISO string; zeroing
// the milliseconds directly yields the identical output without that fragility.
async function appendLogLine(app, line) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof line, 'string');

    const logFilePath = path.join(paths.LOG_DIR, app.id, 'app.log');
    const now = new Date();
    now.setMilliseconds(0); // second precision, matching the old toUTCString() round-trip
    const isoDate = now.toISOString();

    if (!safe.fs.appendFileSync(logFilePath, `${isoDate} ${line}\n`)) console.error(`Could not append log line for app ${app.id} at ${logFilePath}: ${safe.error.message}`);
}
|
|
|
|
// Validates a v2 manifest against this platform: docker image presence,
// min/max box version bounds and addon support.
// Returns a BoxError describing the first problem found, or null.
async function checkManifest(manifest) {
    assert(manifest && typeof manifest === 'object');

    if (manifest.manifestVersion !== 2) return new BoxError(BoxError.BAD_FIELD, 'Manifest version must be 2');

    if (!manifest.dockerImage) return new BoxError(BoxError.BAD_FIELD, 'Missing dockerImage'); // dockerImage is optional in manifest

    const boxTooNew = semver.valid(manifest.maxBoxVersion) && semver.gt(constants.VERSION, manifest.maxBoxVersion);
    if (boxTooNew) return new BoxError(BoxError.BAD_FIELD, 'Box version exceeds Apps maxBoxVersion');

    const boxTooOld = semver.valid(manifest.minBoxVersion) && semver.gt(manifest.minBoxVersion, constants.VERSION);
    if (boxTooOld) return new BoxError(BoxError.BAD_FIELD, 'App version requires a new platform version');

    return await services.checkAddonsSupport(manifest.addons || {});
}
|
|
|
|
// Creates a docker exec instance in the app's container.
// options: cmd (defaults to /bin/bash), tty, lang, cwd.
// Throws BAD_STATE unless the app is installed and running.
async function createExec(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert(options && typeof options === 'object');

    if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'cannot exec on proxy app');

    const cmd = options.cmd || [ '/bin/bash' ];
    assert(Array.isArray(cmd) && cmd.length > 0);

    if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) {
        throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
    }

    const createOptions = {
        AttachStdin: true,
        AttachStdout: true,
        AttachStderr: true,
        // A pseudo tty is a terminal which processes can detect (for example, to disable
        // colored output). Creating a pseudo terminal also assigns a terminal driver which
        // detects control sequences. When passing binary data, tty must be disabled; with a
        // tty, stdout/stderr become a single unified stream because of the nature of a tty
        // (see https://github.com/docker/docker/issues/19696)
        Tty: options.tty,
        Cmd: cmd
    };

    // currently the webterminal and cli sets C.UTF-8
    if (options.lang) createOptions.Env = [ `LANG=${options.lang}` ];

    if (options.cwd) createOptions.WorkingDir = options.cwd;

    return await docker.createExec(app.containerId, createOptions);
}
|
|
|
|
// Starts a previously created exec instance and returns the hijacked duplex stream
// carrying stdin/stdout/stderr. When terminal dimensions are given, the tty is resized
// shortly after start. Throws BAD_STATE unless the app is installed and running.
async function startExec(app, execId, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof execId, 'string');
    assert(options && typeof options === 'object');

    if (app.installationState !== ISTATE_INSTALLED || app.runState !== RSTATE_RUNNING) {
        throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
    }

    const startOptions = {
        Detach: false,
        Tty: options.tty,
        // hijacking upgrades the docker connection from http to tcp. because of this upgrade,
        // we can work with half-close connections (not defined in http). this way, the client
        // can properly signal that stdin is EOF by closing it's side of the socket. In http,
        // the whole connection will be dropped when stdin get EOF.
        // https://github.com/apocas/dockerode/commit/b4ae8a03707fad5de893f302e4972c1e758592fe
        hijack: true,
        stream: true,
        stdin: true,
        stdout: true,
        stderr: true
    };

    const stream = await docker.startExec(execId, startOptions);

    if (options.rows && options.columns) {
        // there is a race where resizing too early results in a 404 "no such exec"
        // https://git.cloudron.io/cloudron/box/issues/549
        setTimeout(async function () {
            // bug fix: `{ debug }` was previously passed as a third argument to resizeExec
            // instead of as the options of safe(), so resize failures were never logged
            await safe(docker.resizeExec(execId, { h: options.rows, w: options.columns }), { debug });
        }, 2000);
    }

    return stream;
}
|
|
|
|
// Fetches an exec instance by id. The app argument is only type-checked here;
// the lookup itself goes purely by the exec id.
async function getExec(app, execId) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof execId, 'string');

    const exec = await docker.getExec(execId);
    return exec;
}
|
|
|
|
// Lists the app's backups in the "normal" state, paginated (page is 1-based).
async function listBackups(app, page, perPage) {
    assert.strictEqual(typeof app, 'object');
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    const result = await backups.listByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, page, perPage);
    return result;
}
|
|
|
|
// Lists eventlog entries related to this app, paginated.
// Only the from/to time range of the caller's filter is honored; the search term
// is always the app id and no action filtering is applied.
async function listEventlog(app, filter, page, perPage) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof filter, 'object');
    assert.strictEqual(typeof page, 'number');
    assert.strictEqual(typeof perPage, 'number');

    const fullFilter = {
        actions: [],
        search: app.id,
        from: filter.from,
        to: filter.to
    };

    const entries = await eventlog.listPaged(fullFilter, page, perPage);
    return entries;
}
|
|
|
|
// Collects an entire readable stream into a single utf8 string.
// Rejects with a BoxError(FS_ERROR) when the stream errors.
async function drainStream(stream) {
    return new Promise((resolve, reject) => {
        let data = '';
        stream.setEncoding('utf8');
        // bug fix: this was `reject(new BoxError.FS_ERROR, error.message)`, which tried to
        // construct BoxError.FS_ERROR itself and passed the message as an ignored second
        // argument to reject()
        stream.on('error', (error) => reject(new BoxError(BoxError.FS_ERROR, error.message)));
        stream.on('data', function (d) { data += d; });
        stream.on('end', function () {
            resolve(data);
        });
    });
}
|
|
|
|
// Downloads a file or directory from inside the app's container via docker exec.
// Regular files are streamed with `cat`; directories are packed on the fly with `tar`.
// Returns { stream, filename, size } where size is 0 when unknown (directories).
// Throws BoxError(NOT_FOUND) when the path does not exist or is neither a regular
// file nor a directory.
async function downloadFile(app, filePath) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof filePath, 'string');

    // stat prints "<file type>-<size>", e.g. "regular file-1234" or "directory-4096"
    const statExecId = await createExec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true });
    const statStream = await startExec(app, statExecId, { tty: true });
    const data = await drainStream(statStream);

    // on a missing path, stat's error text does not follow the "<type>-<size>" shape
    const parts = data.split('-');
    if (parts.length !== 2) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist');

    const type = parts[0];
    let filename, cmd, size;

    if (type === 'regular file') {
        cmd = [ 'cat', filePath ];
        size = parseInt(parts[1], 10);
        filename = path.basename(filePath);
        if (isNaN(size)) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist');
    } else if (type === 'directory') {
        // -C <dir> . archives the directory contents relative to itself
        cmd = ['tar', 'zcf', '-', '-C', filePath, '.'];
        filename = path.basename(filePath) + '.tar.gz';
        size = 0; // unknown
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'only files or dirs can be downloaded');
    }

    // tty must be off so binary output is not mangled by a terminal driver (see createExec)
    const execId = await createExec(app, { cmd, tty: false });
    const inputStream = await startExec(app, execId, { tty: false });

    // transforms the docker stream into a normal stream.
    // with tty disabled, docker multiplexes stdout/stderr into frames with an 8-byte header:
    // byte 0 is the stream type (1 is forwarded below, others dropped) and bytes 4-7 hold
    // the big-endian payload length
    const stdoutStream = new TransformStream({
        transform: function (chunk, ignoredEncoding, callback) {
            // network chunks can split frames arbitrarily, so accumulate until a full frame arrives
            this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk;

            for (;;) {
                if (this._buffer.length < 8) break; // header is 8 bytes

                const streamType = this._buffer.readUInt8(0);
                const len = this._buffer.readUInt32BE(4);

                if (this._buffer.length < (8 + len)) break; // not enough

                const payload = this._buffer.slice(8, 8 + len);

                this._buffer = this._buffer.slice(8+len); // consumed

                if (streamType === 1) this.push(payload);
            }

            callback();
        }
    });

    inputStream.pipe(stdoutStream);

    return { stream: stdoutStream, filename, size };
}
|
|
|
|
// Uploads a local file (sourceFilePath on the host) into the app's container at
// destFilePath by piping it into `cat` through a docker exec.
// Resolves when the container-side stream finishes; rejects with BoxError(FS_ERROR)
// if either the source read stream or the destination exec stream errors.
async function uploadFile(app, sourceFilePath, destFilePath) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof sourceFilePath, 'string');
    assert.strictEqual(typeof destFilePath, 'string');

    // the built-in bash printf understands "%q" but not /usr/bin/printf.
    // ' gets replaced with '\'' . the first closes the quote and last one starts a new one
    const escapedDestFilePath = await shell.bash(`printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { encoding: 'utf8' });
    debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`);

    // tty is off so binary file contents pass through unmodified (see createExec)
    const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false });
    const destStream = await startExec(app, execId, { tty: false });

    return new Promise((resolve, reject) => {
        // once() ensures we reject only for the first error even if both streams fail
        const done = once(error => reject(new BoxError(BoxError.FS_ERROR, error.message)));

        const sourceStream = fs.createReadStream(sourceFilePath);
        sourceStream.on('error', done);
        destStream.on('error', done);

        destStream.on('finish', resolve);

        sourceStream.pipe(destStream);
    });
}
|
|
|
|
// Persists the app object as APPS_DATA_DIR/<id>/config.json (plus the app icon, when
// one can be fetched) so the configuration can later be re-imported via loadConfig().
async function writeConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const configFilePath = path.join(paths.APPS_DATA_DIR, app.id + '/config.json');
    const written = safe.fs.writeFileSync(configFilePath, JSON.stringify(app, null, 4));
    if (!written) throw new BoxError(BoxError.FS_ERROR, 'Error creating config.json: ' + safe.error.message);

    // the icon is best-effort; failure to fetch it is not an error
    const [iconsError, icons] = await safe(getIcons(app.id));
    if (!iconsError && icons.icon) safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png'), icons.icon);
}
|
|
|
|
// Re-applies a previously persisted per-app config.json (and icon, if present) to the
// app record. Missing or unparseable config results in only the icon being applied.
async function loadConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const configContents = safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'));
    const appConfig = safe.JSON.parse(configContents);

    // only whitelisted user-configurable fields are restored
    const data = appConfig
        ? _.pick(appConfig, ['memoryLimit', 'cpuQuota', 'enableBackup', 'reverseProxyConfig', 'env', 'servicesConfig', 'label', 'tags', 'enableAutomaticUpdate'])
        : {};

    const icon = safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/icon.png'));
    if (icon) data.icon = icon;

    await update(app.id, data);
}
|
|
|
|
// Validates user-supplied port bindings (env var name -> host port) against the app
// manifest's tcpPorts/udpPorts declarations and the platform's reserved ports.
// ports may be null (nothing to validate). Returns null when valid, a BoxError otherwise.
function validatePorts(ports, manifest) {
    assert.strictEqual(typeof ports, 'object'); // note: typeof null === 'object', null handled below
    assert.strictEqual(typeof manifest, 'object');

    // keep the public ports in sync with firewall rules in setup/start/cloudron-firewall.sh
    // these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1
    // for custom tcp ports
    const RESERVED_PORTS = [
        22, /* ssh */
        25, /* smtp */
        80, /* http */
        143, /* imap */
        202, /* alternate ssh */
        222, /* proftd */
        443, /* https */
        465, /* smtps */
        587, /* submission */
        993, /* imaps */
        995, /* pop3s */
        2003, /* graphite (lo) */
        constants.PORT, /* app server (lo) */
        constants.AUTHWALL_PORT, /* protected sites */
        constants.INTERNAL_SMTP_PORT, /* internal smtp port (lo) */
        constants.LDAP_PORT,
        3306, /* mysql (lo) */
        3478, /* turn,stun */
        4190, /* managesieve */
        5349, /* turn,stun TLS */
        8000, /* ESXi monitoring */
    ];

    const RESERVED_PORT_RANGES = [
        [constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END] /* turn udp ports */
    ];

    // low ports that apps may nevertheless bind (DNS use cases)
    const ALLOWED_PORTS = [
        53, // dns 53 is special and adblocker apps can use them
        853 // dns over tls
    ];

    if (!ports) return null;

    const tcpPorts = manifest.tcpPorts || {};
    const udpPorts = manifest.udpPorts || {};

    for (const portName in ports) {
        if (!/^[A-Z0-9_]+$/.test(portName)) return new BoxError(BoxError.BAD_FIELD, `${portName} is not a valid environment variable in ports`);

        const hostPort = ports[portName];
        if (!Number.isInteger(hostPort)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not an integer in ${portName} ports`);
        if (RESERVED_PORTS.indexOf(hostPort) !== -1) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`);
        if (RESERVED_PORT_RANGES.find(range => (hostPort >= range[0] && hostPort <= range[1]))) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`);
        if (ALLOWED_PORTS.indexOf(hostPort) === -1 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} for ${portName} is not in permitted range in ports`);

        // it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and ports. missing values implies the service is disabled
        const portSpec = tcpPorts[portName] || udpPorts[portName];
        if (!portSpec) return new BoxError(BoxError.BAD_FIELD, `Invalid portBinding ${portName}`);
        // typo fix in message: was "a different value that the default"
        if (portSpec.readOnly && portSpec.defaultValue !== hostPort) return new BoxError(BoxError.BAD_FIELD, `portBinding ${portName} is readOnly and cannot have a different value than the default`);
        // a binding occupies [hostPort, hostPort + portCount - 1]; the last port must be <= 65535.
        // off-by-one fix: the old check `hostPort + portCount > 65535` rejected a valid range
        // ending exactly at 65535 (e.g. hostPort 65535 with the default portCount of 1),
        // contradicting the single-port range check above
        if ((hostPort + (portSpec.portCount || 1) - 1) > 65535) return new BoxError(BoxError.BAD_FIELD, `${hostPort}+${portSpec.portCount} for ${portName} exceeds valid port range`);
    }

    return null;
}
|
|
|
|
// Ensures every device-mapping key is an absolute path under /dev.
// Returns null when valid, a BoxError for the first offending key.
function validateDevices(devices) {
    for (const devicePath in devices) {
        if (!devicePath.startsWith('/dev/')) return new BoxError(BoxError.BAD_FIELD, `"${devicePath}" must start with /dev/`);
    }

    return null;
}
|
|
|
|
// Hands an already-created app task over to the app task manager for execution.
// Picks a task memory limit (MB) based on what the task will do, then registers a
// completion callback that records crash/stop errors on the app and clears its taskId.
async function scheduleTask(appId, installationState, taskId, auditSource) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof taskId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    let memoryLimit = 400; // MB, default for ordinary tasks
    if (installationState === ISTATE_PENDING_CLONE || installationState === ISTATE_PENDING_RESTORE
        || installationState === ISTATE_PENDING_IMPORT || installationState === ISTATE_PENDING_UPDATE) {
        // tasks that move backup data around get the largest memory limit configured on
        // any backup site used for this app (site limits are in bytes, hence /1024/1024)
        const sites = await backupSites.listByContentForUpdates(appId);
        memoryLimit = sites.reduce((acc, cur) => cur.limits?.memoryLimit ? Math.max(cur.limits.memoryLimit/1024/1024, acc) : acc, 400);
    } else if (installationState === ISTATE_PENDING_DATA_DIR_MIGRATION) {
        memoryLimit = 1024; // cp takes more memory than we think
    }

    const options = { timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15, memoryLimit };

    // the callback fires once the task process has finished (successfully or not)
    appTaskManager.scheduleTask(appId, taskId, options, async function (error) {
        debug(`scheduleTask: task ${taskId} of ${appId} completed. error: %o`, error);
        if (error?.code === tasks.ECRASHED || error?.code === tasks.ESTOPPED) { // if task crashed, update the error
            debug(`Apptask crashed/stopped: ${error.message}`);
            const appError = {
                message: error.message,
                reason: BoxError.TASK_ERROR,
                crashed: error.code === tasks.ECRASHED,
                stopped: error.code === tasks.ESTOPPED,
                taskId,
                installationState
            };
            // move the app into the error state; update failures are only debug-logged
            await safe(update(appId, { installationState: ISTATE_ERROR, error: appError, taskId: null }), { debug });
        } else if (!(installationState === ISTATE_PENDING_UNINSTALL && !error)) { // clear out taskId except for successful uninstall
            await safe(update(appId, { taskId: null }), { debug });
        }

        await safe(onTaskFinished(error, appId, installationState, taskId, auditSource), { debug }); // ignore error
    });
}
|
|
|
|
// Creates a tasks-table entry for this app, transitions the app into installationState
// (clearing any previous error) and, unless told otherwise, schedules the task right away.
// Returns the new task id. Throws BAD_STATE when another task already holds the app.
async function addTask(appId, installationState, task, auditSource) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof task, 'object'); // { args, values }
    assert.strictEqual(typeof auditSource, 'object');

    const { args, values } = task;
    // TODO: match the SQL logic to match checkAppState. this means checking the error.installationState and installationState. Unfortunately, former is JSON right now
    const requiredState = null; // 'requiredState' in task ? task.requiredState : ISTATE_INSTALLED;
    const scheduleNow = 'scheduleNow' in task ? task.scheduleNow : true;
    const requireNullTaskId = 'requireNullTaskId' in task ? task.requireNullTaskId : true;

    const taskId = await tasks.add(tasks.TASK_APP, [ appId, args ]);

    const newFields = Object.assign({ installationState, taskId, error: null }, values);
    const [setTaskError] = await safe(setTask(appId, newFields, { requiredState, requireNullTaskId }));
    if (setTaskError) {
        // NOT_FOUND can mean the app went away OR that a taskId already exists
        if (setTaskError.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.BAD_STATE, 'Another task is scheduled for this app');
        throw setTaskError;
    }

    if (scheduleNow) await safe(scheduleTask(appId, installationState, taskId, auditSource), { debug }); // ignore error

    return taskId;
}
|
|
|
|
// Decides whether the app may transition into the requested pending state.
// Returns null when allowed, a BoxError(BAD_STATE) describing why otherwise.
function checkAppState(app, state) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof state, 'string');

    // a non-null taskId means some task already owns the app
    if (app.taskId) return new BoxError(BoxError.BAD_STATE, `Locked by task ${app.taskId} : ${app.installationState} / ${app.runState}`);

    if (app.installationState === ISTATE_ERROR) {
        // re-running the very task that errored is always allowed
        if (app.error.installationState === state) return null;

        // uninstall, restore and import are the only other ways out of the error state
        const allowedInError = [ ISTATE_PENDING_UNINSTALL, ISTATE_PENDING_RESTORE, ISTATE_PENDING_IMPORT ];
        if (!allowedInError.includes(state)) return new BoxError(BoxError.BAD_STATE, 'Not allowed in error state');
    }

    // can't backup or restore since app addons are down. can't update because migration scripts won't run
    const disallowedWhenStopped = [ ISTATE_PENDING_UPDATE, ISTATE_PENDING_RESTORE, ISTATE_PENDING_IMPORT ];
    if (app.runState === RSTATE_STOPPED && disallowedWhenStopped.includes(state)) return new BoxError(BoxError.BAD_STATE, 'Not allowed in stopped state');

    return null;
}
|
|
|
|
// Installs a new app: validates all user-supplied fields and the manifest, reserves
// locations/ports in the database, then queues the actual install task.
// data.manifest must already be downloaded/parsed. Returns { id, taskId }.
// Throws BoxError (BAD_FIELD/BAD_STATE/PLAN_LIMIT/...) on any validation failure.
async function install(data, auditSource) {
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.manifest, 'object'); // manifest is already downloaded

    // enforce the subscription plan's app count limit up front
    const features = appstore.getFeatures();
    const installedAppCount = await getCount();
    if (features.appMaxCount <= installedAppCount) throw new BoxError(BoxError.PLAN_LIMIT, 'app limit reached');

    // normalize inputs and apply defaults for everything optional
    const subdomain = data.subdomain.toLowerCase(),
        domain = data.domain.toLowerCase(),
        accessRestriction = data.accessRestriction || null,
        operators = data.operators || null,
        memoryLimit = data.memoryLimit || 0,
        cpuQuota = data.cpuQuota || 100,
        debugMode = data.debugMode || null,
        enableBackup = 'enableBackup' in data ? data.enableBackup : true,
        enableAutomaticUpdate = 'enableAutomaticUpdate' in data ? data.enableAutomaticUpdate : true,
        redirectDomains = data.redirectDomains || [],
        aliasDomains = data.aliasDomains || [],
        devices = data.devices || {},
        env = data.env || {},
        label = data.label || null,
        tags = data.tags || [],
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
        skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false,
        enableTurn = 'enableTurn' in data ? data.enableTurn : true,
        appStoreId = data.appStoreId || '',
        versionsUrl = data.versionsUrl || '',
        upstreamUri = data.upstreamUri || '',
        manifest = data.manifest,
        notes = data.notes || null,
        crontab = data.crontab || null;

    // when redis is optional, do not enable it by default. it's mostly used for caching in those setups
    const enableRedis = 'enableRedis' in data ? data.enableRedis : !manifest.addons?.redis?.optional;

    let error = manifestFormat.parse(manifest);
    if (error) throw new BoxError(BoxError.BAD_FIELD, `Manifest error: ${error.message}`);

    // source builds get a locally tagged image; Date.now() makes the tag unique per install
    if (data.sourceArchiveFilePath) manifest.dockerImage = `local/${manifest.id}:${manifest.version}-${Date.now()}`;

    error = await checkManifest(manifest);
    if (error) throw error;

    // each validator returns null or a BoxError; any error aborts the install
    error = validatePorts(data.ports || null, manifest);
    if (error) throw error;
    const portBindings = translateToPortBindings(data.ports || null, manifest);

    error = validateAccessRestriction(accessRestriction);
    if (error) throw error;

    error = validateAccessRestriction(operators); // not a typo. same structure for operators and accessRestriction
    if (error) throw error;

    error = validateMemoryLimit(manifest, memoryLimit);
    if (error) throw error;

    error = validateDebugMode(debugMode);
    if (error) throw error;

    error = validateLabel(label);
    if (error) throw error;

    error = validateCpuQuota(cpuQuota);
    if (error) throw error;

    parseCrontab(crontab);

    // upstreamUri is only validated when the caller provided it
    if ('upstreamUri' in data) error = validateUpstreamUri(upstreamUri);
    if (error) throw error;

    error = validateTags(tags);
    if (error) throw error;

    error = validateSecondaryDomains(data.secondaryDomains || {}, manifest);
    if (error) throw error;
    const secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    let sso = 'sso' in data ? data.sso : null;
    if ('sso' in data && !('optionalSso' in manifest)) throw new BoxError(BoxError.BAD_FIELD, 'sso can only be specified for apps with optionalSso');
    // if sso was unspecified, enable it by default if possible
    if (sso === null) sso = !!manifest.addons?.ldap || !!manifest.addons?.proxyAuth || !!manifest.addons?.oidc;

    error = validateDevices(devices);
    if (error) throw error;

    error = validateEnv(env);
    if (error) throw error;

    if (constants.DEMO && constants.DEMO_BLOCKED_APPS.includes(appStoreId)) throw new BoxError(BoxError.BAD_FIELD, 'This app is blocked in the demo');

    // sendmail is enabled by default
    const enableMailbox = 'enableMailbox' in data ? data.enableMailbox : true;
    const mailboxName = manifest.addons?.sendmail ? mailboxNameForSubdomain(subdomain, manifest) : null;
    const mailboxDomain = manifest.addons?.sendmail ? domain : null;

    // icon arrives base64-encoded; an empty decode means it was not valid base64
    let icon = data.icon || null;
    if (icon) {
        icon = Buffer.from(icon, 'base64');
        if (icon.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
    }

    // every fqdn the app will occupy (primary, secondary, redirect, alias) is validated together
    const locations = [new Location(subdomain, domain, Location.TYPE_PRIMARY)]
        .concat(secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)))
        .concat(redirectDomains.map(rd => new Location(rd.subdomain, rd.domain, Location.TYPE_REDIRECT)))
        .concat(aliasDomains.map(ad => new Location(ad.subdomain, ad.domain, Location.TYPE_ALIAS)));

    error = await validateLocations(locations);
    if (error) throw error;

    if (constants.DEMO && (await getCount() >= constants.DEMO_APP_LIMIT)) throw new BoxError(BoxError.BAD_STATE, 'Too many installed apps, please uninstall a few and try again');

    const appId = crypto.randomUUID();
    debug(`Installing app ${appId}`);

    const app = {
        accessRestriction,
        operators,
        memoryLimit,
        cpuQuota,
        sso,
        debugMode,
        mailboxName,
        mailboxDomain,
        enableBackup,
        enableAutomaticUpdate,
        secondaryDomains,
        redirectDomains,
        aliasDomains,
        env,
        devices,
        label,
        tags,
        icon,
        enableMailbox,
        upstreamUri,
        enableTurn,
        enableRedis,
        notes,
        crontab,
        runState: RSTATE_RUNNING,
        installationState: ISTATE_PENDING_INSTALL
    };

    // the add() may fail on uniqueness (location or port already taken); translate that
    // into a user-friendly duplicate error
    const [addError] = await safe(add(appId, appStoreId, versionsUrl, manifest, subdomain, domain, portBindings, app));
    if (addError && addError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(addError.message, locations, portBindings);
    if (addError) throw addError;

    // move the uploaded source archive into place for source builds
    if (data.sourceArchiveFilePath) await fileUtils.renameFile(data.sourceArchiveFilePath, `${paths.SOURCE_ARCHIVES_DIR}/${appId}.tar.gz`);

    const task = {
        args: { restoreConfig: null, skipDnsSetup, overwriteDns },
        values: { },
        requiredState: app.installationState
    };

    const taskId = await addTask(appId, app.installationState, task, auditSource);

    // build the eventlog payload: the stored app without the (binary) icon, plus fqdns
    const newApp = Object.assign({}, _.omit(app, ['icon']), { appStoreId, versionsUrl, manifest, subdomain, domain, portBindings });
    newApp.fqdn = dns.fqdn(newApp.subdomain, newApp.domain);
    newApp.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId, sourceBuild: !!data.sourceArchiveFilePath, app: newApp, taskId });

    return { id : appId, taskId };
}
|
|
|
|
// Changes the app's memory limit via a resize task. Returns { taskId }.
async function setMemoryLimit(app, memoryLimit, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof memoryLimit, 'number');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_RESIZE);
    if (stateError) throw stateError;

    const limitError = validateMemoryLimit(app.manifest, memoryLimit);
    if (limitError) throw limitError;

    const taskId = await addTask(appId, ISTATE_PENDING_RESIZE, { args: {}, values: { memoryLimit } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, memoryLimit, taskId });

    return { taskId };
}
|
|
|
|
// Changes the app's cpu quota via a resize task. Returns { taskId }.
async function setCpuQuota(app, cpuQuota, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof cpuQuota, 'number');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    let error = checkAppState(app, ISTATE_PENDING_RESIZE);
    if (error) throw error;

    error = validateCpuQuota(cpuQuota);
    if (error) throw error;

    const task = {
        args: {},
        values: { cpuQuota }
    };
    // bug fix: this was `const taskId = await safe(addTask(...))`, which made taskId the
    // [error, result] tuple that safe() returns (logged to the eventlog and returned to
    // the caller as-is) and silently swallowed addTask failures. Destructure and rethrow
    // like the sibling setters (see setMounts/setDevices).
    const [taskError, taskId] = await safe(addTask(appId, ISTATE_PENDING_RESIZE, task, auditSource));
    if (taskError) throw taskError;

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, cpuQuota, taskId });

    return { taskId };
}
|
|
|
|
// NOTE(review): the two lines below appear to describe a repair/re-configure operation,
// not the setMounts function that follows — verify and relocate next to the right function.
// does a re-configure when called from most states. for install/clone errors, it re-installs with an optional manifest
// re-configure can take a dockerImage but not a manifest because re-configure does not clean up addons
|
|
// Replaces the app's volume mounts. Triggers a container re-create task.
// An ALREADY_EXISTS failure while setting up the task is surfaced as a CONFLICT
// about duplicate mount points.
async function setMounts(app, mounts, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(Array.isArray(mounts));
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const [taskError, taskId] = await safe(addTask(appId, ISTATE_PENDING_RECREATE_CONTAINER, { args: {}, values: { mounts } }, auditSource));
    if (taskError) {
        if (taskError.reason === BoxError.ALREADY_EXISTS) throw new BoxError(BoxError.CONFLICT, 'Duplicate mount points');
        throw taskError;
    }

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, mounts, taskId });

    return { taskId };
}
|
|
|
|
// Updates the host device nodes exposed to the app. Triggers a container re-create task.
async function setDevices(app, devices, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof devices, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const deviceError = validateDevices(devices);
    if (deviceError) throw deviceError;

    const [taskError, taskId] = await safe(addTask(appId, ISTATE_PENDING_RECREATE_CONTAINER, { args: {}, values: { devices } }, auditSource));
    if (taskError) throw taskError;

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, devices, taskId });

    return { taskId };
}
|
|
|
|
// Replaces the app's custom environment variables. Triggers a container re-create task.
async function setEnvironment(app, env, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof env, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const envError = validateEnv(env);
    if (envError) throw envError;

    const taskId = await addTask(appId, ISTATE_PENDING_RECREATE_CONTAINER, { args: {}, values: { env } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, env, taskId });

    return { taskId };
}
|
|
|
|
// Puts the app into (or out of) debug mode via a debug task.
async function setDebugMode(app, debugMode, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof debugMode, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_DEBUG);
    if (stateError) throw stateError;

    const modeError = validateDebugMode(debugMode);
    if (modeError) throw modeError;

    const taskId = await addTask(appId, ISTATE_PENDING_DEBUG, { args: {}, values: { debugMode } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, debugMode, taskId });

    return { taskId };
}
|
|
|
|
// Enables/disables the app's outgoing mailbox (sendmail addon) and optionally renames it.
// data: { enable, mailboxName?, mailboxDomain?, mailboxDisplayName? }. Returns { taskId }.
async function setMailbox(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.enable, 'boolean');

    const enableMailbox = data.enable;

    const appId = app.id;
    let error = checkAppState(app, ISTATE_PENDING_SERVICES_CHANGE);
    if (error) throw error;

    if (!app.manifest.addons?.sendmail) throw new BoxError(BoxError.BAD_FIELD, 'App does not use sendmail');
    const optional = 'optional' in app.manifest.addons.sendmail ? app.manifest.addons.sendmail.optional : false;
    if (!optional && !enableMailbox) throw new BoxError(BoxError.BAD_FIELD, 'App requires sendmail to be enabled');

    const mailboxDisplayName = data.mailboxDisplayName || '';
    let mailboxName = data.mailboxName || null;
    const mailboxDomain = data.mailboxDomain || null;

    if (enableMailbox) {
        await mail.getDomain(mailboxDomain); // check if domain exists

        if (mailboxName) {
            error = mail.validateName(mailboxName);
            if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
        } else {
            // bug fix: this passed (app.subdomain, app.domain, app.manifest), but the
            // helper takes (subdomain, manifest) at the other call sites (install,
            // setLocation), so the domain landed in the manifest parameter
            mailboxName = mailboxNameForSubdomain(app.subdomain, app.manifest);
        }

        if (mailboxDisplayName) {
            error = mail.validateDisplayName(mailboxDisplayName);
            if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
        }
    }

    const task = {
        args: {},
        values: { enableMailbox, mailboxName, mailboxDomain, mailboxDisplayName }
    };
    const taskId = await addTask(appId, ISTATE_PENDING_SERVICES_CHANGE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, mailboxName, mailboxDomain, mailboxDisplayName, taskId });

    return { taskId };
}
|
|
|
|
// Enables/disables the app's incoming mailbox (recvmail addon) and sets its address.
// data: { enable, inboxName?, inboxDomain? }. Returns { taskId }.
async function setInbox(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.enable, 'boolean');

    const enableInbox = data.enable;
    const appId = app.id;

    let error = checkAppState(app, ISTATE_PENDING_SERVICES_CHANGE);
    if (error) throw error;

    if (!app.manifest.addons?.recvmail) throw new BoxError(BoxError.BAD_FIELD, 'App does not use recvmail addon');

    const inboxName = data.inboxName || null;
    const inboxDomain = data.inboxDomain || null;

    if (enableInbox) {
        const mailDomain = await mail.getDomain(data.inboxDomain); // check if domain exists
        if (!mailDomain.enabled) throw new BoxError(BoxError.BAD_FIELD, 'Domain does not have incoming email enabled');

        error = mail.validateName(data.inboxName);
        if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
    }

    const taskId = await addTask(appId, ISTATE_PENDING_SERVICES_CHANGE, { args: {}, values: { enableInbox, inboxName, inboxDomain } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableInbox, inboxName, inboxDomain, taskId });

    return { taskId };
}
|
|
|
|
// Toggles the optional turn addon for this app via a services-change task.
async function setTurn(app, enableTurn, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enableTurn, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_SERVICES_CHANGE);
    if (stateError) throw stateError;

    const turnAddon = app.manifest.addons?.turn;
    if (!turnAddon) throw new BoxError(BoxError.BAD_FIELD, 'App does not use turn addon');
    if (!turnAddon.optional) throw new BoxError(BoxError.BAD_FIELD, 'turn service is not optional');

    const taskId = await addTask(appId, ISTATE_PENDING_SERVICES_CHANGE, { args: {}, values: { enableTurn } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableTurn, taskId });

    return { taskId };
}
|
|
|
|
// Toggles the optional redis addon for this app via a services-change task.
async function setRedis(app, enableRedis, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enableRedis, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, ISTATE_PENDING_SERVICES_CHANGE);
    if (stateError) throw stateError;

    const redisAddon = app.manifest.addons?.redis;
    if (!redisAddon) throw new BoxError(BoxError.BAD_FIELD, 'App does not use redis addon');
    if (!redisAddon.optional) throw new BoxError(BoxError.BAD_FIELD, 'redis service is not optional');

    const taskId = await addTask(appId, ISTATE_PENDING_SERVICES_CHANGE, { args: {}, values: { enableRedis } }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, enableRedis, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues a location-change task: moves the app's primary subdomain/domain and
 * resets or replaces its port bindings, secondary, redirect and alias domains.
 *
 * @param {object} app - app record (reads id, manifest, mailboxName, current domains/ports)
 * @param {object} data - new location: { subdomain, domain, ports?, secondaryDomains?,
 *   redirectDomains?, aliasDomains?, skipDnsSetup?, overwriteDns? }
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 * @throws {BoxError} on bad state, invalid ports/domains, or duplicate locations
 */
async function setLocation(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    let error = checkAppState(app, ISTATE_PENDING_LOCATION_CHANGE);
    if (error) throw error;

    const values = {
        subdomain: data.subdomain.toLowerCase(),
        domain: data.domain.toLowerCase(),
        // these are intentionally reset, if not set
        portBindings: {},
        secondaryDomains: [],
        redirectDomains: [],
        aliasDomains: []
    };

    // only touch port bindings when the caller supplied a 'ports' key; otherwise they are reset above
    if ('ports' in data) {
        error = validatePorts(data.ports || null, app.manifest);
        if (error) throw error;
        values.portBindings = translateToPortBindings(data.ports || null, app.manifest);
    }

    // rename the auto-created mailbox to match the new location
    // ('.app' suffix marks auto-generated mailbox names; user-chosen names are preserved)
    if (app.manifest.addons?.sendmail && app.mailboxName?.endsWith('.app')) {
        values.mailboxName = mailboxNameForSubdomain(values.subdomain, app.manifest);
        values.mailboxDomain = values.domain;
    }

    error = validateSecondaryDomains(data.secondaryDomains || {}, app.manifest);
    if (error) throw error;
    values.secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    if ('redirectDomains' in data) {
        values.redirectDomains = data.redirectDomains;
    }

    if ('aliasDomains' in data) {
        values.aliasDomains = data.aliasDomains;
    }

    // collect every location this app would occupy so they can be validated together
    const locations = [new Location(values.subdomain, values.domain, Location.TYPE_PRIMARY)]
        .concat(values.secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)))
        .concat(values.redirectDomains.map(rd => new Location(rd.subdomain, rd.domain, Location.TYPE_REDIRECT)))
        .concat(values.aliasDomains.map(ad => new Location(ad.subdomain, ad.domain, Location.TYPE_ALIAS)));

    error = await validateLocations(locations);
    if (error) throw error;

    const task = {
        args: {
            // snapshot of the previous location so the task can undo DNS/proxy config
            oldConfig: _.pick(app, ['subdomain', 'domain', 'fqdn', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'portBindings']),
            skipDnsSetup: !!data.skipDnsSetup,
            overwriteDns: !!data.overwriteDns
        },
        values
    };
    // addTask may fail with ALREADY_EXISTS when a location/port clashes with another app;
    // translate that into a detailed duplicate error for the caller
    const [taskError, taskId] = await safe(addTask(appId, ISTATE_PENDING_LOCATION_CHANGE, task, auditSource));
    if (taskError && taskError.reason !== BoxError.ALREADY_EXISTS) throw taskError;
    if (taskError && taskError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(taskError.message, locations, values.portBindings);

    // decorate values with fqdns purely for the event log entry below (mutates in place)
    values.fqdn = dns.fqdn(values.subdomain, values.domain);
    values.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    values.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    values.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, Object.assign({ appId, app, taskId }, values));

    return { taskId };
}
|
|
|
|
/**
 * Queues a data-dir migration task that moves the app's data onto a volume
 * (or back to default storage when volumeId is falsy).
 *
 * @param {object} app - app record (reads id)
 * @param {?string} volumeId - target volume id, or null/'' for default storage
 * @param {?string} volumePrefix - path prefix inside the volume, or null
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function setStorage(app, volumeId, volumePrefix, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(volumeId === null || typeof volumeId === 'string');
    assert(volumePrefix === null || typeof volumePrefix === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_DATA_DIR_MIGRATION);
    if (stateError) throw stateError;

    if (!volumeId) {
        // normalize both to null when reverting to default storage
        volumeId = null;
        volumePrefix = null;
    } else {
        await checkStorage(app, volumeId, volumePrefix); // throws if volume/prefix is unusable
    }

    const migrationTask = {
        args: { newStorageVolumeId: volumeId, newStorageVolumePrefix: volumePrefix },
        values: {}
    };
    const taskId = await addTask(app.id, ISTATE_PENDING_DATA_DIR_MIGRATION, migrationTask, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, volumeId, volumePrefix, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues an update task that moves the app to a new manifest (package version).
 *
 * @param {object} app - app record (reads id, manifest, runState, appStoreId, versionsUrl, memoryLimit, mailbox fields)
 * @param {object} data - { manifest, skipBackup?, appStoreId?, versionsUrl?, icon?, force?, sourceArchiveFilePath? }
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 * @throws {BoxError} BAD_STATE for stopped/debug apps, BAD_FIELD for manifest problems and downgrades
 */
async function updateApp(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(data && typeof data === 'object');
    assert(data.manifest && typeof data.manifest === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const skipBackup = !!data.skipBackup, appId = app.id, manifest = data.manifest;
    const values = { updateInfo: null }; // clear update indicator immediately

    if (app.runState === RSTATE_STOPPED) throw new BoxError(BoxError.BAD_STATE, 'Stopped apps cannot be updated');

    let error = checkAppState(app, ISTATE_PENDING_UPDATE);
    if (error) throw error;

    error = manifestFormat.parse(manifest);
    if (error) throw new BoxError(BoxError.BAD_FIELD, `Manifest error: ${error.message}`); // fix: message previously lacked a space after the colon

    // local source builds get a unique image tag so docker does not reuse a stale image
    if (data.sourceArchiveFilePath) manifest.dockerImage = `local/${manifest.id}:${manifest.version}-${Date.now()}`;

    error = await checkManifest(manifest);
    if (error) throw error;

    if (!skipBackup) {
        const sites = await backupSites.listByContentForUpdates(app.id);
        if (sites.length === 0) throw new BoxError(BoxError.BAD_STATE, 'App has no backup site for updates');
    }

    const updateConfig = { skipBackup, manifest }; // this will clear appStoreId/versionsUrl when updating from a repo and set it if passed in for update route
    if ('appStoreId' in data) updateConfig.appStoreId = data.appStoreId;
    if ('versionsUrl' in data) updateConfig.versionsUrl = data.versionsUrl;

    // prevent user from installing a app with different manifest id over an existing app
    // this allows cloudron install -f --app <appid> for an app installed from the appStore
    if (app.manifest.id !== updateConfig.manifest.id) {
        if (!data.force) throw new BoxError(BoxError.BAD_FIELD, 'manifest id does not match. force to override');
    }

    // suffix '0' if prerelease is missing for semver.lte to work as expected
    const currentVersion = semver.prerelease(app.manifest.version) ? app.manifest.version : `${app.manifest.version}-0`;
    const updateVersion = semver.prerelease(updateConfig.manifest.version) ? updateConfig.manifest.version : `${updateConfig.manifest.version}-0`;
    if ((app.appStoreId !== '' || app.versionsUrl !== '') && semver.lte(updateVersion, currentVersion)) {
        if (!data.force) throw new BoxError(BoxError.BAD_FIELD, 'Downgrades are not permitted for apps installed from AppStore or Community. force to override');
    }

    if ('icon' in data) {
        if (data.icon) {
            data.icon = Buffer.from(data.icon, 'base64');
            if (data.icon.length === 0) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
        }
        values.icon = data.icon;
    }

    // do not update apps in debug mode
    if (app.debugMode && !data.force) throw new BoxError(BoxError.BAD_STATE, 'debug mode enabled. force to override');

    // Ensure we update the memory limit in case the new app requires more memory as a minimum
    // 0 and -1 are special updateConfig for memory limit indicating unset and unlimited
    if (app.memoryLimit > 0 && updateConfig.manifest.memoryLimit && app.memoryLimit < updateConfig.manifest.memoryLimit) {
        updateConfig.memoryLimit = updateConfig.manifest.memoryLimit;
    }

    if (!manifest.addons?.sendmail) { // clear if the update removed addon
        values.mailboxName = values.mailboxDomain = null;
    } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since update added the addon
        values.mailboxName = mailboxNameForSubdomain(app.subdomain, manifest);
        values.mailboxDomain = app.domain;
    }

    if (!manifest.addons?.recvmail) { // clear if the update removed addon. required for fk constraint
        values.enableInbox = false;
        values.inboxName = values.inboxDomain = null;
    }

    const hasSso = !!updateConfig.manifest.addons?.proxyAuth || !!updateConfig.manifest.addons?.ldap || !!manifest.addons?.oidc;
    if (!hasSso && app.sso) values.sso = false; // turn off sso flag, if the update removes sso options

    // stage the uploaded source archive where the app task expects it
    if (data.sourceArchiveFilePath) await fileUtils.renameFile(data.sourceArchiveFilePath, `${paths.SOURCE_ARCHIVES_DIR}/${appId}.tar.gz`);

    const task = {
        args: { updateConfig },
        values
    };
    const taskId = await addTask(appId, ISTATE_PENDING_UPDATE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_UPDATE, auditSource, { appId, app, sourceBuild: !!data.sourceArchiveFilePath, skipBackup, toManifest: manifest, fromManifest: app.manifest, force: data.force, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues a repair task for an app that is stuck in an error state. The repair
 * re-runs the installation state the app failed in (falling back to
 * pending_configure), optionally with a replacement manifest or docker image.
 *
 * @param {object} app - app record (reads id, error, manifest, mailboxName, subdomain, domain)
 * @param {object} data - { manifest? } for failed installs/clones, { dockerImage? } otherwise
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function repair(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object'); // { manifest }
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    // resume from the state that failed, if known; otherwise just reconfigure
    let errorState = (app.error && app.error.installationState) || ISTATE_PENDING_CONFIGURE;

    const task = {
        args: {},
        values: {},
        requiredState: null
    };

    // maybe split this into a separate route like reinstall?
    if (errorState === ISTATE_PENDING_INSTALL || errorState === ISTATE_PENDING_CLONE) {
        task.args = { skipDnsSetup: false, overwriteDns: true };
        // a replacement manifest may be supplied to repair a broken install
        if (data.manifest) {
            let error = manifestFormat.parse(data.manifest);
            if (error) throw new BoxError(BoxError.BAD_FIELD, `manifest error: ${error.message}`);

            error = await checkManifest(data.manifest);
            if (error) throw error;

            if (!data.manifest.addons?.sendmail) { // clear if repair removed addon
                task.values.mailboxName = task.values.mailboxDomain = null;
            } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since repair added the addon
                task.values.mailboxName = mailboxNameForSubdomain(app.subdomain, data.manifest);
                task.values.mailboxDomain = app.domain;
            }

            task.values.manifest = data.manifest;
            task.args.oldManifest = app.manifest;
        }
    } else {
        // any other failure is repaired by re-running configure
        errorState = ISTATE_PENDING_CONFIGURE;
        // optionally swap the docker image while keeping the rest of the manifest
        if (data.dockerImage) {
            const newManifest = Object.assign({}, app.manifest, { dockerImage: data.dockerImage });
            task.values.manifest = newManifest;
        }
    }

    const taskId = await addTask(appId, errorState, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_REPAIR, auditSource, { app, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues a restore task that rolls the app back to a previous backup. An empty
 * or null backupId reinstalls the app from its current manifest instead.
 *
 * @param {object} app - app record (reads id, manifest, mailboxName, subdomain, domain)
 * @param {string} backupId - backup to restore, or '' for a reinstall
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 * @throws {BoxError} BAD_FIELD for missing/legacy-encrypted backups
 */
async function restore(app, backupId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    let error = checkAppState(app, ISTATE_PENDING_RESTORE);
    if (error) throw error;

    // for empty or null backupId, use existing manifest to mimic a reinstall
    const restoreBackup = backupId ? await backups.get(backupId) : { manifest: app.manifest };
    if (!restoreBackup) throw new BoxError(BoxError.BAD_FIELD, 'No such backup');
    const manifest = restoreBackup.manifest;

    if (!manifest) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Could not get restore manifest');
    if (restoreBackup.encryptionVersion === 1) throw new BoxError(BoxError.BAD_FIELD, 'This encrypted backup was created with an older Cloudron version and has to be restored using the CLI tool');

    // re-validate because this new box version may not accept old configs
    error = await checkManifest(manifest);
    if (error) throw error;

    const values = { manifest };
    if (!manifest.addons?.sendmail) { // clear if restore removed addon
        values.mailboxName = values.mailboxDomain = null;
    } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since restore added the addon
        values.mailboxName = mailboxNameForSubdomain(app.subdomain, manifest);
        values.mailboxDomain = app.domain;
    }

    if (!manifest.addons?.recvmail) { // recvmail is always optional. clear if restore removed addon
        values.enableInbox = false;
        values.inboxName = values.inboxDomain = null;
    }

    // restoreBackup.id is undefined on the reinstall path (no backup record)
    const restoreConfig = { backupId: restoreBackup.id };

    const task = {
        args: {
            restoreConfig,
            oldManifest: app.manifest,
            skipDnsSetup: !!backupId, // if this is a restore, just skip dns setup. only re-installs should setup dns
            overwriteDns: true
        },
        values
    };

    const taskId = await addTask(appId, ISTATE_PENDING_RESTORE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_RESTORE, auditSource, { app, backupId: restoreBackup.id, remotePath: restoreBackup.remotePath, fromManifest: app.manifest, toManifest: manifest, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues an import task that populates an existing app from external backup
 * data. With data.remotePath a pseudo backup site is created to fetch the data;
 * without it the import happens in-place from data already on disk.
 *
 * @param {object} app - app record (reads id, manifest)
 * @param {object} data - { remotePath?, provider?, config?, format?, encryptionPassword?, encryptedFilenames?, inPlace? }
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function importApp(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_IMPORT);
    if (stateError) throw stateError;

    let restoreConfig;
    if (!data.remotePath) { // inPlace
        restoreConfig = { inPlace: true };
    } else {
        // create a throwaway backup site record describing where to fetch the data from
        const backupSite = await backupSites.createPseudo({
            id: `appimport-${app.id}`,
            provider: data.provider,
            config: data.config,
            format: data.format,
            encryptionPassword: data.encryptionPassword ?? null,
            encryptedFilenames: data.encryptedFilenames ?? false
        });

        restoreConfig = { remotePath: data.remotePath, backupSite };
    }

    const importTask = {
        args: {
            restoreConfig,
            oldManifest: app.manifest,
            skipDnsSetup: false,
            overwriteDns: true
        },
        values: {}
    };
    const taskId = await addTask(app.id, ISTATE_PENDING_IMPORT, importTask, auditSource);

    await eventlog.add(eventlog.ACTION_APP_IMPORT, auditSource, { app, remotePath: data.remotePath, inPlace: data.inPlace, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Returns whether this app is in a state where a backup may be taken.
 *
 * @param {object} app - app record (reads runState and installationState)
 * @returns {boolean}
 */
function canBackupApp(app) {
    // stopped apps cannot be backed up because addons might be down (redis)
    if (app.runState === RSTATE_STOPPED) return false;

    // we used to check the health here but that doesn't work for stopped apps. it's better to just fail
    // and inform the user if the backup fails and the app addons have not been setup yet.
    const backupableStates = [
        ISTATE_INSTALLED,
        ISTATE_PENDING_CONFIGURE,
        ISTATE_PENDING_UPDATE // called from apptask
    ];
    return backupableStates.includes(app.installationState);
}
|
|
|
|
/**
 * Starts a snapshot-only backup task for an app (used for exporting app data).
 * The task is started in the background; errors are only logged via debug.
 *
 * @param {object} app - app record (reads id)
 * @param {string} backupSiteId - backup site id (FIXME: this is not used at all in snapshotOnly mode)
 * @param {object} auditSource - attribution (currently unused by this function)
 * @returns {Promise<{taskId: string}>} id of the started task
 * @throws {BoxError} BAD_STATE when the app cannot be backed up
 */
async function exportApp(app, backupSiteId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (!canBackupApp(app)) throw new BoxError(BoxError.BAD_STATE, 'App cannot be backed up in this state');

    const taskId = await tasks.add(`${tasks.TASK_APP_BACKUP_PREFIX}${app.id}`, [ app.id, backupSiteId, { snapshotOnly: true } ]);
    safe(tasks.startTask(taskId, {}), { debug }); // background

    return { taskId };
}
|
|
|
|
/**
 * Creates a new app at a new location from another app's backup, copying most
 * of the source app's configuration, then queues a clone task to restore the
 * backup data into the new app.
 *
 * @param {object} app - source app record (reads appStoreId, versionsUrl, id)
 * @param {object} data - { backupId, subdomain, domain, secondaryDomains?, ports?, overwriteDns?, skipDnsSetup? }
 * @param {object} user - requesting user (currently unused beyond the assert)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{id: string, taskId: string}>} new app id and queued task id
 * @throws {BoxError} NOT_FOUND/BAD_FIELD/EXTERNAL_ERROR on bad backup or location conflicts
 */
async function clone(app, data, user, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert(user && typeof user === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const subdomain = data.subdomain.toLowerCase(),
        domain = data.domain.toLowerCase(),
        backupId = data.backupId,
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
        skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false;

    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

    const cloneBackup = await backups.get(backupId);

    if (!cloneBackup) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
    if (!cloneBackup.manifest) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Could not detect restore manifest');
    if (cloneBackup.encryptionVersion === 1) throw new BoxError(BoxError.BAD_FIELD, 'This encrypted backup was created with an older Cloudron version and cannot be cloned');

    // the clone keeps the source app's store/update channel
    const manifest = cloneBackup.manifest, appStoreId = app.appStoreId, versionsUrl = app.versionsUrl;

    let error = validateSecondaryDomains(data.secondaryDomains || {}, manifest);
    if (error) throw error;
    const secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    const locations = [new Location(subdomain, domain, Location.TYPE_PRIMARY)]
        .concat(secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)));

    error = await validateLocations(locations);
    if (error) throw error;

    // re-validate because this new box version may not accept old configs
    error = await checkManifest(manifest);
    if (error) throw error;

    error = validatePorts(data.ports || null, manifest);
    if (error) throw error;
    const portBindings = translateToPortBindings(data.ports || null, manifest);

    // should we copy the original app's mailbox settings instead?
    const mailboxName = manifest.addons?.sendmail ? mailboxNameForSubdomain(subdomain, manifest) : null;
    const mailboxDomain = manifest.addons?.sendmail ? domain : null;

    const newAppId = crypto.randomUUID();

    // copy config from the backup's snapshot when available, else from the live app.
    // label is intentionally omitted; icon is loaded in apptask from the backup.
    // NOTE(review): the original comment also claimed 'checklist' is omitted, but
    // 'checklist' IS in the pick list below — confirm which is intended.
    const dolly = _.pick(cloneBackup.appConfig || app, ['memoryLimit', 'cpuQuota', 'crontab', 'reverseProxyConfig', 'env', 'servicesConfig', 'tags', 'devices',
        'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain', 'debugMode',
        'enableTurn', 'enableRedis', 'mounts', 'enableBackup', 'enableAutomaticUpdate', 'accessRestriction', 'operators', 'sso',
        'notes', 'checklist']);

    if (!manifest.addons?.recvmail) dolly.inboxDomain = null; // needed because we are cloning _current_ app settings with old manifest

    const obj = Object.assign(dolly, {
        installationState: ISTATE_PENDING_CLONE,
        runState: RSTATE_RUNNING,
        mailboxName,
        mailboxDomain,
        secondaryDomains,
        redirectDomains: [],
        aliasDomains: [],
        // NOTE(review): 'label' is not in the pick list above, so dolly.label is
        // always undefined here and this always yields '' — confirm intent
        label: dolly.label ? `${dolly.label}-clone` : '',
    });

    const [addError] = await safe(add(newAppId, appStoreId, versionsUrl, manifest, subdomain, domain, portBindings, obj));
    if (addError && addError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(addError.message, locations, portBindings);
    if (addError) throw addError;

    const restoreConfig = { backupId: cloneBackup.id };
    const task = {
        args: { restoreConfig, overwriteDns, skipDnsSetup, oldManifest: null },
        values: {},
        requiredState: ISTATE_PENDING_CLONE
    };
    const taskId = await addTask(newAppId, ISTATE_PENDING_CLONE, task, auditSource);

    // build the event log's view of the new app, decorated with fqdns
    const newApp = Object.assign({}, _.omit(obj, ['icon']), { appStoreId, versionsUrl, manifest, subdomain, domain, portBindings });
    newApp.fqdn = dns.fqdn(newApp.subdomain, newApp.domain);
    newApp.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: app.id, backupId, remotePath: cloneBackup.remotePath, oldApp: app, newApp, taskId });

    return { id: newAppId, taskId };
}
|
|
|
|
/**
 * Re-installs an archived app from its archive backup at a new location,
 * creating a fresh app record and queuing an install task that restores the
 * backup data.
 *
 * @param {object} archiveEntry - archive record (reads id, backupId)
 * @param {object} data - { subdomain, domain, secondaryDomains?, ports?, overwriteDns? }
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{id: string, taskId: string}>} new app id and queued task id
 * @throws {BoxError} NOT_FOUND when the backup is gone, BAD_FIELD on validation failures
 */
async function unarchive(archiveEntry, data, auditSource) {
    assert.strictEqual(typeof archiveEntry, 'object');
    assert.strictEqual(typeof data, 'object');
    assert(auditSource && typeof auditSource === 'object');

    const archiveBackup = await backups.get(archiveEntry.backupId);
    // fix: previously archiveBackup was dereferenced without a null check and a
    // missing backup crashed with a TypeError instead of a proper error (siblings
    // restore()/clone() both guard this)
    if (!archiveBackup) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

    const restoreConfig = { backupId: archiveBackup.id };

    const subdomain = data.subdomain.toLowerCase(),
        domain = data.domain.toLowerCase(),
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false;

    // NOTE(review): unlike restore()/clone(), there is no encryptionVersion === 1
    // rejection here — confirm whether legacy-encrypted archives can be unarchived
    const manifest = archiveBackup.manifest, appStoreId = archiveBackup.manifest.id, versionsUrl = archiveBackup.appConfig?.versionsUrl || '';

    let error = validateSecondaryDomains(data.secondaryDomains || {}, manifest);
    if (error) throw error;
    const secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    const locations = [new Location(subdomain, domain, Location.TYPE_PRIMARY)]
        .concat(secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)));

    error = await validateLocations(locations);
    if (error) throw error;

    // re-validate because this new box version may not accept old configs
    error = await checkManifest(manifest);
    if (error) throw error;

    error = validatePorts(data.ports || null, manifest);
    if (error) throw error;
    const portBindings = translateToPortBindings(data.ports || null, manifest);

    const appId = crypto.randomUUID();

    // appConfig is null for pre-8.2 backups
    const dolly = _.pick(archiveBackup.appConfig || {}, ['memoryLimit', 'cpuQuota', 'crontab', 'reverseProxyConfig', 'env', 'servicesConfig',
        'tags', 'label', 'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain', 'devices',
        'enableTurn', 'enableRedis', 'mounts', 'enableBackup', 'enableAutomaticUpdate', 'accessRestriction', 'operators', 'sso',
        'notes', 'checklist']);

    // intentionally not filled up: redirectDomain, aliasDomains, mailboxDomain
    const obj = Object.assign(dolly, {
        secondaryDomains,
        redirectDomains: [],
        aliasDomains: [],
        mailboxDomain: data.domain, // archive's mailboxDomain may not exist
        runState: RSTATE_RUNNING,
        installationState: ISTATE_PENDING_INSTALL,
        sso: archiveBackup.appConfig ? archiveBackup.appConfig.sso : true // when no appConfig take a blind guess
    });
    obj.icon = (await archives.getIcons(archiveEntry.id))?.icon;

    const [addError] = await safe(add(appId, appStoreId, versionsUrl, manifest, subdomain, domain, portBindings, obj));
    if (addError && addError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(addError.message, locations, portBindings);
    if (addError) throw addError;

    const task = {
        args: { restoreConfig, overwriteDns },
        values: {},
        requiredState: obj.installationState
    };

    const taskId = await addTask(appId, obj.installationState, task, auditSource);

    // build the event log's view of the new app, decorated with fqdns
    const newApp = Object.assign({}, _.omit(obj, ['icon']), { appStoreId, versionsUrl, manifest, subdomain, domain, portBindings });
    newApp.fqdn = dns.fqdn(newApp.subdomain, newApp.domain);
    newApp.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId, app: newApp, taskId });

    return { id: appId, taskId };
}
|
|
|
|
/**
 * Queues an uninstall task for an app.
 *
 * @param {object} app - app record (reads id)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function uninstall(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_UNINSTALL);
    if (stateError) throw stateError;

    const uninstallTask = {
        args: {},
        values: {},
        requiredState: null // can run in any state, as long as no task is active
    };
    const taskId = await addTask(app.id, ISTATE_PENDING_UNINSTALL, uninstallTask, auditSource);
    await eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId: app.id, app, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Archives an app: records its latest backup plus icons and configuration in
 * the archives table, then uninstalls the app.
 *
 * @param {object} app - app record (reads id, manifest)
 * @param {string} backupId - expected id of the latest backup (guards against races)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string, id: string}>} uninstall task id and archive id
 * @throws {BoxError} BAD_FIELD for the proxy app, BAD_STATE when no usable backup exists
 */
async function archive(app, backupId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'cannot archive proxy app');

    // verify the caller-provided backupId is still the newest normal backup
    const latestBackups = await backups.listByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1);
    if (latestBackups.length === 0) throw new BoxError(BoxError.BAD_STATE, 'No recent backup to archive');
    if (latestBackups[0].id !== backupId) throw new BoxError(BoxError.BAD_STATE, 'Latest backup id has changed');

    const icons = await getIcons(app.id);
    const archiveId = await archives.add(backupId, { icon: icons.icon, packageIcon: icons.packageIcon, appConfig: app }, auditSource);

    const { taskId } = await uninstall(app, auditSource);
    return { taskId, id: archiveId };
}
|
|
|
|
/**
 * Queues a task that starts the app (sets runState to running).
 *
 * @param {object} app - app record (reads id)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function start(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_START);
    if (stateError) throw stateError;

    const startTask = { args: {}, values: { runState: RSTATE_RUNNING } };
    const taskId = await addTask(app.id, ISTATE_PENDING_START, startTask, auditSource);
    await eventlog.add(eventlog.ACTION_APP_START, auditSource, { appId: app.id, app, taskId });
    return { taskId };
}
|
|
|
|
/**
 * Queues a task that stops the app (sets runState to stopped).
 *
 * @param {object} app - app record (reads id)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function stop(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_STOP);
    if (stateError) throw stateError;

    const stopTask = { args: {}, values: { runState: RSTATE_STOPPED } };
    const taskId = await addTask(app.id, ISTATE_PENDING_STOP, stopTask, auditSource);
    await eventlog.add(eventlog.ACTION_APP_STOP, auditSource, { appId: app.id, app, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Queues a task that restarts the app (ends in the running state).
 *
 * @param {object} app - app record (reads id)
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the queued task
 */
async function restart(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, ISTATE_PENDING_RESTART);
    if (stateError) throw stateError;

    const restartTask = { args: {}, values: { runState: RSTATE_RUNNING } };
    const taskId = await addTask(app.id, ISTATE_PENDING_RESTART, restartTask, auditSource);
    await eventlog.add(eventlog.ACTION_APP_RESTART, auditSource, { appId: app.id, app, taskId });

    return { taskId };
}
|
|
|
|
// NOTE(review): a stray comment here previously read 'auto-restart app tasks after
// a crash', which appears to describe a different function — confirm placement.
/**
 * Starts a full app backup to the given backup site as a background task. The
 * completion/failure is reported asynchronously via APP_BACKUP_FINISH event log
 * entries; this function returns as soon as the task is launched.
 *
 * @param {object} app - app record (reads id)
 * @param {string} backupSiteId - backup site to store the backup in
 * @param {object} auditSource - attribution recorded in the event log
 * @returns {Promise<{taskId: string}>} id of the started task
 * @throws {BoxError} BAD_STATE/BAD_FIELD when the app or site cannot be used
 */
async function backup(app, backupSiteId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupSiteId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (!canBackupApp(app)) throw new BoxError(BoxError.BAD_STATE, 'App cannot be backed up in this state');

    const backupSite = await backupSites.get(backupSiteId);
    if (!backupSite) throw new BoxError(BoxError.BAD_FIELD, 'No such backup site');

    const taskId = await tasks.add(`${tasks.TASK_APP_BACKUP_PREFIX}${app.id}`, [ app.id, backupSite.id, { snapshotOnly: false } ]);

    // site limit is in bytes; convert to MB and floor at 1024 MB
    const memoryLimit = backupSite.limits?.memoryLimit ? Math.max(backupSite.limits.memoryLimit/1024/1024, 1024) : 1024;

    // background
    tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit, oomScoreAdjust: -999 })
        .then(async (backupId) => {
            const completedBackup = await backups.get(backupId); // if task crashed, no result
            await eventlog.add(eventlog.ACTION_APP_BACKUP_FINISH, auditSource, { app, success: !!completedBackup, errorMessage: '', remotePath: completedBackup?.remotePath, backupId: backupId });
        })
        .catch(async (error) => {
            await eventlog.add(eventlog.ACTION_APP_BACKUP_FINISH, auditSource, { app, success: false, errorMessage: error.message });
        })
        .finally(async () => {
            // release any locks held by the backup task, success or failure
            await locks.releaseByTaskId(taskId);
        });

    await eventlog.add(eventlog.ACTION_APP_BACKUP, auditSource, { app, appId: app.id, taskId });

    return { taskId };
}
|
|
|
|
/**
 * Updates metadata of one of this app's backups.
 *
 * @param {object} app - app record (reads id)
 * @param {string} backupId - backup to update
 * @param {object} data - fields to update, passed through to backups.update
 * @returns {Promise<void>}
 * @throws {BoxError} NOT_FOUND when the backup is missing or belongs to another app
 */
async function updateBackup(app, backupId, data) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof data, 'object');

    const appBackup = await backups.get(backupId);
    // another app's backup is reported as missing, not as forbidden
    if (!appBackup || appBackup.identifier !== app.id) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

    await backups.update(appBackup, data);
}
|
|
|
|
/**
 * Opens a download stream for one of this app's tgz backups.
 *
 * @param {object} app - app record (reads id and fqdn)
 * @param {string} backupId - backup to download
 * @returns {Promise<{stream: PassThrough, filename: string}>} data stream and suggested filename
 * @throws {BoxError} NOT_FOUND for missing/foreign backups, BAD_STATE for non-tgz formats
 */
async function getBackupDownloadStream(app, backupId) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');

    const downloadBackup = await backups.get(backupId);
    if (!downloadBackup) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
    if (downloadBackup.identifier !== app.id) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found'); // some other app's backup

    const backupSite = await backupSites.get(downloadBackup.siteId);
    if (!backupSite) throw new BoxError(BoxError.NOT_FOUND, 'Backup site not found'); // not possible
    if (backupSite.format !== 'tgz') throw new BoxError(BoxError.BAD_STATE, 'only tgz backups can be downloaded');

    const passthrough = new PassThrough();

    const source = await backupSites.storageApi(backupSite).download(backupSite.config, downloadBackup.remotePath);
    // surface provider read errors on the stream handed to the caller
    source.on('error', function(error) {
        debug(`getBackupDownloadStream: read stream error: ${error.message}`);
        passthrough.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error));
    });
    source.pipe(passthrough);

    // timestamp like 2024-01-02-03-04-05 (colons/T replaced, fractional part dropped)
    const timestamp = (new Date()).toISOString().replace(/:|T/g,'-').replace(/\..*/,'');
    const suffix = downloadBackup.encryptionVersion ? '.enc' : '';
    const filename = `app-backup-${timestamp} (${app.fqdn}).tar.gz${suffix}`;

    return { stream: passthrough, filename };
}
|
|
|
|
/**
 * Marks a set of apps for restore (from their newest normal backup) or fresh
 * install (when no backup exists). Used during Cloudron restore/migration; the
 * tasks are created but not started here — autoRestartTasks schedules them
 * once the platform is ready. Errors per app are logged, not thrown.
 *
 * @param {object[]} apps - app records to process
 * @param {object} options - { skipDnsSetup } forwarded to each task
 * @param {object} auditSource - attribution recorded with each task
 * @returns {Promise<void>}
 */
async function restoreApps(apps, options, auditSource) {
    assert(Array.isArray(apps));
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    apps = apps.filter(app => app.installationState !== ISTATE_ERROR); // remove errored apps. let them be 'repaired' by hand
    apps = apps.filter(app => app.installationState !== ISTATE_PENDING_RESTORE); // safeguard against tasks being created non-stop if we crash on startup

    for (const app of apps) {
        // look up only the single newest normal backup of this app
        const [error, results] = await safe(backups.listByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1));
        let installationState, restoreConfig;
        if (!error && results.length) {
            installationState = ISTATE_PENDING_RESTORE;
            // intentionally ignore any backupSite provided during restore by the user because the site may not have all the apps
            restoreConfig = { backupId: results[0].id };
        } else {
            // no backup found (or lookup failed): fall back to a fresh install
            installationState = ISTATE_PENDING_INSTALL;
            restoreConfig = null;
        }

        const task = {
            args: { restoreConfig, skipDnsSetup: options.skipDnsSetup, overwriteDns: true, oldManifest: null },
            values: {},
            scheduleNow: false, // task will be scheduled by autoRestartTasks when platform is ready
            requireNullTaskId: false // ignore existing stale taskId
        };

        debug(`restoreApps: marking ${app.fqdn} as ${installationState} using restore config ${JSON.stringify(restoreConfig)}`);

        const [addTaskError, taskId] = await safe(addTask(app.id, installationState, task, auditSource));
        if (addTaskError) debug(`restoreApps: error marking ${app.fqdn} for restore: ${JSON.stringify(addTaskError)}`);
        else debug(`restoreApps: marked ${app.id} as ${installationState} with taskId ${taskId}`);
    }
}
|
|
|
|
// Marks each app for re-configuration by creating a pending_configure task.
// options.scheduleNow controls whether the task runs immediately or waits
// for the task scheduler.
async function configureApps(apps, options, auditSource) {
    assert(Array.isArray(apps));
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const scheduleNow = !!options.scheduleNow;

    // skip errored apps (let them be 'repaired' by hand) and apps already pending a
    // configure (safeguard against tasks being created non-stop if we crash on startup)
    const candidates = apps.filter(app => app.installationState !== ISTATE_ERROR && app.installationState !== ISTATE_PENDING_CONFIGURE);

    for (const app of candidates) {
        debug(`configureApps: marking ${app.fqdn} for reconfigure (scheduleNow: ${scheduleNow})`);

        const task = {
            args: {},
            values: {},
            scheduleNow,
            requireNullTaskId: false // ignore existing stale taskId
        };

        const [addTaskError, taskId] = await safe(addTask(app.id, ISTATE_PENDING_CONFIGURE, task, auditSource));
        if (addTaskError) debug(`configureApps: error marking ${app.fqdn} for configure: ${JSON.stringify(addTaskError)}`);
        else debug(`configureApps: marked ${app.id} for re-configure with taskId ${taskId}`);
    }
}
|
|
|
|
// Restarts every running app whose manifest uses one of the changed addons.
// Each app is stopped up-front (so addon databases can be re-imported without
// being locked) and then marked with a pending_restart task.
async function restartAppsUsingAddons(changedAddons, auditSource) {
    assert(Array.isArray(changedAddons));
    assert.strictEqual(typeof auditSource, 'object');

    const allApps = await list();

    // TODO: This ends up restarting apps that have optional redis
    const usesChangedAddon = (app) => !!app.manifest.addons && Object.keys(app.manifest.addons).some(addon => changedAddons.includes(addon));

    const affected = allApps.filter(app => usesChangedAddon(app)
        && app.installationState !== ISTATE_ERROR // remove errored apps. let them be 'repaired' by hand
        && app.installationState !== ISTATE_PENDING_RESTART // safeguard against tasks being created non-stop restart if we crash on startup
        && app.runState !== RSTATE_STOPPED); // don't start stopped apps

    for (const app of affected) {
        debug(`restartAppsUsingAddons: marking ${app.fqdn} for restart`);

        const task = {
            args: {},
            values: { runState: RSTATE_RUNNING }
        };

        // stop apps before updating the databases because postgres will "lock" them preventing import
        const [stopError] = await safe(docker.stopContainers(app.id));
        if (stopError) debug(`restartAppsUsingAddons: error stopping ${app.fqdn}`, stopError);

        const [addTaskError, taskId] = await safe(addTask(app.id, ISTATE_PENDING_RESTART, task, auditSource));
        if (addTaskError) debug(`restartAppsUsingAddons: error marking ${app.fqdn} for restart: ${JSON.stringify(addTaskError)}`);
        else debug(`restartAppsUsingAddons: marked ${app.id} for restart with taskId ${taskId}`);
    }
}
|
|
|
|
// Re-schedules the pending task of every app that still has a taskId
// (i.e. was mid-operation when the box went down). Errors are logged
// and ignored so one bad app does not block the rest.
async function schedulePendingTasks(auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    debug('schedulePendingTasks: scheduling app tasks');

    const pendingApps = (await list()).filter(app => app.taskId); // apps without a taskId are not in any pending state

    for (const app of pendingApps) {
        debug(`schedulePendingTasks: schedule task for ${app.fqdn} ${app.id}: state=${app.installationState},taskId=${app.taskId}`);

        await safe(scheduleTask(app.id, app.installationState, app.taskId, auditSource), { debug }); // ignore error
    }
}
|
|
|
|
// Public interface of the apps module. State/health constants are re-exported
// here so callers (and the UI, which must stay in sync) can reference them by name.
export default {
    // access checks
    canAccess,
    isOperator,
    accessLevel,
    pickFields,

    // database crud
    add,
    update,
    setHealth,
    del,

    // lookups
    get,
    getByIpAddress,
    getByFqdn,
    list,
    listByUser,

    // user actions
    install,
    unarchive,
    uninstall,
    archive,

    // per-app settings
    setAccessRestriction,
    setOperators,
    setCrontab,
    setUpstreamUri,
    setLabel,
    setIcon,
    setTags,
    setNotes,
    setChecklistItem,
    setMemoryLimit,
    setCpuQuota,
    setMounts,
    setDevices,
    setAutomaticBackup,
    setAutomaticUpdate,
    setReverseProxyConfig,
    setCertificate,
    setDebugMode,
    setEnvironment,
    setMailbox,
    setInbox,
    setTurn,
    setRedis,
    setLocation,
    setStorage,
    repair,

    // restore/import/export/clone
    restore,
    importApp,
    exportApp,
    clone,

    updateApp,

    // backups
    backup,
    listBackups,
    updateBackup,
    getBackupDownloadStream,

    // tasks and logs
    getTask,
    getLogPaths,
    getLogs,

    appendLogLine,

    // lifecycle
    start,
    stop,
    restart,

    // exec into a running app container
    createExec,
    startExec,
    getExec,

    checkManifest,

    // bulk operations (used on platform start/restore)
    restoreApps,
    configureApps,
    schedulePendingTasks,
    restartAppsUsingAddons,

    // misc getters
    getStorageDir,
    getIcon,
    getMemoryLimit,
    getSchedulerConfig,

    listEventlog,

    // file transfer in/out of the app container
    downloadFile,
    uploadFile,

    // app config persistence
    writeConfig,
    loadConfig,

    canBackupApp,

    attachUpdateInfoProperties,

    // port types
    PORT_TYPE_TCP,
    PORT_TYPE_UDP,

    // task codes - the installation state is now a misnomer (keep in sync in UI)
    ISTATE_PENDING_INSTALL,
    ISTATE_PENDING_CLONE,
    ISTATE_PENDING_CONFIGURE,
    ISTATE_PENDING_RECREATE_CONTAINER,
    ISTATE_PENDING_LOCATION_CHANGE,
    ISTATE_PENDING_SERVICES_CHANGE,
    ISTATE_PENDING_DATA_DIR_MIGRATION,
    ISTATE_PENDING_RESIZE,
    ISTATE_PENDING_DEBUG,
    ISTATE_PENDING_UNINSTALL,
    ISTATE_PENDING_RESTORE,
    ISTATE_PENDING_IMPORT,
    ISTATE_PENDING_UPDATE,
    ISTATE_PENDING_START,
    ISTATE_PENDING_STOP,
    ISTATE_PENDING_RESTART,
    ISTATE_ERROR,
    ISTATE_INSTALLED,

    // run states
    RSTATE_RUNNING,
    RSTATE_STOPPED,

    // health states (keep in sync in UI)
    HEALTH_HEALTHY: 'healthy',
    HEALTH_UNHEALTHY: 'unhealthy',
    HEALTH_ERROR: 'error',
    HEALTH_DEAD: 'dead',

    // app access levels
    ACCESS_LEVEL_ADMIN,
    ACCESS_LEVEL_OPERATOR,
    ACCESS_LEVEL_USER,
    ACCESS_LEVEL_NONE,

    // exported for testing
    _checkForPortBindingConflict: checkForPortBindingConflict,
    _validatePorts: validatePorts,
    _validateAccessRestriction: validateAccessRestriction,
    _validateUpstreamUri: validateUpstreamUri,
    _validateLocations: validateLocations,
    _parseCrontab: parseCrontab,
    _clear: clear,
};
|