Cleaner to separate things from the backups table. * icon, appConfig, appStoreIcon etc. are only valid for archives * older versions of Cloudron do not have appConfig in the backups table (so such a row cannot be an archive entry)
3016 lines
124 KiB
JavaScript
3016 lines
124 KiB
JavaScript
'use strict';
|
|
|
|
// Public interface of the apps module: authorization helpers, database CRUD,
// user-triggered actions, task/log access and the app state/health constants.
exports = module.exports = {
    // authorization helpers
    canAccess,
    isOperator,
    accessLevel,

    // strip fields before sending an app object to clients
    removeInternalFields,
    removeRestrictedFields,

    // database crud
    add,
    update,
    setHealth,
    del,

    get,
    getByIpAddress,
    getByFqdn,
    list,
    listByUser,

    // user actions
    install,
    uninstall,
    archive,

    // per-app settings
    setAccessRestriction,
    setOperators,
    setCrontab,
    setUpstreamUri,
    setLabel,
    setIcon,
    setTags,
    setNotes,
    setChecklistItem,
    setMemoryLimit,
    setCpuQuota,
    setMounts,
    setDevices,
    setAutomaticBackup,
    setAutomaticUpdate,
    setReverseProxyConfig,
    setCertificate,
    setDebugMode,
    setEnvironment,
    setMailbox,
    setInbox,
    setTurn,
    setRedis,
    setLocation,
    setStorage,
    repair,

    // restore/import/export/clone
    restore,
    importApp,
    exportApp,
    clone,

    updateApp,

    // backups
    backup,
    listBackups,
    updateBackup,
    getBackupDownloadStream,

    // tasks and logs
    getTask,
    getLogPaths,
    getLogs,

    appendLogLine,

    // container lifecycle
    start,
    stop,
    restart,

    // exec sessions inside the app container
    createExec,
    startExec,
    getExec,

    checkManifest,

    // automatic updates
    canAutoupdateApp,
    autoupdateApps,

    // bulk operations over all apps
    restoreApps,
    configureApps,
    schedulePendingTasks,
    restartAppsUsingAddons,

    // computed properties
    getStorageDir,
    getIcon,
    getMemoryLimit,
    getSchedulerConfig,

    listEventlog,

    // file access inside the app's data directory
    downloadFile,
    uploadFile,

    writeConfig,
    loadConfig,

    PORT_TYPE_TCP: 'tcp',
    PORT_TYPE_UDP: 'udp',

    // task codes - the installation state is now a misnomer (keep in sync in UI)
    ISTATE_PENDING_INSTALL: 'pending_install', // installs and fresh reinstalls
    ISTATE_PENDING_CLONE: 'pending_clone', // clone
    ISTATE_PENDING_CONFIGURE: 'pending_configure', // infra update
    ISTATE_PENDING_RECREATE_CONTAINER: 'pending_recreate_container', // env change or addon change
    ISTATE_PENDING_LOCATION_CHANGE: 'pending_location_change',
    ISTATE_PENDING_SERVICES_CHANGE: 'pending_services_change',
    ISTATE_PENDING_DATA_DIR_MIGRATION: 'pending_data_dir_migration',
    ISTATE_PENDING_RESIZE: 'pending_resize',
    ISTATE_PENDING_DEBUG: 'pending_debug',
    ISTATE_PENDING_UNINSTALL: 'pending_uninstall', // uninstallation
    ISTATE_PENDING_RESTORE: 'pending_restore', // restore to previous backup or on upgrade
    ISTATE_PENDING_IMPORT: 'pending_import', // import from external backup
    ISTATE_PENDING_UPDATE: 'pending_update', // update from installed state preserving data
    ISTATE_PENDING_BACKUP: 'pending_backup', // backup the app. this is state because it blocks other operations
    ISTATE_PENDING_START: 'pending_start',
    ISTATE_PENDING_STOP: 'pending_stop',
    ISTATE_PENDING_RESTART: 'pending_restart',
    ISTATE_ERROR: 'error', // error executing last pending_* command
    ISTATE_INSTALLED: 'installed', // app is installed

    // run states
    RSTATE_RUNNING: 'running',
    RSTATE_STOPPED: 'stopped', // app stopped by us

    // health states (keep in sync in UI)
    HEALTH_HEALTHY: 'healthy',
    HEALTH_UNHEALTHY: 'unhealthy',
    HEALTH_ERROR: 'error',
    HEALTH_DEAD: 'dead',

    // exported for testing
    _checkForPortBindingConflict: checkForPortBindingConflict,
    _validatePorts: validatePorts,
    _validateAccessRestriction: validateAccessRestriction,
    _validateUpstreamUri: validateUpstreamUri,
    _validateLocations: validateLocations,
    _parseCrontab: parseCrontab,
    _clear: clear
};
|
|
|
|
const appstore = require('./appstore.js'),
|
|
appTaskManager = require('./apptaskmanager.js'),
|
|
archives = require('./archives.js'),
|
|
assert = require('assert'),
|
|
backups = require('./backups.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
{ CronTime } = require('cron'),
|
|
dashboard = require('./dashboard.js'),
|
|
database = require('./database.js'),
|
|
debug = require('debug')('box:apps'),
|
|
dns = require('./dns.js'),
|
|
docker = require('./docker.js'),
|
|
domains = require('./domains.js'),
|
|
eventlog = require('./eventlog.js'),
|
|
fs = require('fs'),
|
|
Location = require('./location.js'),
|
|
logs = require('./logs.js'),
|
|
mail = require('./mail.js'),
|
|
manifestFormat = require('cloudron-manifestformat'),
|
|
notifications = require('./notifications.js'),
|
|
once = require('./once.js'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
PassThrough = require('stream').PassThrough,
|
|
reverseProxy = require('./reverseproxy.js'),
|
|
safe = require('safetydance'),
|
|
semver = require('semver'),
|
|
services = require('./services.js'),
|
|
shell = require('./shell.js')('apps'),
|
|
storage = require('./storage.js'),
|
|
tasks = require('./tasks.js'),
|
|
tgz = require('./backupformat/tgz.js'),
|
|
TransformStream = require('stream').Transform,
|
|
users = require('./users.js'),
|
|
util = require('util'),
|
|
uuid = require('uuid'),
|
|
validator = require('validator'),
|
|
volumes = require('./volumes.js'),
|
|
_ = require('underscore');
|
|
|
|
// Columns selected for app queries, prefixed with 'apps.' because the queries join other tables.
// The icon blobs themselves are not fetched; only hasIcon/hasAppStoreIcon booleans are computed.
const APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState',
    'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuQuota',
    'apps.label', 'apps.notes', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson', 'apps.operatorsJson',
    'apps.sso', 'apps.devicesJson', 'apps.debugModeJson', 'apps.enableBackup', 'apps.proxyAuth', 'apps.containerIp', 'apps.crontab',
    'apps.creationTime', 'apps.updateTime', 'apps.enableAutomaticUpdate', 'apps.upstreamUri', 'apps.checklistJson',
    'apps.enableMailbox', 'apps.mailboxDisplayName', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableInbox', 'apps.inboxName', 'apps.inboxDomain',
    'apps.enableTurn', 'apps.enableRedis', 'apps.storageVolumeId', 'apps.storageVolumePrefix', 'apps.ts', 'apps.healthTime', '(apps.icon IS NOT NULL) AS hasIcon', '(apps.appStoreIcon IS NOT NULL) AS hasAppStoreIcon' ].join(',');

// const PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId', 'count' ].join(',');

// columns of the per-app locations (domains/subdomains) table
const LOCATION_FIELDS = [ 'appId', 'subdomain', 'domain', 'type', 'certificateJson' ];

// helper script run via sudo by checkStorage() to verify a target volume directory
const CHECKVOLUME_CMD = path.join(__dirname, 'scripts/checkvolume.sh');
|
|
// ports is a map of envvar -> hostPort.
// Returns null when the map is valid against the manifest's tcpPorts/udpPorts,
// otherwise a BoxError(BAD_FIELD) describing the first problem found.
function validatePorts(ports, manifest) {
    assert.strictEqual(typeof ports, 'object');
    assert.strictEqual(typeof manifest, 'object');

    // keep the public ports in sync with firewall rules in setup/start/cloudron-firewall.sh
    // these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1
    // for custom tcp ports
    const RESERVED_PORTS = [
        22, /* ssh */
        25, /* smtp */
        80, /* http */
        143, /* imap */
        202, /* alternate ssh */
        222, /* proftd */
        443, /* https */
        465, /* smtps */
        587, /* submission */
        993, /* imaps */
        995, /* pop3s */
        2003, /* graphite (lo) */
        constants.PORT, /* app server (lo) */
        constants.AUTHWALL_PORT, /* protected sites */
        constants.INTERNAL_SMTP_PORT, /* internal smtp port (lo) */
        constants.LDAP_PORT,
        3306, /* mysql (lo) */
        3478, /* turn,stun */
        4190, /* managesieve */
        5349, /* turn,stun TLS */
        8000, /* ESXi monitoring */
    ];

    const RESERVED_PORT_RANGES = [
        [50000, 51000] /* turn udp ports */
    ];

    const ALLOWED_PORTS = [
        53, // dns 53 is special and adblocker apps can use them
        853 // dns over tls
    ];

    if (!ports) return null;

    const tcpPorts = manifest.tcpPorts || {};
    const udpPorts = manifest.udpPorts || {};

    for (const portName in ports) {
        if (!/^[A-Z0-9_]+$/.test(portName)) return new BoxError(BoxError.BAD_FIELD, `${portName} is not a valid environment variable in ports`);

        const hostPort = ports[portName];
        if (!Number.isInteger(hostPort)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} is not an integer in ${portName} ports`);
        if (RESERVED_PORTS.includes(hostPort)) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`);
        if (RESERVED_PORT_RANGES.some(([ low, high ]) => hostPort >= low && hostPort <= high)) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in ports`);
        // only unprivileged ports are allowed, except for the DNS ports above
        if (!ALLOWED_PORTS.includes(hostPort) && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} for ${portName} is not in permitted range in ports`);

        // it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and ports. missing values implies the service is disabled
        const portSpec = tcpPorts[portName] || udpPorts[portName];
        if (!portSpec) return new BoxError(BoxError.BAD_FIELD, `Invalid portBinding ${portName}`);
        if (portSpec.readOnly && portSpec.defaultValue !== hostPort) return new BoxError(BoxError.BAD_FIELD, `portBinding ${portName} is readOnly and cannot have a different value than the default`);
        // multi-port specs must fit entirely below the port range ceiling
        if ((hostPort + (portSpec.portCount || 1)) > 65535) return new BoxError(BoxError.BAD_FIELD, `${hostPort}+${portSpec.portCount} for ${portName} exceeds valid port range`);
    }

    return null;
}
|
|
|
|
// translates the REST API ports (envvar -> hostPort) to database portBindings (envvar -> { hostPort, count, type })
// Assumes ports has passed validatePorts(); a port name is TCP when listed in
// manifest.tcpPorts and UDP otherwise.
function translateToPortBindings(ports, manifest) {
    assert.strictEqual(typeof ports, 'object');
    assert.strictEqual(typeof manifest, 'object');

    const portBindings = {};

    if (!ports) return portBindings;

    const tcpPorts = manifest.tcpPorts || {};
    const udpPorts = manifest.udpPorts || {}; // fallback added for symmetry with validatePorts; previously missing udpPorts threw a TypeError

    for (const portName in ports) {
        const isTcp = portName in tcpPorts;
        const portType = isTcp ? exports.PORT_TYPE_TCP : exports.PORT_TYPE_UDP;
        const portSpec = isTcp ? tcpPorts[portName] : udpPorts[portName];
        const portCount = portSpec ? portSpec.portCount : undefined; // count is optional in the manifest, so this can be undefined
        portBindings[portName] = { hostPort: ports[portName], type: portType, count: portCount || 1 };
    }

    return portBindings;
}
|
|
|
|
// Ensures secondaryDomains maps exactly the environment variables declared in
// manifest.httpPorts. Returns null when valid, a BoxError(BAD_FIELD) otherwise.
function validateSecondaryDomains(secondaryDomains, manifest) {
    assert.strictEqual(typeof secondaryDomains, 'object');
    assert.strictEqual(typeof manifest, 'object');

    const httpPorts = manifest.httpPorts || {};

    // every httpPort declared in the manifest must be mapped
    const missing = Object.keys(httpPorts).find((envName) => !(envName in secondaryDomains));
    if (missing) return new BoxError(BoxError.BAD_FIELD, `secondaryDomain ${missing} is required`);

    // and nothing may be mapped that the manifest does not declare
    const extra = Object.keys(secondaryDomains).find((envName) => !(envName in httpPorts));
    if (extra) return new BoxError(BoxError.BAD_FIELD, `secondaryDomain ${extra} is not listed in manifest`);

    return null;
}
|
|
|
|
// Flattens the API shape (envvar -> { domain, subdomain }) into the list shape
// used for the locations table: [{ domain, subdomain, environmentVariable }].
function translateSecondaryDomains(secondaryDomains) {
    assert(secondaryDomains && typeof secondaryDomains === 'object');

    return Object.entries(secondaryDomains).map(([ environmentVariable, entry ]) => ({
        domain: entry.domain,
        subdomain: entry.subdomain,
        environmentVariable
    }));
}
|
|
|
|
// Parses a user supplied crontab string into an array of { schedule, command }
// entries. Blank lines and '#' comments are skipped. Returns [] for null/empty
// input; throws BoxError(BAD_FIELD) for any malformed line.
function parseCrontab(crontab) {
    assert(crontab === null || typeof crontab === 'string');

    // https://www.man7.org/linux/man-pages/man5/crontab.5.html#EXTENSIONS
    // maps vixie-cron style extensions to either a 5-field schedule or the
    // special '@service' marker
    const KNOWN_EXTENSIONS = {
        '@service' : '@service', // runs once
        '@reboot' : '@service',
        '@yearly' : '0 0 1 1 *',
        '@annually' : '0 0 1 1 *',
        '@monthly' : '0 0 1 * *',
        '@weekly' : '0 0 * * 0',
        '@daily' : '0 0 * * *',
        '@hourly' : '0 * * * *',
    };

    const result = [];
    if (!crontab) return result;

    const lines = crontab.split('\n');
    for (let i = 0; i < lines.length; i++) {
        const line = lines[i].trim();
        if (!line || line.startsWith('#')) continue; // skip blanks and comments
        if (line.startsWith('@')) {
            // extension form: "@daily <command>"
            const parts = /^(@\S+)\s+(.+)$/.exec(line);
            if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
            const [, extension, command] = parts;
            if (!KNOWN_EXTENSIONS[extension]) throw new BoxError(BoxError.BAD_FIELD, `Unknown extension pattern at line ${i+1}`);
            result.push({ schedule: KNOWN_EXTENSIONS[extension], command });
        } else {
            // standard 5-field form: "min hour dom month dow <command>"
            const parts = /^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)$/.exec(line);
            if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
            const schedule = parts.slice(1, 6).join(' ');
            const command = parts[6];

            try {
                // validate the schedule with the cron library; a fixed seconds
                // field is prepended because user schedules must not carry one
                new CronTime('00 ' + schedule); // second is disallowed
            } catch (error) {
                throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern at line ${i+1}: ${error.message}`);
            }

            if (command.length === 0) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern. Command must not be empty at line ${i+1}`); // not possible with the regexp we have

            result.push({ schedule, command });
        }
    }

    return result;
}
|
|
|
|
// Builds the effective scheduler config of an app: the manifest's scheduler
// addon tasks merged with the user supplied crontab entries.
// Returns null when neither source defines any task.
function getSchedulerConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const manifestTasks = app.manifest.addons?.scheduler || null;

    const crontabEntries = parseCrontab(app.crontab);
    if (crontabEntries.length === 0) return manifestTasks;

    // merge crontab entries on top; the 'crontab.' prefix cannot clash with
    // manifest task names because '.' is not a valid name in manifestformat
    const schedulerConfig = manifestTasks || {};
    for (const [ idx, entry ] of crontabEntries.entries()) {
        schedulerConfig[`crontab.${idx}`] = entry;
    }

    return schedulerConfig;
}
|
|
|
|
// also validates operators
// Checks that users/groups, when present, are arrays of strings.
// Returns null when valid (including for null), a BoxError(BAD_FIELD) otherwise.
function validateAccessRestriction(accessRestriction) {
    assert.strictEqual(typeof accessRestriction, 'object');

    if (accessRestriction === null) return null;

    if (accessRestriction.users) {
        if (!Array.isArray(accessRestriction.users)) return new BoxError(BoxError.BAD_FIELD, 'users array property required');
        if (accessRestriction.users.some((e) => typeof e !== 'string')) return new BoxError(BoxError.BAD_FIELD, 'All users have to be strings');
    }

    if (accessRestriction.groups) {
        if (!Array.isArray(accessRestriction.groups)) return new BoxError(BoxError.BAD_FIELD, 'groups array property required');
        if (accessRestriction.groups.some((e) => typeof e !== 'string')) return new BoxError(BoxError.BAD_FIELD, 'All groups have to be strings');
    }

    // TODO: maybe validate if the users and groups actually exist
    return null;
}
|
|
|
|
// Validates a user supplied memory limit against the manifest's minimum.
// Returns null when valid, a BoxError(BAD_FIELD) otherwise.
function validateMemoryLimit(manifest, memoryLimit) {
    assert.strictEqual(typeof manifest, 'object');
    assert.strictEqual(typeof memoryLimit, 'number');

    // max is not checked because docker allows any value for --memory
    const minimum = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT;

    // 0 means "unset": the manifest value applies and no user value is committed,
    // so an app update can later change the effective limit via the manifest
    if (memoryLimit === 0) return null;

    // -1 is the sentinel for unlimited memory
    if (memoryLimit === -1) return null;

    return memoryLimit < minimum ? new BoxError(BoxError.BAD_FIELD, 'memoryLimit too small') : null;
}
|
|
|
|
// Validates a CPU quota percentage. Returns null when valid,
// a BoxError(BAD_FIELD) when outside the 1..100 range.
function validateCpuQuota(cpuQuota) {
    assert.strictEqual(typeof cpuQuota, 'number');

    const inRange = cpuQuota >= 1 && cpuQuota <= 100;
    return inRange ? null : new BoxError(BoxError.BAD_FIELD, 'cpuQuota has to be between 1 and 100');
}
|
|
|
|
// Validates the debugMode object ({ cmd, readonlyRootfs }).
// Returns null when valid (including for null), a BoxError(BAD_FIELD) otherwise.
function validateDebugMode(debugMode) {
    assert.strictEqual(typeof debugMode, 'object');

    if (debugMode === null) return null;

    const hasBadCmd = 'cmd' in debugMode && debugMode.cmd !== null && !Array.isArray(debugMode.cmd);
    if (hasBadCmd) return new BoxError(BoxError.BAD_FIELD, 'debugMode.cmd must be an array or null' );

    const hasBadRootfs = 'readonlyRootfs' in debugMode && typeof debugMode.readonlyRootfs !== 'boolean';
    if (hasBadRootfs) return new BoxError(BoxError.BAD_FIELD, 'debugMode.readonlyRootfs must be a boolean' );

    return null;
}
|
|
|
|
// Validates a robots.txt override. Returns null when valid (including for null),
// a BoxError(BAD_FIELD) when too long.
function validateRobotsTxt(robotsTxt) {
    if (robotsTxt === null) return null;

    // this is the nginx limit on inline strings. if we really hit this, we have to generate a file
    const withinNginxLimit = robotsTxt.length <= 4096;
    if (!withinNginxLimit) return new BoxError(BoxError.BAD_FIELD, 'robotsTxt must be less than 4096');

    // TODO: validate the robots file? we escape the string when templating the nginx config right now
    return null;
}
|
|
|
|
// Validates a Content-Security-Policy override. Returns null when valid
// (including for null), a BoxError(BAD_FIELD) otherwise.
function validateCsp(csp) {
    if (csp === null) return null;

    if (csp.length > 4096) return new BoxError(BoxError.BAD_FIELD, 'CSP must be less than 4096');

    // these characters would break the nginx config the value is templated into
    const forbidden = [ [ '"', 'CSP cannot contains double quotes' ], [ '\n', 'CSP cannot contain newlines' ] ];
    for (const [ character, message ] of forbidden) {
        if (csp.includes(character)) return new BoxError(BoxError.BAD_FIELD, message);
    }

    return null;
}
|
|
|
|
// Validates an upstream URI used for proxying: http(s) only, no path,
// no query, no fragment. Returns null when valid, a BoxError(BAD_FIELD) otherwise.
function validateUpstreamUri(upstreamUri) {
    assert.strictEqual(typeof upstreamUri, 'string');

    if (!upstreamUri) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot be empty');

    const parsed = safe(() => new URL(upstreamUri));
    if (!parsed) return new BoxError(BoxError.BAD_FIELD, `upstreamUri is invalid: ${safe.error.message}`);

    const isHttp = parsed.protocol === 'http:' || parsed.protocol === 'https:';
    if (!isHttp) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri has an unsupported scheme');
    if (parsed.search || parsed.hash) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have search or hash');
    if (parsed.pathname !== '/') return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

    // we use the uri in a named location @wellknown-upstream. nginx does not
    // support having paths in it, not even a bare trailing slash
    if (upstreamUri.endsWith('/')) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

    return null;
}
|
|
|
|
// Validates an app label. Returns null when valid (including for null),
// a BoxError(BAD_FIELD) when too long for the db column / list views.
function validateLabel(label) {
    if (label === null) return null;

    return label.length > 128 ? new BoxError(BoxError.BAD_FIELD, 'label must be less than 128') : null;
}
|
|
|
|
// Validates an array of app tags: at most 64 tags, each non-empty and
// at most 128 characters. Returns null when valid, a BoxError(BAD_FIELD) otherwise.
function validateTags(tags) {
    if (tags.length > 64) return new BoxError(BoxError.BAD_FIELD, 'Can only set up to 64 tags');

    if (tags.some(tag => tag.length === 0)) return new BoxError(BoxError.BAD_FIELD, 'tag cannot be empty'); // strict equality; was loose '=='
    if (tags.some(tag => tag.length > 128)) return new BoxError(BoxError.BAD_FIELD, 'tag must be less than 128');

    return null;
}
|
|
|
|
// Validates the devices map: every key must be a device node path under /dev.
// Returns null when valid, a BoxError(BAD_FIELD) otherwise.
function validateDevices(devices) {
    for (const key in devices) {
        if (!key.startsWith('/dev/')) return new BoxError(BoxError.BAD_FIELD, `"${key}" must start with /dev/`); // startsWith over indexOf !== 0
    }

    return null;
}
|
|
|
|
// Validates custom environment variables: keys must be POSIX-portable names
// and at most 512 characters. Returns null when valid, BoxError(BAD_FIELD) otherwise.
function validateEnv(env) {
    // http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
    const VALID_NAME = /^[a-zA-Z_][a-zA-Z0-9_]*$/;

    for (const key of Object.keys(env)) {
        if (key.length > 512) return new BoxError(BoxError.BAD_FIELD, 'Max env var key length is 512');
        if (!VALID_NAME.test(key)) return new BoxError(BoxError.BAD_FIELD, `"${key}" is not a valid environment variable`);
    }

    return null;
}
|
|
|
|
// Validates that an app's data directory can be moved to (volumeId, prefix).
// Checks the volume exists, is active and of a supported type, that prefix is a
// safe relative path, and (via a helper script) that the target dir is empty
// and chown-able. Returns null on success; throws BoxError(BAD_FIELD/BAD_STATE).
async function checkStorage(app, volumeId, prefix) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof volumeId, 'string');
    assert.strictEqual(typeof prefix, 'string');

    const volume = await volumes.get(volumeId);
    if (volume === null) throw new BoxError(BoxError.BAD_FIELD, 'Storage volume not found');

    // lack of file perms makes these unsupported
    if (volume.mountType === 'cifs' || volume.mountType === 'sshfs') throw new BoxError(BoxError.BAD_FIELD, `${volume.mountType} volumes cannot be used as data directory`);

    const status = await volumes.getStatus(volume);
    if (status.state !== 'active') throw new BoxError(BoxError.BAD_FIELD, 'Volume is not active');

    // prefix must be a clean relative path so it cannot escape the volume root
    if (path.isAbsolute(prefix)) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" must be a relative path`);
    if (prefix.endsWith('/')) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" contains trailing slash`);
    if (prefix !== '' && path.normalize(prefix) !== prefix) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" is not a normalized path`);

    const sourceDir = await getStorageDir(app);
    if (sourceDir === null) throw new BoxError(BoxError.BAD_STATE, 'App does not use localstorage addon');

    const targetDir = path.join(volume.hostPath, prefix);
    // only allow the target to be at most one directory level inside the current
    // data dir (or anywhere outside of it)
    const rel = path.relative(sourceDir, targetDir);
    if (!rel.startsWith('../') && rel.split('/').length > 1) throw new BoxError(BoxError.BAD_FIELD, 'Only one level subdirectory moves are supported');

    // checkvolume.sh exits 2 when the target is non-empty, 3 when chown fails there
    const [error] = await safe(shell.promises.sudo([ CHECKVOLUME_CMD, targetDir, sourceDir ], {}));
    if (error && error.code === 2) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} is not empty`);
    if (error && error.code === 3) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} does not support chown`);

    return null;
}
|
|
|
|
// Maps a mysql ER_DUP_ENTRY error message to a friendlier BoxError by matching
// the duplicated value against the locations and portBindings we tried to insert.
// Returns a BoxError (ALREADY_EXISTS / BAD_FIELD / DATABASE_ERROR); never throws.
function getDuplicateErrorDetails(errorMessage, locations, portBindings) {
    assert.strictEqual(typeof errorMessage, 'string');
    assert(Array.isArray(locations));
    assert.strictEqual(typeof portBindings, 'object');

    const match = errorMessage.match(/ER_DUP_ENTRY: Duplicate entry '(.*)' for key '(.*)'/);
    if (!match) {
        debug('Unexpected SQL error message.', errorMessage);
        return new BoxError(BoxError.DATABASE_ERROR, errorMessage);
    }

    // check if a location conflicts
    if (match[2] === 'locations.subdomain') {
        for (let i = 0; i < locations.length; i++) {
            const { subdomain, domain, type } = locations[i];
            // the unique key value is "subdomain-domain" (or just the domain for bare locations)
            if (match[1] !== (subdomain ? `${subdomain}-${domain}` : domain)) continue;

            return new BoxError(BoxError.ALREADY_EXISTS, `${type} location '${dns.fqdn(subdomain, domain)}' is in use`);
        }
    }

    // check if a host port conflicts
    for (const portName in portBindings) {
        if (portBindings[portName].hostPort === parseInt(match[1], 10)) return new BoxError(BoxError.ALREADY_EXISTS, `Port ${match[1]} is in use`); // radix added to parseInt
    }

    if (match[2] === 'apps_storageVolume') {
        return new BoxError(BoxError.BAD_FIELD, `Storage directory ${match[1]} is in use`);
    }

    // generic fallback using the raw key/value from the error message
    return new BoxError(BoxError.ALREADY_EXISTS, `${match[2]} '${match[1]}' is in use`);
}
|
|
|
|
// Resolves the host path of an app's data directory, or null when the app does
// not use the localstorage addon. Throws BoxError(NOT_FOUND) if the referenced
// volume vanished (should not happen).
async function getStorageDir(app) {
    assert.strictEqual(typeof app, 'object');

    if (!app.manifest.addons?.localstorage) return null;

    // default location on the system disk
    if (!app.storageVolumeId) return path.join(paths.APPS_DATA_DIR, app.id, 'data');

    // custom location on a configured volume
    const volume = await volumes.get(app.storageVolumeId);
    if (volume === null) throw new BoxError(BoxError.NOT_FOUND, 'Volume not found'); // not possible
    return path.join(volume.hostPath, app.storageVolumePrefix);
}
|
|
|
|
// Strips the private key from the primary certificate and from every
// secondary/alias/redirect domain certificate of the app (mutates in place).
function removeCertificateKeys(app) {
    const scrub = (holder) => {
        if (holder.certificate) delete holder.certificate.key;
    };

    scrub(app);
    for (const list of [ app.secondaryDomains, app.aliasDomains, app.redirectDomains ]) {
        list.forEach(scrub);
    }
}
|
|
|
|
// Returns a copy of the app with only the fields admins/operators may see;
// internal columns (icon blobs, raw json, container details) are dropped and
// certificate private keys are stripped.
function removeInternalFields(app) {
    const EXPOSED_FIELDS = [
        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId',
        'subdomain', 'domain', 'fqdn', 'certificate', 'crontab', 'upstreamUri',
        'accessRestriction', 'manifest', 'portBindings', 'iconUrl', 'memoryLimit', 'cpuQuota', 'operators',
        'sso', 'debugMode', 'reverseProxyConfig', 'enableBackup', 'creationTime', 'updateTime', 'ts', 'tags',
        'label', 'notes', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'devices', 'env', 'enableAutomaticUpdate',
        'storageVolumeId', 'storageVolumePrefix', 'mounts', 'enableTurn', 'enableRedis', 'checklist',
        'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain'
    ];

    const result = _.pick(app, EXPOSED_FIELDS);

    removeCertificateKeys(result);
    return result;
}
|
|
|
|
// non-admins can only see these
// Returns a copy of the app restricted to the fields plain users may see,
// with certificate private keys stripped.
function removeRestrictedFields(app) {
    const USER_VISIBLE_FIELDS = [
        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction', 'checklist',
        'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso', 'subdomain', 'domain', 'fqdn', 'certificate',
        'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'notes', 'enableBackup', 'upstreamUri'
    ];

    const result = _.pick(app, USER_VISIBLE_FIELDS);

    removeCertificateKeys(result);
    return result;
}
|
|
|
|
// Returns the app's icon blob: the user-uploaded icon wins unless
// options.original requests the appstore one. Returns null when no icon exists;
// throws BoxError(NOT_FOUND) when the app itself is missing.
async function getIcon(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const icons = await getIcons(app.id);
    if (!icons) throw new BoxError(BoxError.NOT_FOUND, 'No such app');

    const preferUploaded = !options.original;
    if (preferUploaded && icons.icon) return icons.icon;

    return icons.appStoreIcon ? icons.appStoreIcon : null;
}
|
|
|
|
// Computes the effective memory limit of an app: the user setting wins over the
// manifest, -1 maps to 0 (docker's "unrestricted"), and anything below the
// platform minimum is clamped up to it.
function getMemoryLimit(app) {
    assert.strictEqual(typeof app, 'object');

    const configured = app.memoryLimit || app.manifest.memoryLimit || 0;

    if (configured === -1) return 0; // unrestricted

    // ensure we never go below minimum (covers unset and stale values in case we change the default)
    if (configured === 0 || configured < constants.DEFAULT_MEMORY_LIMIT) return constants.DEFAULT_MEMORY_LIMIT;

    return configured;
}
|
|
|
|
// Converts a raw joined database row into the app object used everywhere else:
// parses *Json columns, rebuilds portBindings/locations/env/mounts from the
// GROUP_CONCAT'ed join columns and normalizes booleans. Mutates result in place.
function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    assert(result.manifestJson === null || typeof result.manifestJson === 'string');
    result.manifest = safe.JSON.parse(result.manifestJson);
    delete result.manifestJson;

    assert(result.tagsJson === null || typeof result.tagsJson === 'string');
    result.tags = safe.JSON.parse(result.tagsJson) || [];
    delete result.tagsJson;

    assert(result.checklistJson === null || typeof result.checklistJson === 'string');
    result.checklist = safe.JSON.parse(result.checklistJson) || {};
    delete result.checklistJson;

    assert(result.reverseProxyConfigJson === null || typeof result.reverseProxyConfigJson === 'string');
    result.reverseProxyConfig = safe.JSON.parse(result.reverseProxyConfigJson) || {};
    delete result.reverseProxyConfigJson;

    // hostPorts/environmentVariables/portTypes/portCounts are parallel
    // comma-separated lists from the port bindings join
    assert(result.hostPorts === null || typeof result.hostPorts === 'string');
    assert(result.environmentVariables === null || typeof result.environmentVariables === 'string');

    result.portBindings = {};
    const hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
    const environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');
    const portTypes = result.portTypes === null ? [ ] : result.portTypes.split(',');
    const portCounts = result.portCounts === null ? [ ] : result.portCounts.split(',');

    delete result.hostPorts;
    delete result.environmentVariables;
    delete result.portTypes;
    delete result.portCounts;

    for (let i = 0; i < environmentVariables.length; i++) {
        result.portBindings[environmentVariables[i]] = { hostPort: parseInt(hostPorts[i], 10), type: portTypes[i], count: parseInt(portCounts[i], 10) };
    }

    assert(result.accessRestrictionJson === null || typeof result.accessRestrictionJson === 'string');
    result.accessRestriction = safe.JSON.parse(result.accessRestrictionJson);
    if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = []; // normalize so callers can rely on .users
    delete result.accessRestrictionJson;

    result.operators = safe.JSON.parse(result.operatorsJson);
    if (result.operators && !result.operators.users) result.operators.users = [];
    delete result.operatorsJson;

    // mysql stores booleans as 0/1
    result.sso = !!result.sso;
    result.enableBackup = !!result.enableBackup;
    result.enableAutomaticUpdate = !!result.enableAutomaticUpdate;
    result.enableMailbox = !!result.enableMailbox;
    result.enableInbox = !!result.enableInbox;
    result.proxyAuth = !!result.proxyAuth;
    result.hasIcon = !!result.hasIcon;
    result.hasAppStoreIcon = !!result.hasAppStoreIcon;

    assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
    result.debugMode = safe.JSON.parse(result.debugModeJson);
    delete result.debugModeJson;

    assert(result.servicesConfigJson === null || typeof result.servicesConfigJson === 'string');
    result.servicesConfig = safe.JSON.parse(result.servicesConfigJson) || {};
    delete result.servicesConfigJson;

    // parallel arrays from the locations join; split by location type below
    const subdomains = JSON.parse(result.subdomains),
        domains = JSON.parse(result.domains),
        subdomainTypes = JSON.parse(result.subdomainTypes),
        subdomainEnvironmentVariables = JSON.parse(result.subdomainEnvironmentVariables),
        subdomainCertificateJsons = JSON.parse(result.subdomainCertificateJsons);

    delete result.subdomains;
    delete result.domains;
    delete result.subdomainTypes;
    delete result.subdomainEnvironmentVariables;
    delete result.subdomainCertificateJsons;

    result.secondaryDomains = [];
    result.redirectDomains = [];
    result.aliasDomains = [];
    for (let i = 0; i < subdomainTypes.length; i++) {
        const subdomain = subdomains[i], domain = domains[i], certificate = safe.JSON.parse(subdomainCertificateJsons[i]);

        if (subdomainTypes[i] === Location.TYPE_PRIMARY) {
            // primary location lands directly on the app object
            result.subdomain = subdomain;
            result.domain = domain;
            result.certificate = certificate;
        } else if (subdomainTypes[i] === Location.TYPE_SECONDARY) {
            result.secondaryDomains.push({ domain, subdomain, certificate, environmentVariable: subdomainEnvironmentVariables[i] });
        } else if (subdomainTypes[i] === Location.TYPE_REDIRECT) {
            result.redirectDomains.push({ domain, subdomain, certificate });
        } else if (subdomainTypes[i] === Location.TYPE_ALIAS) {
            result.aliasDomains.push({ domain, subdomain, certificate });
        }
    }

    const envNames = JSON.parse(result.envNames), envValues = JSON.parse(result.envValues);
    delete result.envNames;
    delete result.envValues;
    result.env = {};
    for (let i = 0; i < envNames.length; i++) { // NOTE: envNames is [ null ] when env of an app is empty
        if (envNames[i]) result.env[envNames[i]] = envValues[i];
    }

    const volumeIds = JSON.parse(result.volumeIds);
    delete result.volumeIds;
    const volumeReadOnlys = JSON.parse(result.volumeReadOnlys);
    delete result.volumeReadOnlys;

    result.mounts = volumeIds[0] === null ? [] : volumeIds.map((v, idx) => { return { volumeId: v, readOnly: !!volumeReadOnlys[idx] }; }); // NOTE: volumeIds is [null] when volumes of an app is empty

    result.error = safe.JSON.parse(result.errorJson);
    delete result.errorJson;

    result.taskId = result.taskId ? String(result.taskId) : null;

    // result.devices = {
    //     '/dev/ttyUSB10': {
    //         // future options
    //     },
    //     '/dev/hidraw0': {}
    // };

    result.devices = result.devicesJson ? JSON.parse(result.devicesJson) : {};
    delete result.devicesJson;
}
|
|
|
|
// attaches computed properties
// Adds iconUrl and the fqdn of the primary and of every auxiliary domain.
// NOTE: domainObjectMap is currently unused by this function.
function attachProperties(app, domainObjectMap) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof domainObjectMap, 'object');

    const hasAnyIcon = app.hasIcon || app.hasAppStoreIcon;
    app.iconUrl = hasAnyIcon ? `/api/v1/apps/${app.id}/icon` : null;

    app.fqdn = dns.fqdn(app.subdomain, app.domain);
    for (const list of [ app.secondaryDomains, app.redirectDomains, app.aliasDomains ]) {
        for (const entry of list) entry.fqdn = dns.fqdn(entry.subdomain, entry.domain);
    }
}
|
|
|
|
// true when the user's role ranks at or above ROLE_ADMIN
function isAdmin(user) {
    assert.strictEqual(typeof user, 'object');

    const comparison = users.compareRoles(user.role, users.ROLE_ADMIN);
    return comparison >= 0;
}
|
|
|
|
// true when the user is listed as an operator of the app (directly or via a
// group), or is an admin. A missing operators object means "admins only".
function isOperator(app, user) {
    assert.strictEqual(typeof app, 'object'); // IMPORTANT: can also be applink
    assert.strictEqual(typeof user, 'object');

    const operators = app.operators;
    if (!operators) return isAdmin(user);

    const isListedUser = operators.users.some((uid) => uid === user.id);
    if (isListedUser) return true;

    if (!operators.groups) return isAdmin(user);

    const inOperatorGroup = operators.groups.some((gid) => Array.isArray(user.groupIds) && user.groupIds.indexOf(gid) !== -1);
    if (inOperatorGroup) return true;

    return isAdmin(user);
}
|
|
|
|
// true when the user may open the app. A null accessRestriction means the app
// is open to everyone; otherwise fall back to operator/admin checks.
function canAccess(app, user) {
    assert.strictEqual(typeof app, 'object'); // IMPORTANT: can also be applink
    assert.strictEqual(typeof user, 'object');

    const restriction = app.accessRestriction;
    if (restriction === null) return true;

    const isListedUser = restriction.users.some((uid) => uid === user.id);
    if (isListedUser) return true;

    if (!restriction.groups) return isOperator(app, user);

    const inAllowedGroup = restriction.groups.some((gid) => Array.isArray(user.groupIds) && user.groupIds.indexOf(gid) !== -1);
    if (inAllowedGroup) return true;

    return isOperator(app, user);
}
|
|
|
|
// highest access level of the user for this app: 'admin' > 'operator' > 'user' > null
function accessLevel(app, user) {
    if (isAdmin(user)) return 'admin';
    if (isOperator(app, user)) return 'operator';
    if (canAccess(app, user)) return 'user';
    return null;
}
|
|
|
|
// Throws CONFLICT when any requested host port range overlaps an existing
// binding of the same protocol. options.appId, when set, excludes that app's
// own bindings (used when updating an existing app).
async function checkForPortBindingConflict(portBindings, options) {
    assert.strictEqual(typeof portBindings, 'object');
    assert.strictEqual(typeof options, 'object');

    let existingPortBindings;
    if (options.appId) {
        existingPortBindings = await database.query('SELECT * FROM appPortBindings WHERE appId != ?', [ options.appId ]);
    } else {
        existingPortBindings = await database.query('SELECT * FROM appPortBindings', []);
    }

    if (existingPortBindings.length === 0) return;

    const tcpPortBindings = existingPortBindings.filter((p) => p.type === 'tcp');
    const udpPortBindings = existingPortBindings.filter((p) => p.type === 'udp');

    for (const portName of Object.keys(portBindings)) {
        const binding = portBindings[portName];
        const candidates = binding.type === 'tcp' ? tcpPortBindings : udpPortBindings;

        // ranges [a1,a2] and [b1,b2] overlap unless one ends before the other starts
        const conflict = candidates.find((epb) => {
            const a1 = epb.hostPort;
            const a2 = epb.hostPort + epb.count - 1;
            const b1 = binding.hostPort;
            const b2 = binding.hostPort + binding.count - 1;
            return (a2 >= b1) && (b2 >= a1);
        });

        if (conflict) throw new BoxError(BoxError.CONFLICT, `Conflicting ${binding.type} port ${binding.hostPort}`);
    }
}
|
|
|
|
// Inserts a new app row plus all its satellite rows (primary/secondary/redirect/
// alias locations, port bindings, env vars) in one database transaction.
// - id: new app id, appStoreId: '' for custom apps
// - manifest: parsed manifest object (must carry a version)
// - subdomain/domain: the primary location
// - portBindings: { ENV_NAME: { hostPort, type, count } }
// - data: optional fields (accessRestriction, memoryLimit, env, devices, ...)
// Throws ALREADY_EXISTS on duplicate location/port, NOT_FOUND when the domain
// does not exist, DATABASE_ERROR otherwise.
async function add(id, appStoreId, manifest, subdomain, domain, portBindings, data) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof appStoreId, 'string');
    assert(manifest && typeof manifest === 'object');
    assert.strictEqual(typeof manifest.version, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert(portBindings && typeof portBindings === 'object');
    assert(data && typeof data === 'object');

    // normalize optional fields and serialize the JSON columns up front
    const manifestJson = JSON.stringify(manifest),
        accessRestriction = data.accessRestriction || null,
        accessRestrictionJson = JSON.stringify(accessRestriction),
        memoryLimit = data.memoryLimit || 0,
        cpuQuota = data.cpuQuota || 100,
        installationState = data.installationState,
        runState = data.runState,
        sso = 'sso' in data ? data.sso : null,
        debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null,
        devicesJson = data.devices ? JSON.stringify(data.devices) : null,
        env = data.env || {},
        label = data.label || null,
        tagsJson = data.tags ? JSON.stringify(data.tags) : null,
        checklistJson = data.checklist ? JSON.stringify(data.checklist) : null,
        mailboxName = data.mailboxName || null,
        mailboxDomain = data.mailboxDomain || null,
        mailboxDisplayName = data.mailboxDisplayName || '',
        reverseProxyConfigJson = data.reverseProxyConfig ? JSON.stringify(data.reverseProxyConfig) : null,
        servicesConfigJson = data.servicesConfig ? JSON.stringify(data.servicesConfig) : null,
        enableMailbox = data.enableMailbox || false,
        upstreamUri = data.upstreamUri || '',
        enableTurn = 'enableTurn' in data ? data.enableTurn : true,
        enableRedis = 'enableRedis' in data ? data.enableRedis : true,
        icon = data.icon || null;

    // fail early (outside the transaction) if any requested host port is taken
    await checkForPortBindingConflict(portBindings, { appId: null });

    const queries = [];

    queries.push({
        query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuQuota, '
            + 'sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, checklistJson, servicesConfigJson, icon, '
            + 'enableMailbox, mailboxDisplayName, upstreamUri, enableTurn, enableRedis, devicesJson) '
            + ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        args: [ id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuQuota,
            sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, checklistJson, servicesConfigJson, icon,
            enableMailbox, mailboxDisplayName, upstreamUri, enableTurn, enableRedis, devicesJson ]
    });

    // the primary location row
    queries.push({
        query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
        args: [ id, domain, subdomain, Location.TYPE_PRIMARY ]
    });

    // one row per exposed port; the key is the env var name exported to the app
    Object.keys(portBindings).forEach(function (env) {
        queries.push({
            query: 'INSERT INTO appPortBindings (environmentVariable, hostPort, type, appId, count) VALUES (?, ?, ?, ?, ?)',
            args: [ env, portBindings[env].hostPort, portBindings[env].type, id, portBindings[env].count ]
        });
    });

    Object.keys(env).forEach(function (name) {
        queries.push({
            query: 'INSERT INTO appEnvVars (appId, name, value) VALUES (?, ?, ?)',
            args: [ id, name, env[name] ]
        });
    });

    // secondary locations carry the env var through which the app learns the fqdn
    if (data.secondaryDomains) {
        data.secondaryDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type, environmentVariable) VALUES (?, ?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_SECONDARY, d.environmentVariable ]
            });
        });
    }

    if (data.redirectDomains) {
        data.redirectDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_REDIRECT ]
            });
        });
    }

    if (data.aliasDomains) {
        data.aliasDomains.forEach(function (d) {
            queries.push({
                query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
                args: [ id, d.domain, d.subdomain, Location.TYPE_ALIAS ]
            });
        });
    }

    const [error] = await safe(database.transaction(queries));
    if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, error.message);
    if (error && error.code === 'ER_NO_REFERENCED_ROW_2') throw new BoxError(BoxError.NOT_FOUND, 'no such domain'); // locations.domain FK violation
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
}
|
|
|
|
// returns { icon, appStoreIcon } for the app, or null when the app does not exist
async function getIcons(id) {
    assert.strictEqual(typeof id, 'string');

    const rows = await database.query('SELECT icon, appStoreIcon FROM apps WHERE id = ?', [ id ]);
    if (rows.length === 0) return null;

    const [ row ] = rows;
    return { icon: row.icon, appStoreIcon: row.appStoreIcon };
}
|
|
|
|
// Core update helper: writes scalar columns on the apps row and wholesale
// replaces the satellite rows (port bindings, env vars, locations, mounts)
// for every corresponding key present in 'app'. 'constraints' is extra SQL
// appended to the UPDATE's WHERE clause (e.g. a taskId guard — see setTask).
// Throws ALREADY_EXISTS on duplicates and NOT_FOUND when the guarded UPDATE
// matched no row (either the app is gone or the constraints filtered it out).
async function updateWithConstraints(id, app, constraints) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof constraints, 'string');
    assert(!('portBindings' in app) || typeof app.portBindings === 'object');
    assert(!('accessRestriction' in app) || typeof app.accessRestriction === 'object' || app.accessRestriction === '');
    assert(!('secondaryDomains' in app) || Array.isArray(app.secondaryDomains));
    assert(!('redirectDomains' in app) || Array.isArray(app.redirectDomains));
    assert(!('aliasDomains' in app) || Array.isArray(app.aliasDomains));
    assert(!('tags' in app) || Array.isArray(app.tags));
    assert(!('checklist' in app) || typeof app.checklist === 'object');
    assert(!('env' in app) || typeof app.env === 'object');

    const queries = [ ];

    if ('portBindings' in app) {
        const portBindings = app.portBindings;

        await checkForPortBindingConflict(portBindings, { appId: id });

        // replace entries by app id
        queries.push({ query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] });
        Object.keys(portBindings).forEach(function (env) {
            const values = [ portBindings[env].hostPort, portBindings[env].type, env, id, portBindings[env].count ];
            queries.push({ query: 'INSERT INTO appPortBindings (hostPort, type, environmentVariable, appId, count) VALUES(?, ?, ?, ?, ?)', args: values });
        });
    }

    if ('env' in app) {
        queries.push({ query: 'DELETE FROM appEnvVars WHERE appId = ?', args: [ id ] });

        Object.keys(app.env).forEach(function (name) {
            queries.push({
                query: 'INSERT INTO appEnvVars (appId, name, value) VALUES (?, ?, ?)',
                args: [ id, name, app.env[name] ]
            });
        });
    }

    if ('subdomain' in app && 'domain' in app) { // must be updated together as they are unique together
        queries.push({ query: 'DELETE FROM locations WHERE appId = ?', args: [ id ]}); // all locations of an app must be updated together
        queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, app.domain, app.subdomain, Location.TYPE_PRIMARY ]});

        if ('secondaryDomains' in app) {
            app.secondaryDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type, environmentVariable) VALUES (?, ?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_SECONDARY, d.environmentVariable ]});
            });
        }

        if ('redirectDomains' in app) {
            app.redirectDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_REDIRECT ]});
            });
        }

        if ('aliasDomains' in app) {
            app.aliasDomains.forEach(function (d) {
                queries.push({ query: 'INSERT INTO locations (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, Location.TYPE_ALIAS ]});
            });
        }
    }

    if ('mounts' in app) {
        queries.push({ query: 'DELETE FROM appMounts WHERE appId = ?', args: [ id ]});
        app.mounts.forEach(function (m) {
            queries.push({ query: 'INSERT INTO appMounts (appId, volumeId, readOnly) VALUES (?, ?, ?)', args: [ id, m.volumeId, m.readOnly ]});
        });
    }

    // remaining properties map straight to columns; the names in the first branch
    // are stored serialized in <name>Json columns
    const fields = [ ], values = [ ];
    for (const p in app) {
        if (p === 'manifest' || p === 'tags' || p === 'checklist' || p === 'accessRestriction' || p === 'devices' || p === 'debugMode' || p === 'error' || p === 'reverseProxyConfig' || p === 'servicesConfig' || p === 'operators') {
            fields.push(`${p}Json = ?`);
            values.push(JSON.stringify(app[p]));
        } else if (p !== 'portBindings' && p !== 'subdomain' && p !== 'domain' && p !== 'secondaryDomains' && p !== 'redirectDomains' && p !== 'aliasDomains' && p !== 'env' && p !== 'mounts') {
            fields.push(p + ' = ?');
            values.push(app[p]);
        }
    }

    if (values.length !== 0) {
        values.push(id);
        queries.push({ query: 'UPDATE apps SET ' + fields.join(', ') + ' WHERE id = ? ' + constraints, args: values });
    }

    const [error, results] = await safe(database.transaction(queries));
    if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, error.message);
    if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
    // the UPDATE (when present) is the last statement; 0 affected rows means the
    // constraints filtered it out or the app does not exist
    if (results[results.length - 1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'App not found');
}
|
|
|
|
// Persists app fields via updateWithConstraints.
// ts is useful as a versioning mechanism (for example, icon changed). update the timestamp explicitly in code instead of db.
// this way health and healthTime can be updated without changing ts
async function update(id, app) {
    // NOTE: use a shallow copy so the caller's object is not mutated (the
    // original assigned app.ts in place, surprising callers that reuse 'app')
    await updateWithConstraints(id, { ...app, ts: new Date() }, '');
}
|
|
|
|
// records the health probe result without bumping the app's 'ts' version field
async function setHealth(appId, health, healthTime) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof health, 'string');
    assert(util.types.isDate(healthTime));

    const fields = { health, healthTime };
    await updateWithConstraints(appId, fields, '');
}
|
|
|
|
// Updates task bookkeeping fields on the app row. When options.requireNullTaskId
// is set, the UPDATE is guarded so it only applies while no task is scheduled
// (taskId IS NULL) and, when options.requiredState is non-null, only while the
// app is in that installation state. Guard failure surfaces as NOT_FOUND from
// updateWithConstraints.
async function setTask(appId, values, options) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof values, 'object');
    assert.strictEqual(typeof options, 'object');

    // shallow copy: do not mutate the caller's 'values' object (the original
    // assigned values.ts in place)
    const fields = { ...values, ts: new Date() };

    if (!options.requireNullTaskId) return await updateWithConstraints(appId, fields, '');

    if (options.requiredState === null) {
        await updateWithConstraints(appId, fields, 'AND taskId IS NULL');
    } else {
        // requiredState is an internal ISTATE_* constant, never user input
        await updateWithConstraints(appId, fields, `AND taskId IS NULL AND installationState = "${options.requiredState}"`);
    }
}
|
|
|
|
// Removes the app and all its satellite rows in one transaction.
// Throws NOT_FOUND when no apps row was deleted.
async function del(id) {
    assert.strictEqual(typeof id, 'string');

    // child tables first, the apps row last
    const queries = [
        'DELETE FROM locations WHERE appId = ?',
        'DELETE FROM appPortBindings WHERE appId = ?',
        'DELETE FROM appEnvVars WHERE appId = ?',
        'DELETE FROM appPasswords WHERE identifier = ?',
        'DELETE FROM appMounts WHERE appId = ?',
        'DELETE FROM apps WHERE id = ?'
    ].map((query) => ({ query, args: [ id ] }));

    const results = await database.transaction(queries);
    // last result is the apps row deletion
    if (results[results.length - 1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'App not found');
}
|
|
|
|
// Wipes all app-related tables (test/maintenance helper). Child tables are
// cleared before apps so foreign keys on apps(id) are satisfied.
async function clear() {
    await database.query('DELETE FROM locations');
    await database.query('DELETE FROM appPortBindings');
    await database.query('DELETE FROM appAddonConfigs');
    await database.query('DELETE FROM appEnvVars');
    await database.query('DELETE FROM appMounts'); // was missing: del() removes appMounts per-app, clear() must match
    await database.query('DELETE FROM apps');
}
|
|
|
|
// each query simply join apps table with another table by id. we then join the full result together
// NOTE: the port query uses GROUP_CONCAT (comma separated strings cast to CHAR),
// the others use JSON_ARRAYAGG (JSON arrays); postProcess() parses both shapes.
// LEFT JOINs ensure apps without satellite rows still appear (aggregates become [null]).
const PB_QUERY = 'SELECT id, GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes, GROUP_CONCAT(CAST(appPortBindings.count AS CHAR(6))) AS portCounts FROM apps LEFT JOIN appPortBindings ON apps.id = appPortBindings.appId GROUP BY apps.id';
const ENV_QUERY = 'SELECT id, JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues FROM apps LEFT JOIN appEnvVars ON apps.id = appEnvVars.appId GROUP BY apps.id';
const SUBDOMAIN_QUERY = 'SELECT id, JSON_ARRAYAGG(locations.subdomain) AS subdomains, JSON_ARRAYAGG(locations.domain) AS domains, JSON_ARRAYAGG(locations.type) AS subdomainTypes, JSON_ARRAYAGG(locations.environmentVariable) AS subdomainEnvironmentVariables, JSON_ARRAYAGG(locations.certificateJson) AS subdomainCertificateJsons FROM apps LEFT JOIN locations ON apps.id = locations.appId GROUP BY apps.id';
const MOUNTS_QUERY = 'SELECT id, JSON_ARRAYAGG(appMounts.volumeId) AS volumeIds, JSON_ARRAYAGG(appMounts.readOnly) AS volumeReadOnlys FROM apps LEFT JOIN appMounts ON apps.id = appMounts.appId GROUP BY apps.id';
// the composite query every get/list call builds on; callers append WHERE/ORDER BY
const APPS_QUERY = `SELECT ${APPS_FIELDS_PREFIXED}, hostPorts, environmentVariables, portTypes, portCounts, envNames, envValues, subdomains, domains, subdomainTypes, subdomainEnvironmentVariables, subdomainCertificateJsons, volumeIds, volumeReadOnlys FROM apps`
    + ` LEFT JOIN (${PB_QUERY}) AS q1 on q1.id = apps.id`
    + ` LEFT JOIN (${ENV_QUERY}) AS q2 on q2.id = apps.id`
    + ` LEFT JOIN (${SUBDOMAIN_QUERY}) AS q3 on q3.id = apps.id`
    + ` LEFT JOIN (${MOUNTS_QUERY}) AS q4 on q4.id = apps.id`;
|
|
|
// returns the fully hydrated app object for the id, or null when not found
async function get(id) {
    assert.strictEqual(typeof id, 'string');

    const domainObjectMap = await domains.getDomainObjectMap();

    const rows = await database.query(`${APPS_QUERY} WHERE apps.id = ?`, [ id ]);
    if (rows.length === 0) return null;

    const [ app ] = rows;
    postProcess(app);
    attachProperties(app, domainObjectMap);

    return app;
}
|
|
|
|
// returns the app associated with this IP (app or scheduler)
async function getByIpAddress(ip) {
    assert.strictEqual(typeof ip, 'string');

    const domainObjectMap = await domains.getDomainObjectMap();

    const rows = await database.query(`${APPS_QUERY} WHERE apps.containerIp = ?`, [ ip ]);
    if (rows.length === 0) return null;

    const [ app ] = rows;
    postProcess(app);
    attachProperties(app, domainObjectMap);

    return app;
}
|
|
|
|
// returns all apps, hydrated, ordered by id
async function list() {
    const domainObjectMap = await domains.getDomainObjectMap();

    const apps = await database.query(`${APPS_QUERY} ORDER BY apps.id`, [ ]);
    for (const app of apps) {
        postProcess(app);
        attachProperties(app, domainObjectMap);
    }

    return apps;
}
|
|
|
|
// returns the app whose primary fqdn matches, or undefined
async function getByFqdn(fqdn) {
    assert.strictEqual(typeof fqdn, 'string');

    const allApps = await list();
    return allApps.find((a) => a.fqdn === fqdn);
}
|
|
|
|
// returns only the apps this user can access
async function listByUser(user) {
    assert.strictEqual(typeof user, 'object');

    const allApps = await list();
    return allApps.filter((app) => canAccess(app, user));
}
|
|
|
|
// returns the app's currently scheduled task, or null if none
async function getTask(app) {
    assert.strictEqual(typeof app, 'object');

    return app.taskId ? await tasks.get(app.taskId) : null;
}
|
|
|
|
// derives a default sendmail mailbox name: prefer the subdomain, then a
// sanitized manifest title, finally 'noreply' — always suffixed with '.app'
function mailboxNameForSubdomain(subdomain, manifest) {
    if (subdomain) return `${subdomain}.app`;

    if (!manifest.title) return 'noreply.app';

    const sanitized = manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '');
    return sanitized + '.app';
}
|
|
|
|
// Post-task hook run after an app task completes: emits finish eventlog entries
// and rebuilds dependent services for specific task types. 'error' is null on
// success. Silently returns when the app or task has disappeared meanwhile.
async function onTaskFinished(error, appId, installationState, taskId, auditSource) {
    assert(!error || typeof error === 'object');
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof taskId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const success = !error;
    const errorMessage = error?.message || null;

    const app = await get(appId);
    const task = await tasks.get(taskId);
    if (!app || !task) return; // app uninstalled or task pruned in the meantime

    switch (installationState) {
    case exports.ISTATE_PENDING_DATA_DIR_MIGRATION:
        // sftp service exposes the app data dir; rebuild so it picks up the new path (best-effort)
        if (success) await safe(services.rebuildService('sftp', auditSource), { debug });
        break;
    case exports.ISTATE_PENDING_UPDATE: {
        // on success app.manifest already holds the new manifest and the task args
        // hold the old one; on failure it is the other way around
        const fromManifest = success ? task.args[1].updateConfig.manifest : app.manifest;
        const toManifest = success ? app.manifest : task.args[1].updateConfig.manifest;

        await eventlog.add(eventlog.ACTION_APP_UPDATE_FINISH, auditSource, { app, toManifest, fromManifest, success, errorMessage });
        break;
    }
    case exports.ISTATE_PENDING_BACKUP: {
        const backup = task.result ? await backups.get(task.result) : null; // if task crashed, no result
        await eventlog.add(eventlog.ACTION_APP_BACKUP_FINISH, auditSource, { app, success, errorMessage, remotePath: backup?.remotePath, backupId: task.result });
        break;
    }
    }
}
|
|
|
|
// Hands an already-created task over to the appTaskManager with resource limits
// picked per task type, and installs a completion callback that clears the
// app's taskId (or records a crash error) and runs onTaskFinished.
async function scheduleTask(appId, installationState, taskId, auditSource) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof taskId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const backupConfig = await backups.getConfig();

    // memoryLimit is in MB. backup-ish tasks stream archives and may need the
    // configured backup memory limit (bytes -> MB, floor of 400)
    let memoryLimit = 400;
    if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE
        || installationState === exports.ISTATE_PENDING_IMPORT || installationState === exports.ISTATE_PENDING_UPDATE) {
        memoryLimit = backupConfig.limits?.memoryLimit ? Math.max(backupConfig.limits.memoryLimit/1024/1024, 400) : 400;
    } else if (installationState === exports.ISTATE_PENDING_DATA_DIR_MIGRATION) {
        memoryLimit = 1024; // cp takes more memory than we think
    }

    const options = { timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15, memoryLimit };

    appTaskManager.scheduleTask(appId, taskId, options, async function (error) {
        debug(`scheduleTask: task ${taskId} of ${appId} completed`);
        if (error && (error.code === tasks.ECRASHED || error.code === tasks.ESTOPPED)) { // if task crashed, update the error
            debug(`Apptask crashed/stopped: ${error.message}`);
            const boxError = new BoxError(BoxError.TASK_ERROR, error.message);
            boxError.details.crashed = error.code === tasks.ECRASHED;
            boxError.details.stopped = error.code === tasks.ESTOPPED;
            // see also apptask makeTaskError
            boxError.details.taskId = taskId;
            boxError.details.installationState = installationState;
            await safe(update(appId, { installationState: exports.ISTATE_ERROR, error: boxError.toPlainObject(), taskId: null }), { debug });
        } else if (!(installationState === exports.ISTATE_PENDING_UNINSTALL && !error)) { // clear out taskId except for successful uninstall
            // (a successful uninstall has already deleted the app row)
            await safe(update(appId, { taskId: null }), { debug });
        }

        await safe(onTaskFinished(error, appId, installationState, taskId, auditSource), { debug }); // ignore error
    });
}
|
|
|
|
// Creates a TASK_APP task and atomically attaches it to the app (setting
// installationState, taskId and clearing error). By default the attach is
// guarded by "taskId IS NULL", so a concurrent task surfaces as BAD_STATE.
// Returns the new taskId. task is { args, values, scheduleNow?, requireNullTaskId? }.
async function addTask(appId, installationState, task, auditSource) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof installationState, 'string');
    assert.strictEqual(typeof task, 'object'); // { args, values }
    assert.strictEqual(typeof auditSource, 'object');

    const { args, values } = task;
    // TODO: match the SQL logic to match checkAppState. this means checking the error.installationState and installationState. Unfortunately, former is JSON right now
    const requiredState = null; // 'requiredState' in task ? task.requiredState : exports.ISTATE_INSTALLED;
    const scheduleNow = 'scheduleNow' in task ? task.scheduleNow : true;
    const requireNullTaskId = 'requireNullTaskId' in task ? task.requireNullTaskId : true;

    const taskId = await tasks.add(tasks.TASK_APP, [ appId, args ]);

    const [updateError] = await safe(setTask(appId, Object.assign({ installationState, taskId, error: null }, values), { requiredState, requireNullTaskId }));
    if (updateError && updateError.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.BAD_STATE, 'Another task is scheduled for this app'); // could be because app went away OR a taskId exists
    if (updateError) throw updateError;

    if (scheduleNow) await safe(scheduleTask(appId, installationState, taskId, auditSource), { debug }); // ignore error

    return taskId;
}
|
|
|
|
// Returns a BoxError when 'state' (a pending ISTATE_*) may not be entered from
// the app's current state, or null when the transition is allowed.
function checkAppState(app, state) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof state, 'string');

    // an in-flight task locks the app
    if (app.taskId) return new BoxError(BoxError.BAD_STATE, `Locked by task ${app.taskId} : ${app.installationState} / ${app.runState}`);

    if (app.installationState === exports.ISTATE_ERROR) {
        // allow task to be called again if that was the errored task
        if (app.error.installationState === state) return null;

        // allow uninstall from any state
        const allowedInError = state === exports.ISTATE_PENDING_UNINSTALL || state === exports.ISTATE_PENDING_RESTORE || state === exports.ISTATE_PENDING_IMPORT;
        if (!allowedInError) return new BoxError(BoxError.BAD_STATE, 'Not allowed in error state');
    }

    if (app.runState === exports.RSTATE_STOPPED) {
        // can't backup or restore since app addons are down. can't update because migration scripts won't run
        const blockedWhenStopped = state === exports.ISTATE_PENDING_UPDATE || state === exports.ISTATE_PENDING_BACKUP || state === exports.ISTATE_PENDING_RESTORE || state === exports.ISTATE_PENDING_IMPORT;
        if (blockedWhenStopped) return new BoxError(BoxError.BAD_STATE, 'Not allowed in stopped state');
    }

    return null;
}
|
|
|
|
// Validates an array of Location objects against existing domains, reserved
// subdomains (mail) and the dashboard's own location. Returns a BoxError
// describing the first problem, or null when everything is valid.
async function validateLocations(locations) {
    assert(Array.isArray(locations));

    const domainObjectMap = await domains.getDomainObjectMap();

    const RESERVED_SUBDOMAINS = [
        constants.SMTP_SUBDOMAIN,
        constants.IMAP_SUBDOMAIN
    ];

    const dashboardLocation = await dashboard.getLocation();
    for (const location of locations) {
        if (!(location.domain in domainObjectMap)) return new BoxError(BoxError.BAD_FIELD, `No such domain in ${location.type} location`);

        let subdomain = location.subdomain;
        // alias locations may be wildcards; validate the part after the '*.'
        if (location.type === Location.TYPE_ALIAS && subdomain.startsWith('*')) {
            if (subdomain === '*') continue; // bare wildcard needs no further checks
            subdomain = subdomain.replace(/^\*\./, ''); // remove *.
        }

        if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);

        if (location.fqdn === dashboardLocation.fqdn) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved for dashboard`);

        const error = dns.validateHostname(subdomain, location.domain);
        if (error) return new BoxError(BoxError.BAD_FIELD, `Bad ${location.type} location: ${error.message}`);
    }

    return null;
}
|
|
|
|
// total number of installed apps
async function getCount() {
    const [ row ] = await database.query('SELECT COUNT(*) AS total FROM apps');
    return row.total;
}
|
|
|
|
// Installs a new app: validates every user supplied field, inserts the db rows,
// registers the purchase with the appstore, schedules the install task and logs
// an eventlog entry. Returns { id, taskId }. When data.backupId is set, the
// install restores from that archive entry instead of a fresh setup.
async function install(data, auditSource) {
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.manifest, 'object'); // manifest is already downloaded

    // normalize inputs; locations are stored lowercase
    const subdomain = data.subdomain.toLowerCase(),
        domain = data.domain.toLowerCase(),
        accessRestriction = data.accessRestriction || null,
        memoryLimit = data.memoryLimit || 0,
        debugMode = data.debugMode || null,
        enableBackup = 'enableBackup' in data ? data.enableBackup : true,
        enableAutomaticUpdate = 'enableAutomaticUpdate' in data ? data.enableAutomaticUpdate : true,
        redirectDomains = data.redirectDomains || [],
        aliasDomains = data.aliasDomains || [],
        devices = data.devices || {},
        env = data.env || {},
        label = data.label || null,
        tags = data.tags || [],
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
        skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false,
        enableTurn = 'enableTurn' in data ? data.enableTurn : true,
        enableRedis = 'enableRedis' in data ? data.enableRedis : true,
        appStoreId = data.appStoreId,
        upstreamUri = data.upstreamUri || '',
        manifest = data.manifest;

    // validate everything before touching the database
    let error = manifestFormat.parse(manifest);
    if (error) throw new BoxError(BoxError.BAD_FIELD, `Manifest error: ${error.message}`);

    error = await checkManifest(manifest);
    if (error) throw error;

    error = validatePorts(data.ports || null, manifest);
    if (error) throw error;
    const portBindings = translateToPortBindings(data.ports || null, manifest);

    error = validateAccessRestriction(accessRestriction);
    if (error) throw error;

    error = validateMemoryLimit(manifest, memoryLimit);
    if (error) throw error;

    error = validateDebugMode(debugMode);
    if (error) throw error;

    error = validateLabel(label);
    if (error) throw error;

    if ('upstreamUri' in data) error = validateUpstreamUri(upstreamUri);
    if (error) throw error;

    error = validateTags(tags);
    if (error) throw error;

    error = validateSecondaryDomains(data.secondaryDomains || {}, manifest);
    if (error) throw error;
    const secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    let sso = 'sso' in data ? data.sso : null;
    if ('sso' in data && !('optionalSso' in manifest)) throw new BoxError(BoxError.BAD_FIELD, 'sso can only be specified for apps with optionalSso');
    // if sso was unspecified, enable it by default if possible
    if (sso === null) sso = !!manifest.addons?.ldap || !!manifest.addons?.proxyAuth || !!manifest.addons?.oidc;

    error = validateDevices(devices);
    if (error) throw error;

    error = validateEnv(env);
    if (error) throw error;

    if (constants.DEMO && constants.DEMO_BLOCKED_APPS.includes(appStoreId)) throw new BoxError(BoxError.BAD_FIELD, 'This app is blocked in the demo');

    // sendmail is enabled by default
    const enableMailbox = 'enableMailbox' in data ? data.enableMailbox : true;
    const mailboxName = manifest.addons?.sendmail ? mailboxNameForSubdomain(subdomain, manifest) : null;
    const mailboxDomain = manifest.addons?.sendmail ? domain : null;

    // icon arrives base64 encoded over the API; store raw bytes
    let icon = data.icon || null;
    if (icon) {
        if (!validator.isBase64(icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
        icon = Buffer.from(icon, 'base64');
    }

    // every location of every type is validated together (reserved names, dns rules, dashboard clash)
    const locations = [new Location(subdomain, domain, Location.TYPE_PRIMARY)]
        .concat(secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)))
        .concat(redirectDomains.map(rd => new Location(rd.subdomain, rd.domain, Location.TYPE_REDIRECT)))
        .concat(aliasDomains.map(ad => new Location(ad.subdomain, ad.domain, Location.TYPE_ALIAS)));

    error = await validateLocations(locations);
    if (error) throw error;

    if (constants.DEMO && (await getCount() >= constants.DEMO_APP_LIMIT)) throw new BoxError(BoxError.BAD_STATE, 'Too many installed apps, please uninstall a few and try again');

    // optional: install from an archived backup instead of a fresh app
    let restoreConfig = null;
    if ('backupId' in data) { // install from archive
        const backup = await backups.get(data.backupId);
        if (!backup) throw new BoxError(BoxError.BAD_FIELD, 'Backup not found in archive');
        restoreConfig = { remotePath: backup.remotePath, backupFormat: backup.format };
    }

    const appId = uuid.v4();
    debug(`Installing app ${appId}`);

    const app = {
        accessRestriction,
        memoryLimit,
        sso,
        debugMode,
        mailboxName,
        mailboxDomain,
        enableBackup,
        enableAutomaticUpdate,
        secondaryDomains,
        redirectDomains,
        aliasDomains,
        env,
        devices,
        label,
        tags,
        icon,
        enableMailbox,
        upstreamUri,
        enableTurn,
        enableRedis,
        runState: exports.RSTATE_RUNNING,
        installationState: exports.ISTATE_PENDING_INSTALL
    };

    const [addError] = await safe(add(appId, appStoreId, manifest, subdomain, domain, portBindings, app));
    if (addError && addError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(addError.message, locations, portBindings);
    if (addError) throw addError;

    await purchaseApp({ appId, appstoreId: appStoreId, manifestId: manifest.id || 'customapp' });

    const task = {
        args: { restoreConfig, skipDnsSetup, overwriteDns },
        values: { },
        requiredState: app.installationState
    };

    const taskId = await addTask(appId, app.installationState, task, auditSource);

    // build the eventlog payload with computed fqdns; the raw icon buffer is omitted
    const newApp = Object.assign({}, _.omit(app, 'icon'), { appStoreId, manifest, subdomain, domain, portBindings });
    newApp.fqdn = dns.fqdn(newApp.subdomain, newApp.domain);
    newApp.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId, app: newApp, taskId });

    return { id : appId, taskId };
}
|
|
|
|
// Persists a new accessRestriction (user/group allow-list) for the app and
// records the change in the eventlog.
async function setAccessRestriction(app, accessRestriction, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof accessRestriction, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const validationError = validateAccessRestriction(accessRestriction);
    if (validationError) throw validationError;

    await update(app.id, { accessRestriction });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, accessRestriction });
}
|
|
|
|
// Persists the list of app operators. Operators share the same structure as
// accessRestriction, so the same validator is reused (not a typo).
async function setOperators(app, operators, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof operators, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const validationError = validateAccessRestriction(operators); // same structure for operators and accessRestriction
    if (validationError) throw validationError;

    await update(app.id, { operators });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, operators });
}
|
|
|
|
// Stores the app's crontab (or clears it when null) after a parse check.
async function setCrontab(app, crontab, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(crontab === null || typeof crontab === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    parseCrontab(crontab); // throws on a malformed crontab

    await update(app.id, { crontab });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, crontab });
}
|
|
|
|
// Sets the upstream URI of the built-in proxy app. Rejected for any other app.
// Reverse proxy configs are rewritten before the value is persisted.
async function setUpstreamUri(app, upstreamUri, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof upstreamUri, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'upstreamUri can only be set for proxy app');

    const uriError = validateUpstreamUri(upstreamUri);
    if (uriError) throw uriError;

    // regenerate the proxy configuration first so an invalid value never reaches the database
    await reverseProxy.writeAppConfigs(Object.assign({}, app, { upstreamUri }));

    await update(app.id, { upstreamUri });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, upstreamUri });
}
|
|
|
|
// Persists a user-visible label for the app.
async function setLabel(app, label, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof label, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const labelError = validateLabel(label);
    if (labelError) throw labelError;

    await update(app.id, { label });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, label });
}
|
|
|
|
// Persists the app's tag list after validation.
async function setTags(app, tags, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(Array.isArray(tags));
    assert.strictEqual(typeof auditSource, 'object');

    const tagsError = validateTags(tags);
    if (tagsError) throw tagsError;

    await update(app.id, { tags });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, tags });
}
|
|
|
|
// Persists free-form notes for the app. No validation beyond the type assert.
async function setNotes(app, notes, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof notes, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { notes });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, notes });
}
|
|
|
|
// Toggles the acknowledged flag of one checklist item, recording who changed
// it and when. A no-op when the flag already has the requested value.
async function setChecklistItem(app, checklistItemKey, acknowledged, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof checklistItemKey, 'string');
    assert.strictEqual(typeof acknowledged, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const item = app.checklist[checklistItemKey];
    if (!item) throw new BoxError(BoxError.NOT_FOUND, 'no such checklist item');

    if (item.acknowledged === acknowledged) return; // nothing changed

    item.acknowledged = acknowledged;
    item.changedAt = Date.now();
    item.changedBy = auditSource.username;

    await update(app.id, { checklist: app.checklist });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, checklist: app.checklist });
}
|
|
|
|
// Stores a custom icon for the app. A non-empty icon must be a base64 string
// and is decoded to a Buffer before being written; null clears the icon.
async function setIcon(app, icon, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(icon === null || typeof icon === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (icon) {
        if (!validator.isBase64(icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
        icon = Buffer.from(icon, 'base64');
    }

    await update(app.id, { icon });
    // the raw icon bytes are deliberately not put in the eventlog entry
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, iconChanged: true });
}
|
|
|
|
// Schedules a resize task that applies a new memory limit to the app.
// Returns { taskId } of the scheduled task.
async function setMemoryLimit(app, memoryLimit, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof memoryLimit, 'number');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_RESIZE);
    if (stateError) throw stateError;

    const limitError = validateMemoryLimit(app.manifest, memoryLimit);
    if (limitError) throw limitError;

    const task = { args: {}, values: { memoryLimit } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_RESIZE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, memoryLimit, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a resize task that applies a new CPU quota to the app.
// Returns { taskId } of the scheduled task.
async function setCpuQuota(app, cpuQuota, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof cpuQuota, 'number');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    let error = checkAppState(app, exports.ISTATE_PENDING_RESIZE);
    if (error) throw error;

    error = validateCpuQuota(cpuQuota);
    if (error) throw error;

    const task = {
        args: {},
        values: { cpuQuota }
    };
    // BUGFIX: this previously did `await safe(addTask(...))` which made taskId the
    // [error, result] tuple and silently swallowed addTask failures. Await directly,
    // like setMemoryLimit does, so errors propagate and taskId is the actual id.
    const taskId = await addTask(appId, exports.ISTATE_PENDING_RESIZE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, cpuQuota, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a container re-create task with a new set of mounts.
// Duplicate mount points surface as a CONFLICT error. Returns { taskId }.
async function setMounts(app, mounts, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(Array.isArray(mounts));
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const task = { args: {}, values: { mounts } };
    const [taskError, taskId] = await safe(addTask(app.id, exports.ISTATE_PENDING_RECREATE_CONTAINER, task, auditSource));
    if (taskError) {
        if (taskError.reason === BoxError.ALREADY_EXISTS) throw new BoxError(BoxError.CONFLICT, 'Duplicate mount points');
        throw taskError;
    }

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, mounts, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a container re-create task with a new device pass-through list.
// Returns { taskId }.
async function setDevices(app, devices, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof devices, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const devicesError = validateDevices(devices);
    if (devicesError) throw devicesError;

    const task = { args: {}, values: { devices } };
    const [taskError, taskId] = await safe(addTask(app.id, exports.ISTATE_PENDING_RECREATE_CONTAINER, task, auditSource));
    if (taskError) throw taskError;

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, devices, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a container re-create task that applies custom environment
// variables. Returns { taskId }.
async function setEnvironment(app, env, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof env, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_RECREATE_CONTAINER);
    if (stateError) throw stateError;

    const envError = validateEnv(env);
    if (envError) throw envError;

    const task = { args: {}, values: { env } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_RECREATE_CONTAINER, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, env, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a task that puts the app into (or takes it out of) debug mode.
// Returns { taskId }.
async function setDebugMode(app, debugMode, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof debugMode, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_DEBUG);
    if (stateError) throw stateError;

    const debugError = validateDebugMode(debugMode);
    if (debugError) throw debugError;

    const task = { args: {}, values: { debugMode } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_DEBUG, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, debugMode, taskId });

    return { taskId };
}
|
|
|
|
// Enables or disables the app's outgoing mailbox (sendmail addon) by queuing
// a services-change task. When enabling without an explicit mailboxName, a
// default ".app" name is derived from the app's subdomain. Returns { taskId }.
async function setMailbox(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.enable, 'boolean');

    const enableMailbox = data.enable;

    const appId = app.id;
    let error = checkAppState(app, exports.ISTATE_PENDING_SERVICES_CHANGE);
    if (error) throw error;

    if (!app.manifest.addons?.sendmail) throw new BoxError(BoxError.BAD_FIELD, 'App does not use sendmail');
    // sendmail may only be disabled when the manifest marks it optional
    const optional = 'optional' in app.manifest.addons.sendmail ? app.manifest.addons.sendmail.optional : false;
    if (!optional && !enableMailbox) throw new BoxError(BoxError.BAD_FIELD, 'App requires sendmail to be enabled');

    const mailboxDisplayName = data.mailboxDisplayName || '';
    let mailboxName = data.mailboxName || null;
    const mailboxDomain = data.mailboxDomain || null;

    if (enableMailbox) {
        await mail.getDomain(mailboxDomain); // check if domain exists

        if (mailboxName) {
            error = mail.validateName(mailboxName);
            if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
        } else {
            // BUGFIX: mailboxNameForSubdomain takes (subdomain, manifest) at every other
            // call site (setLocation, updateApp, repair, restore); the previous extra
            // app.domain argument here pushed the manifest out of its parameter slot.
            mailboxName = mailboxNameForSubdomain(app.subdomain, app.manifest);
        }

        if (mailboxDisplayName) {
            error = mail.validateDisplayName(mailboxDisplayName);
            if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
        }
    }

    const task = {
        args: {},
        values: { enableMailbox, mailboxName, mailboxDomain, mailboxDisplayName }
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_SERVICES_CHANGE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, mailboxName, mailboxDomain, mailboxDisplayName, taskId });

    return { taskId };
}
|
|
|
|
// Enables or disables the app's incoming mailbox (recvmail addon) by queuing
// a services-change task. Returns { taskId }.
async function setInbox(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    assert.strictEqual(typeof data.enable, 'boolean');

    const enableInbox = data.enable;

    const stateError = checkAppState(app, exports.ISTATE_PENDING_SERVICES_CHANGE);
    if (stateError) throw stateError;

    if (!app.manifest.addons?.recvmail) throw new BoxError(BoxError.BAD_FIELD, 'App does not use recvmail addon');

    const inboxName = data.inboxName || null;
    const inboxDomain = data.inboxDomain || null;

    if (enableInbox) {
        const domain = await mail.getDomain(data.inboxDomain); // check if domain exists
        if (!domain.enabled) throw new BoxError(BoxError.BAD_FIELD, 'Domain does not have incoming email enabled');

        const nameError = mail.validateName(data.inboxName);
        if (nameError) throw new BoxError(BoxError.BAD_FIELD, nameError.message);
    }

    const task = { args: {}, values: { enableInbox, inboxName, inboxDomain } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_SERVICES_CHANGE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableInbox, inboxName, inboxDomain, taskId });

    return { taskId };
}
|
|
|
|
// Toggles the optional turn addon for the app via a services-change task.
// Returns { taskId }.
async function setTurn(app, enableTurn, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enableTurn, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_SERVICES_CHANGE);
    if (stateError) throw stateError;

    const turnAddon = app.manifest.addons?.turn;
    if (!turnAddon) throw new BoxError(BoxError.BAD_FIELD, 'App does not use turn addon');
    if (!turnAddon.optional) throw new BoxError(BoxError.BAD_FIELD, 'turn service is not optional');

    const task = { args: {}, values: { enableTurn } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_SERVICES_CHANGE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableTurn, taskId });

    return { taskId };
}
|
|
|
|
// Toggles the optional redis addon for the app via a services-change task.
// Returns { taskId }.
async function setRedis(app, enableRedis, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enableRedis, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_SERVICES_CHANGE);
    if (stateError) throw stateError;

    const redisAddon = app.manifest.addons?.redis;
    if (!redisAddon) throw new BoxError(BoxError.BAD_FIELD, 'App does not use redis addon');
    if (!redisAddon.optional) throw new BoxError(BoxError.BAD_FIELD, 'redis service is not optional');

    const task = { args: {}, values: { enableRedis } };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_SERVICES_CHANGE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableRedis, taskId });

    return { taskId };
}
|
|
|
|
// Enables or disables scheduled backups for the app.
async function setAutomaticBackup(app, enable, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enable, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { enableBackup: enable });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableBackup: enable });
}
|
|
|
|
// Enables or disables automatic updates for the app.
async function setAutomaticUpdate(app, enable, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof enable, 'boolean');
    assert.strictEqual(typeof auditSource, 'object');

    await update(app.id, { enableAutomaticUpdate: enable });
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, enableAutomaticUpdate: enable });
}
|
|
|
|
// Persists the app's reverse proxy settings (robots.txt, CSP header,
// HSTS preload flag), filling defaults for omitted fields.
async function setReverseProxyConfig(app, reverseProxyConfig, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof reverseProxyConfig, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const config = Object.assign({ robotsTxt: null, csp: null, hstsPreload: false }, reverseProxyConfig);

    const cspError = validateCsp(config.csp);
    if (cspError) throw cspError;

    const robotsError = validateRobotsTxt(config.robotsTxt);
    if (robotsError) throw robotsError;

    // regenerate proxy configs first so an invalid config never reaches the database
    await reverseProxy.writeAppConfigs(Object.assign({}, app, { reverseProxyConfig: config }));

    await update(app.id, { reverseProxyConfig: config });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, reverseProxyConfig: config });
}
|
|
|
|
// Loads one row from the locations table and wraps it in a Location object.
// Returns null when no row matches the (subdomain, domain) pair.
async function getLocation(subdomain, domain) {
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

    const rows = await database.query(`SELECT ${LOCATION_FIELDS} FROM locations WHERE subdomain=? AND domain=?`, [ subdomain, domain ]);
    if (!rows.length) return null;

    const [ row ] = rows;
    return new Location(subdomain, domain, row.type, safe.JSON.parse(row.certificateJson));
}
|
|
|
|
// Sets (or clears, when cert/key are absent) a user-provided certificate for
// one of the app's locations and hands it to the reverse proxy.
async function setCertificate(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const { subdomain, domain, cert, key } = data;

    const domainObject = await domains.get(domain);
    if (domainObject === null) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

    // validate only when both parts are present; missing cert/key clears the certificate
    if (cert && key) await reverseProxy.validateCertificate(subdomain, domain, { cert, key });

    const certificate = cert && key ? { cert, key } : null;
    // BUGFIX(consistency): getLocation selects from the same table with
    // `WHERE subdomain=? AND domain=?`; this UPDATE previously used `WHERE location=?`
    // for the same value — a column name the sibling query does not use.
    const result = await database.query('UPDATE locations SET certificateJson=? WHERE subdomain=? AND domain=?', [ certificate ? JSON.stringify(certificate) : null, subdomain, domain ]);
    if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Location not found');

    const location = await getLocation(subdomain, domain); // fresh location object with type
    await reverseProxy.setUserCertificate(app, location);
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, subdomain, domain, cert });
}
|
|
|
|
// Moves the app to a new primary subdomain/domain (with optional port
// bindings, secondary/redirect/alias domains) by scheduling a location-change
// task. Fields not present in `data` are intentionally reset to empty values.
// Returns { taskId }.
async function setLocation(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    let error = checkAppState(app, exports.ISTATE_PENDING_LOCATION_CHANGE);
    if (error) throw error;

    const values = {
        subdomain: data.subdomain.toLowerCase(),
        domain: data.domain.toLowerCase(),
        // these are intentionally reset, if not set
        portBindings: {},
        secondaryDomains: [],
        redirectDomains: [],
        aliasDomains: []
    };

    // validate and translate exposed ports only when the caller provided them
    if ('ports' in data) {
        error = validatePorts(data.ports || null, app.manifest);
        if (error) throw error;
        values.portBindings = translateToPortBindings(data.ports || null, app.manifest);
    }

    // rename the auto-created mailbox to match the new location
    // (a '.app' suffix marks the mailbox as auto-generated rather than user chosen)
    if (app.manifest.addons?.sendmail && app.mailboxName?.endsWith('.app')) {
        values.mailboxName = mailboxNameForSubdomain(values.subdomain, app.manifest);
        values.mailboxDomain = values.domain;
    }

    error = validateSecondaryDomains(data.secondaryDomains || {}, app.manifest);
    if (error) throw error;
    values.secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    if ('redirectDomains' in data) {
        values.redirectDomains = data.redirectDomains;
    }

    if ('aliasDomains' in data) {
        values.aliasDomains = data.aliasDomains;
    }

    // collect every location the app will occupy so conflicts can be checked in one pass
    const locations = [new Location(values.subdomain, values.domain, Location.TYPE_PRIMARY)]
        .concat(values.secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)))
        .concat(values.redirectDomains.map(rd => new Location(rd.subdomain, rd.domain, Location.TYPE_REDIRECT)))
        .concat(values.aliasDomains.map(ad => new Location(ad.subdomain, ad.domain, Location.TYPE_ALIAS)));

    error = await validateLocations(locations);
    if (error) throw error;

    const task = {
        args: {
            // snapshot of the current naming/port state so the task can undo DNS etc.
            oldConfig: _.pick(app, 'subdomain', 'domain', 'fqdn', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'portBindings'),
            skipDnsSetup: !!data.skipDnsSetup,
            overwriteDns: !!data.overwriteDns
        },
        values
    };
    // ALREADY_EXISTS from addTask indicates a location/port collision; translate it
    // into a detailed duplicate error for the caller
    const [taskError, taskId] = await safe(addTask(appId, exports.ISTATE_PENDING_LOCATION_CHANGE, task, auditSource));
    if (taskError && taskError.reason !== BoxError.ALREADY_EXISTS) throw taskError;
    if (taskError && taskError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(taskError.message, locations, values.portBindings);

    // decorate values with fqdns for the eventlog entry
    values.fqdn = dns.fqdn(values.subdomain, values.domain);
    values.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    values.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    values.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, Object.assign({ appId, app, taskId }, values));

    return { taskId };
}
|
|
|
|
// Schedules a data-dir migration that moves the app's data onto the given
// volume (or back to default storage when volumeId is null). Returns { taskId }.
async function setStorage(app, volumeId, volumePrefix, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(volumeId === null || typeof volumeId === 'string');
    assert(volumePrefix === null || typeof volumePrefix === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_DATA_DIR_MIGRATION);
    if (stateError) throw stateError;

    if (volumeId) {
        await checkStorage(app, volumeId, volumePrefix);
    } else {
        // normalize both to null when no volume is selected
        volumeId = null;
        volumePrefix = null;
    }

    const task = {
        args: { newStorageVolumeId: volumeId, newStorageVolumePrefix: volumePrefix },
        values: {}
    };
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_DATA_DIR_MIGRATION, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, volumeId, volumePrefix, taskId });

    return { taskId };
}
|
|
|
|
// Schedules an update of the app to the given manifest. Validates the new
// manifest, enforces version/id checks (overridable via data.force) and
// adjusts addon-derived values (mailbox, inbox, sso) when the new manifest
// adds or removes addons. Returns { taskId }.
async function updateApp(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert(data && typeof data === 'object');
    assert(data.manifest && typeof data.manifest === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const skipBackup = !!data.skipBackup, appId = app.id, manifest = data.manifest;
    const values = {};

    if (app.runState === exports.RSTATE_STOPPED) throw new BoxError(BoxError.BAD_STATE, 'Stopped apps cannot be updated');

    let error = checkAppState(app, exports.ISTATE_PENDING_UPDATE);
    if (error) throw error;

    error = manifestFormat.parse(manifest);
    if (error) throw new BoxError(BoxError.BAD_FIELD, 'Manifest error:' + error.message);

    error = await checkManifest(manifest);
    if (error) throw error;

    const updateConfig = { skipBackup, manifest }; // this will clear appStoreId when updating from a repo and set it if passed in for update route
    if ('appStoreId' in data) updateConfig.appStoreId = data.appStoreId;

    // prevent user from installing a app with different manifest id over an existing app
    // this allows cloudron install -f --app <appid> for an app installed from the appStore
    if (app.manifest.id !== updateConfig.manifest.id) {
        if (!data.force) throw new BoxError(BoxError.BAD_FIELD, 'manifest id does not match. force to override');
    }

    // suffix '0' if prerelease is missing for semver.lte to work as expected
    const currentVersion = semver.prerelease(app.manifest.version) ? app.manifest.version : `${app.manifest.version}-0`;
    const updateVersion = semver.prerelease(updateConfig.manifest.version) ? updateConfig.manifest.version : `${updateConfig.manifest.version}-0`;
    if (app.appStoreId !== '' && semver.lte(updateVersion, currentVersion)) {
        if (!data.force) throw new BoxError(BoxError.BAD_FIELD, 'Downgrades are not permitted for apps installed from AppStore. force to override');
    }

    // optional icon override; stored decoded as a Buffer
    if ('icon' in data) {
        if (data.icon) {
            if (!validator.isBase64(data.icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
            data.icon = Buffer.from(data.icon, 'base64');
        }
        values.icon = data.icon;
    }

    // do not update apps in debug mode
    if (app.debugMode && !data.force) throw new BoxError(BoxError.BAD_STATE, 'debug mode enabled. force to override');

    // Ensure we update the memory limit in case the new app requires more memory as a minimum
    // 0 and -1 are special updateConfig for memory limit indicating unset and unlimited
    if (app.memoryLimit > 0 && updateConfig.manifest.memoryLimit && app.memoryLimit < updateConfig.manifest.memoryLimit) {
        updateConfig.memoryLimit = updateConfig.manifest.memoryLimit;
    }

    if (!manifest.addons?.sendmail) { // clear if the update removed addon
        values.mailboxName = values.mailboxDomain = null;
    } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since update added the addon
        values.mailboxName = mailboxNameForSubdomain(app.subdomain, manifest);
        values.mailboxDomain = app.domain;
    }

    if (!manifest.addons?.recvmail) { // clear if the update removed addon. required for fk constraint
        values.enableInbox = false;
        values.inboxName = values.inboxDomain = null;
    }

    // note: manifest and updateConfig.manifest refer to the same object here
    const hasSso = !!updateConfig.manifest.addons?.proxyAuth || !!updateConfig.manifest.addons?.ldap || !!manifest.addons?.oidc;
    if (!hasSso && app.sso) values.sso = false; // turn off sso flag, if the update removes sso options

    const task = {
        args: { updateConfig },
        values
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_UPDATE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_UPDATE, auditSource, { appId, app, skipBackup, toManifest: manifest, fromManifest: app.manifest, force: data.force, taskId });

    return { taskId };
}
|
|
|
|
// Collects all log file paths for the app: the task log, the app log, the
// redis addon log (when used) and any manifest-declared logPaths resolved
// through the container's /run, /tmp and /app/data mounts.
async function getLogPaths(app) {
    assert.strictEqual(typeof app, 'object');

    const appId = app.id;

    const filePaths = [];
    filePaths.push(path.join(paths.LOG_DIR, appId, 'apptask.log'));
    filePaths.push(path.join(paths.LOG_DIR, appId, 'app.log'));
    if (app.manifest.addons?.redis) filePaths.push(path.join(paths.LOG_DIR, `redis-${appId}/app.log`));

    if (app.manifest.logPaths) {
        const [error, result] = await safe(docker.inspect(app.containerId)); // best-effort; skip manifest logPaths if inspect fails
        if (!error) {
            const runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; });
            const tmpVolume = result.Mounts.find(function (mount) { return mount.Destination === '/tmp'; });
            const dataVolume = result.Mounts.find(function (mount) { return mount.Destination === '/app/data'; });

            // note: wild cards are not supported yet in logPaths since that will require shell expansion
            for (const logPath of app.manifest.logPaths) {
                // BUGFIX(robustness): .find() above may return undefined when a mount is
                // absent; previously this dereferenced .Source unconditionally and threw.
                if (logPath.startsWith('/tmp/') && tmpVolume) filePaths.push(`${tmpVolume.Source}/${logPath.slice('/tmp/'.length)}`);
                else if (logPath.startsWith('/run/') && runVolume) filePaths.push(`${runVolume.Source}/${logPath.slice('/run/'.length)}`);
                else if (logPath.startsWith('/app/data/') && dataVolume) filePaths.push(`${dataVolume.Source}/${logPath.slice('/app/data/'.length)}`);
            }
        }
    }

    return filePaths;
}
|
|
|
|
// Returns a LogStream of the app's logs, optionally following new output.
async function getLogs(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert(options && typeof options === 'object');

    assert.strictEqual(typeof options.lines, 'number');
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    const logPaths = await getLogPaths(app);
    // need sudo access for paths inside app container (manifest.logPaths)
    const tailProcess = logs.tail(logPaths, { lines: options.lines, follow: options.follow, sudo: true });

    const logStream = new logs.LogStream({ format: options.format || 'json', source: app.id });
    // the caller has to call destroy() on logStream. destroy() of Transform emits 'close'
    logStream.on('close', () => tailProcess.terminate());

    tailProcess.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// Appends a timestamped line to the app's log file.
// never fails just prints error
async function appendLogLine(app, line) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof line, 'string');

    const logFilePath = path.join(paths.LOG_DIR, app.id, 'app.log');
    // round-trip through toUTCString() drops the millisecond part, yielding
    // second-precision UTC timestamps (presumably intentional - TODO confirm)
    const isoDate = new Date(new Date().toUTCString()).toISOString();

    const appended = safe.fs.appendFileSync(logFilePath, `${isoDate} ${line}\n`);
    if (!appended) console.error(`Could not append log line for app ${app.id} at ${logFilePath}: ${safe.error.message}`);
}
|
|
|
|
// does a re-configure when called from most states. for install/clone errors, it re-installs with an optional manifest
// re-configure can take a dockerImage but not a manifest because re-configure does not clean up addons
// Returns { taskId } of the scheduled repair task.
async function repair(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object'); // { manifest }
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    // pick up the installation state of the failed operation; default to a plain re-configure
    let errorState = (app.error && app.error.installationState) || exports.ISTATE_PENDING_CONFIGURE;

    const task = {
        args: {},
        values: {},
        requiredState: null // NOTE(review): null presumably means no particular current state is required - confirm against addTask
    };

    // maybe split this into a separate route like reinstall?
    if (errorState === exports.ISTATE_PENDING_INSTALL || errorState === exports.ISTATE_PENDING_CLONE) {
        // re-install path: DNS is set up again, overwriting existing records
        task.args = { skipDnsSetup: false, overwriteDns: true };
        if (data.manifest) {
            let error = manifestFormat.parse(data.manifest);
            if (error) throw new BoxError(BoxError.BAD_FIELD, `manifest error: ${error.message}`);

            error = await checkManifest(data.manifest);
            if (error) throw error;

            if (!data.manifest.addons?.sendmail) { // clear if repair removed addon
                task.values.mailboxName = task.values.mailboxDomain = null;
            } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since repair added the addon
                task.values.mailboxName = mailboxNameForSubdomain(app.subdomain, data.manifest);
                task.values.mailboxDomain = app.domain;
            }

            task.values.manifest = data.manifest;
            task.args.oldManifest = app.manifest;
        }
    } else {
        // re-configure path: a custom docker image may be injected but the manifest stays
        errorState = exports.ISTATE_PENDING_CONFIGURE;
        if (data.dockerImage) {
            const newManifest = Object.assign({}, app.manifest, { dockerImage: data.dockerImage });
            task.values.manifest = newManifest;
        }
    }

    const taskId = await addTask(appId, errorState, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_REPAIR, auditSource, { app, taskId });

    return { taskId };
}
|
|
|
|
// Restores the app from the given backup, or re-installs from the current
// manifest when backupId is falsy. Schedules a restore task and returns
// { taskId }. Throws BAD_STATE/BAD_FIELD/EXTERNAL_ERROR on validation failure.
async function restore(app, backupId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    let error = checkAppState(app, exports.ISTATE_PENDING_RESTORE);
    if (error) throw error;

    // for empty or null backupId, use existing manifest to mimic a reinstall
    const backupInfo = backupId ? await backups.get(backupId) : { manifest: app.manifest };
    const manifest = backupInfo.manifest;

    if (!manifest) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Could not get restore manifest');
    if (backupInfo.encryptionVersion === 1) throw new BoxError(BoxError.BAD_FIELD, 'This encrypted backup was created with an older Cloudron version and has to be restored using the CLI tool');

    // re-validate because this new box version may not accept old configs
    error = await checkManifest(manifest);
    if (error) throw error;

    const values = { manifest };
    if (!manifest.addons?.sendmail) { // clear if restore removed addon
        values.mailboxName = values.mailboxDomain = null;
    } else if (!app.mailboxName || app.mailboxName.endsWith('.app')) { // allocate since restore added the addon
        values.mailboxName = mailboxNameForSubdomain(app.subdomain, manifest);
        values.mailboxDomain = app.domain;
    }

    if (!manifest.addons?.recvmail) { // recvmail is always optional. clear if restore removed addon
        values.enableInbox = false;
        values.inboxName = values.inboxDomain = null;
    }

    // where (and in which format) the backup lives; undefined fields for the reinstall case
    const restoreConfig = { remotePath: backupInfo.remotePath, backupFormat: backupInfo.format };

    const task = {
        args: {
            restoreConfig,
            oldManifest: app.manifest,
            skipDnsSetup: !!backupId, // if this is a restore, just skip dns setup. only re-installs should setup dns
            overwriteDns: true
        },
        values
    };

    const taskId = await addTask(appId, exports.ISTATE_PENDING_RESTORE, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_RESTORE, auditSource, { app, backupId: backupInfo.id, remotePath: backupInfo.remotePath, fromManifest: app.manifest, toManifest: manifest, taskId });

    return { taskId };
}
|
|
|
|
// Imports app data from an external backup location, or in-place when no
// remotePath is given. data: { remotePath, backupFormat, backupConfig }.
// Returns { taskId } of the scheduled import task.
async function importApp(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const { remotePath, backupFormat, backupConfig } = data;

    let error = checkAppState(app, exports.ISTATE_PENDING_IMPORT);
    if (error) throw error;

    let restoreConfig;

    if (data.remotePath) { // if not provided, we import in-place
        error = backups.validateFormat(backupFormat);
        if (error) throw error;

        // NOTE(review): this mutates the caller-provided backupConfig — the plaintext
        // password is replaced with derived encryption keys before being persisted in the task
        if ('password' in backupConfig) {
            backupConfig.encryption = backups.generateEncryptionKeysSync(backupConfig.password);
            delete backupConfig.password;
        } else {
            backupConfig.encryption = null;
        }

        await backups.setupManagedStorage(backupConfig, `/mnt/appimport-${app.id}`); // this validates mountOptions . this is not cleaned up, it's fine
        backupConfig.rootPath = backups.getRootPath(backupConfig, `/mnt/appimport-${app.id}`);
        error = await backups.testStorage(Object.assign({ mountPath: `/mnt/appimport-${app.id}` }, backupConfig)); // this validates provider and it's api options. requires mountPath
        if (error) throw error;

        restoreConfig = { remotePath, backupFormat, backupConfig };
    } else {
        restoreConfig = { remotePath: null }; // in-place import: the task works off existing app data
    }

    const task = {
        args: {
            restoreConfig,
            oldManifest: app.manifest,
            skipDnsSetup: false,
            overwriteDns: true
        },
        values: {}
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_IMPORT, task, auditSource);

    await eventlog.add(eventlog.ACTION_APP_IMPORT, auditSource, { app: app, remotePath, fromManifest: app.manifest, toManifest: app.manifest, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a snapshot-only backup task whose snapshot serves as the export artifact.
// Returns { taskId }. Note: unlike backup(), no eventlog entry is recorded here.
async function exportApp(app, data, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_BACKUP);
    if (stateError) throw stateError;

    // snapshotOnly skips the usual upload/rotation bookkeeping of a regular backup
    const taskId = await addTask(app.id, exports.ISTATE_PENDING_BACKUP, { args: { snapshotOnly: true }, values: {} }, auditSource);

    return { taskId };
}
|
|
|
|
// Registers the purchase with the appstore. On failure, rolls back the
// just-added app record (by deleting it) and rethrows the purchase error.
async function purchaseApp(data) {
    assert.strictEqual(typeof data, 'object');

    const [purchaseError] = await safe(appstore.purchaseApp(data));
    if (purchaseError) {
        await del(data.appId); // don't keep an unpurchased app around
        throw purchaseError;
    }
}
|
|
|
|
// Clones an app from one of its backups into a brand new app at a new location.
// data: { subdomain, domain, backupId, ports?, secondaryDomains?, overwriteDns?, skipDnsSetup? }.
// Returns { id: newAppId, taskId }.
async function clone(app, data, user, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof data, 'object');
    assert(user && typeof user === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const subdomain = data.subdomain.toLowerCase(),
        domain = data.domain.toLowerCase(),
        backupId = data.backupId,
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
        skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false;

    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

    const backupInfo = await backups.get(backupId);

    if (!backupInfo) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
    if (!backupInfo.manifest) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Could not detect restore manifest');
    if (backupInfo.encryptionVersion === 1) throw new BoxError(BoxError.BAD_FIELD, 'This encrypted backup was created with an older Cloudron version and cannot be cloned');

    const manifest = backupInfo.manifest, appStoreId = app.appStoreId;

    // validate the new primary/secondary locations before creating anything
    let error = validateSecondaryDomains(data.secondaryDomains || {}, manifest);
    if (error) throw error;
    const secondaryDomains = translateSecondaryDomains(data.secondaryDomains || {});

    const locations = [new Location(subdomain, domain, Location.TYPE_PRIMARY)]
        .concat(secondaryDomains.map(sd => new Location(sd.subdomain, sd.domain, Location.TYPE_SECONDARY)));

    error = await validateLocations(locations);
    if (error) throw error;

    // re-validate because this new box version may not accept old configs
    error = await checkManifest(manifest);
    if (error) throw error;

    error = validatePorts(data.ports || null, manifest);
    if (error) throw error;
    const portBindings = translateToPortBindings(data.ports || null, manifest);

    // should we copy the original app's mailbox settings instead?
    const mailboxName = manifest.addons?.sendmail ? mailboxNameForSubdomain(subdomain, manifest) : null;
    const mailboxDomain = manifest.addons?.sendmail ? domain : null;

    const newAppId = uuid.v4();

    const icons = await getIcons(app.id);

    // copy over the source app settings that make sense for a clone
    const dolly = _.pick(app, 'memoryLimit', 'cpuQuota', 'crontab', 'reverseProxyConfig', 'env', 'servicesConfig', 'tags',
        'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain',
        'enableTurn', 'enableRedis', 'mounts', 'enableBackup', 'enableAutomaticUpdate', 'accessRestriction', 'operators', 'sso');

    if (!manifest.addons?.recvmail) dolly.inboxDomain = null; // needed because we are cloning _current_ app settings with old manifest

    const obj = Object.assign(dolly, {
        installationState: exports.ISTATE_PENDING_CLONE,
        runState: exports.RSTATE_RUNNING,
        mailboxName,
        mailboxDomain,
        secondaryDomains,
        redirectDomains: [],
        aliasDomains: [],
        label: app.label ? `${app.label}-clone` : '',
        icon: icons.icon,
    });

    const [addError] = await safe(add(newAppId, appStoreId, manifest, subdomain, domain, portBindings, obj));
    if (addError && addError.reason === BoxError.ALREADY_EXISTS) throw getDuplicateErrorDetails(addError.message, locations, portBindings);
    if (addError) throw addError;

    // purchaseApp() deletes the just-added app record if the purchase fails
    await purchaseApp({ appId: newAppId, appstoreId: app.appStoreId, manifestId: manifest.id || 'customapp' });

    const restoreConfig = { remotePath: backupInfo.remotePath, backupFormat: backupInfo.format };
    const task = {
        args: { restoreConfig, overwriteDns, skipDnsSetup, oldManifest: null },
        values: {},
        requiredState: exports.ISTATE_PENDING_CLONE
    };
    const taskId = await addTask(newAppId, exports.ISTATE_PENDING_CLONE, task, auditSource);

    // build an eventlog-friendly view of the new app with fqdns resolved (icon omitted: binary blob)
    const newApp = Object.assign({}, _.omit(obj, 'icon'), { appStoreId, manifest, subdomain, domain, portBindings });
    newApp.fqdn = dns.fqdn(newApp.subdomain, newApp.domain);
    newApp.secondaryDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, ad.domain); });

    await eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: app.id, backupId, remotePath: backupInfo.remotePath, oldApp: app, newApp, taskId });

    return { id: newAppId, taskId };
}
|
|
|
|
// Schedules the uninstall task for an app. The appstore purchase is released
// up-front; the task then removes local resources. Returns { taskId }.
async function uninstall(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;

    const stateError = checkAppState(app, exports.ISTATE_PENDING_UNINSTALL);
    if (stateError) throw stateError;

    await appstore.unpurchaseApp(appId, { appstoreId: app.appStoreId, manifestId: app.manifest.id || 'customapp' });

    const taskId = await addTask(appId, exports.ISTATE_PENDING_UNINSTALL, {
        args: {},
        values: {},
        requiredState: null // can run in any state, as long as no task is active
    }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId, app, taskId });

    return { taskId };
}
|
|
|
|
// Archives an app: records an archive entry referencing the latest backup,
// then uninstalls the app. icon/appStoreIcon/appConfig are stored with the
// archive entry (they are only meaningful for archives, not plain backups).
async function archive(app, backupId, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    // re-check that backupId is still the most recent backup to avoid archiving a stale one
    const latest = await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1);
    if (latest.length === 0) throw new BoxError(BoxError.BAD_STATE, 'No recent backup to archive');
    if (latest[0].id !== backupId) throw new BoxError(BoxError.BAD_STATE, 'Latest backup id has changed');

    const icons = await getIcons(app.id);
    const { taskId } = await uninstall(app, auditSource);
    await archives.add(backupId, { icon: icons.icon, appStoreIcon: icons.appStoreIcon, appConfig: app });

    return { taskId };
}
|
|
|
|
// Schedules a task that starts the app and records RSTATE_RUNNING. Returns { taskId }.
async function start(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_START);
    if (stateError) throw stateError;

    const taskId = await addTask(app.id, exports.ISTATE_PENDING_START, {
        args: {},
        values: { runState: exports.RSTATE_RUNNING }
    }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_START, auditSource, { appId: app.id, app, taskId });
    return { taskId };
}
|
|
|
|
// Schedules a task that stops the app and records RSTATE_STOPPED. Returns { taskId }.
async function stop(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_STOP);
    if (stateError) throw stateError;

    const taskId = await addTask(app.id, exports.ISTATE_PENDING_STOP, {
        args: {},
        values: { runState: exports.RSTATE_STOPPED }
    }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_STOP, auditSource, { appId: app.id, app, taskId });

    return { taskId };
}
|
|
|
|
// Schedules a restart task; the run state afterwards is RSTATE_RUNNING. Returns { taskId }.
async function restart(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_RESTART);
    if (stateError) throw stateError;

    const taskId = await addTask(app.id, exports.ISTATE_PENDING_RESTART, {
        args: {},
        values: { runState: exports.RSTATE_RUNNING }
    }, auditSource);

    await eventlog.add(eventlog.ACTION_APP_RESTART, auditSource, { appId: app.id, app, taskId });

    return { taskId };
}
|
|
|
|
// Validates a manifest against this box version. Returns a BoxError describing
// the first problem found, or null/undefined when the manifest is acceptable.
async function checkManifest(manifest) {
    assert(manifest && typeof manifest === 'object');

    if (manifest.manifestVersion !== 2) return new BoxError(BoxError.BAD_FIELD, 'Manifest version must be 2');

    if (!manifest.dockerImage) return new BoxError(BoxError.BAD_FIELD, 'Missing dockerImage'); // dockerImage is optional in manifest

    const boxVersion = constants.VERSION;
    const { minBoxVersion, maxBoxVersion } = manifest;

    // version bounds are only enforced when present and parseable
    if (semver.valid(maxBoxVersion) && semver.gt(boxVersion, maxBoxVersion)) return new BoxError(BoxError.BAD_FIELD, 'Box version exceeds Apps maxBoxVersion');
    if (semver.valid(minBoxVersion) && semver.gt(minBoxVersion, boxVersion)) return new BoxError(BoxError.BAD_FIELD, 'App version requires a new platform version');

    return await services.checkAddonsSupport(manifest.addons || {});
}
|
|
|
|
// Creates a docker exec session in the app's container.
// options: { cmd? (defaults to /bin/bash), tty, lang? }. Returns the exec id.
async function createExec(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert(options && typeof options === 'object');

    if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) throw new BoxError(BoxError.BAD_FIELD, 'cannot exec on proxy app');

    const cmd = options.cmd || [ '/bin/bash' ];
    assert(Array.isArray(cmd) && cmd.length > 0);

    if (app.installationState !== exports.ISTATE_INSTALLED || app.runState !== exports.RSTATE_RUNNING) throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');

    // A pseudo tty is a terminal which processes can detect (for example, disable colored output)
    // Creating a pseudo terminal also assigns a terminal driver which detects control sequences
    // When passing binary data, tty must be disabled. In addition, the stdout/stderr becomes a single
    // unified stream because of the nature of a tty (see https://github.com/docker/docker/issues/19696)
    const createOptions = {
        AttachStdin: true,
        AttachStdout: true,
        AttachStderr: true,
        Tty: options.tty,
        Cmd: cmd
    };

    // currently the webterminal and cli sets C.UTF-8
    if (options.lang) createOptions.Env = [ `LANG=${options.lang}` ];

    return await docker.createExec(app.containerId, createOptions);
}
|
|
|
|
// Starts a previously created exec session and returns the hijacked duplex stream.
// options: { tty, rows?, columns? }. rows/columns trigger a delayed terminal resize.
async function startExec(app, execId, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof execId, 'string');
    assert(options && typeof options === 'object');

    if (app.installationState !== exports.ISTATE_INSTALLED || app.runState !== exports.RSTATE_RUNNING) {
        throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
    }

    const startOptions = {
        Detach: false,
        Tty: options.tty,
        // hijacking upgrades the docker connection from http to tcp. because of this upgrade,
        // we can work with half-close connections (not defined in http). this way, the client
        // can properly signal that stdin is EOF by closing it's side of the socket. In http,
        // the whole connection will be dropped when stdin get EOF.
        // https://github.com/apocas/dockerode/commit/b4ae8a03707fad5de893f302e4972c1e758592fe
        hijack: true,
        stream: true,
        stdin: true,
        stdout: true,
        stderr: true
    };

    const stream = await docker.startExec(execId, startOptions);

    if (options.rows && options.columns) {
        // there is a race where resizing too early results in a 404 "no such exec"
        // https://git.cloudron.io/cloudron/box/issues/549
        // resize is fire-and-forget; failures are only logged via safe()'s debug option
        setTimeout(async function () {
            await safe(docker.resizeExec(execId, { h: options.rows, w: options.columns }, { debug }));
        }, 2000);
    }

    return stream;
}
|
|
|
|
// Looks up an exec session. The app argument is unused here beyond the route
// contract; exec state lives entirely in docker.
async function getExec(app, execId) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof execId, 'string');

    return docker.getExec(execId);
}
|
|
|
|
// Decides whether an app may be updated automatically to updateInfo.manifest.
// Returns false for opted-out apps, unstable releases, major version jumps,
// stopped apps, or updates that drop a port the app currently binds.
function canAutoupdateApp(app, updateInfo) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof updateInfo, 'object');

    const newManifest = updateInfo.manifest;

    if (!app.enableAutomaticUpdate) return false;

    // for invalid subscriptions the appstore does not return a dockerImage
    if (!newManifest.dockerImage) return false;

    if (updateInfo.unstable) return false; // only manual update allowed for unstable updates

    // major changes are blocking (0.x apps are exempt)
    const currentMajor = semver.major(app.manifest.version);
    if (currentMajor !== 0 && currentMajor !== semver.major(newManifest.version)) return false;

    if (app.runState === exports.RSTATE_STOPPED) return false; // stopped apps won't run migration scripts and shouldn't be updated

    const newTcpPorts = newManifest.tcpPorts || { };
    const newUdpPorts = newManifest.udpPorts || { };

    // every port in use must survive the update; it's fine if unused keys got removed
    return Object.keys(app.portBindings).every(portName => (portName in newTcpPorts) || (portName in newUdpPorts));
}
|
|
|
|
// Walks the update list and updates each eligible app; apps that fail the
// canAutoupdateApp() check get a manual-update notification instead.
async function autoupdateApps(updateInfo, auditSource) { // updateInfo is { appId -> { manifest } }
    assert.strictEqual(typeof updateInfo, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    for (const [appId, info] of Object.entries(updateInfo)) {
        const [getError, app] = await safe(get(appId));
        if (getError) {
            debug(`Cannot autoupdate app ${appId}: ${getError.message}`);
            continue;
        }

        if (!canAutoupdateApp(app, info)) {
            debug(`app ${app.fqdn} requires manual update`);
            notifications.alert(notifications.ALERT_MANUAL_APP_UPDATE, `${app.manifest.title} at ${app.fqdn} requires manual update to version ${info.manifest.version}`, `Changelog:\n${info.manifest.changelog}\n`, { persist: false });
            continue;
        }

        debug(`app ${app.fqdn} will be automatically updated`);
        const [updateError] = await safe(updateApp(app, { manifest: info.manifest, force: false }, auditSource));
        if (updateError) debug(`Error autoupdating ${appId}. ${updateError.message}`);
    }
}
|
|
|
|
// Schedules an on-demand backup task for the app. Returns { taskId }.
async function backup(app, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const stateError = checkAppState(app, exports.ISTATE_PENDING_BACKUP);
    if (stateError) throw stateError;

    const taskId = await addTask(app.id, exports.ISTATE_PENDING_BACKUP, { args: {}, values: {} }, auditSource);
    await eventlog.add(eventlog.ACTION_APP_BACKUP, auditSource, { app, appId: app.id, taskId });

    return { taskId };
}
|
|
|
|
// Lists the app's backups, paged. Only backups in the 'normal' state are returned.
async function listBackups(app, page, perPage) {
    assert.strictEqual(typeof app, 'object');
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);

    return backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, page, perPage);
}
|
|
|
|
// Updates metadata of one of this app's backups. A backup belonging to another
// app is reported as NOT_FOUND, same as a missing one.
async function updateBackup(app, backupId, data) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof data, 'object');

    const backupInfo = await backups.get(backupId);
    if (!backupInfo || backupInfo.identifier !== app.id) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

    await backups.update(backupId, data);
}
|
|
|
|
// Returns { stream, filename } for downloading an app backup as a tar.gz.
// Only tgz-format backups can be streamed; encrypted backups get a '.enc' suffix.
async function getBackupDownloadStream(app, backupId) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof backupId, 'string');

    const backup = await backups.get(backupId);
    if (!backup) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
    if (backup.identifier !== app.id) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found'); // some other app's backup
    if (backup.format !== 'tgz') throw new BoxError(BoxError.BAD_STATE, 'only tgz backups can be downloaded');

    const backupConfig = await backups.getConfig();

    // pipe through a PassThrough so storage read errors surface as a BoxError on the returned stream
    const ps = new PassThrough();

    const stream = await storage.api(backupConfig.provider).download(backupConfig, tgz.getBackupFilePath(backupConfig, backup.remotePath));
    stream.on('error', function(error) {
        debug(`getBackupDownloadStream: read stream error: ${error.message}`);
        ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error));
    });
    stream.pipe(ps);

    // e.g. 'app-backup-2024-01-01-12-00-00 (app.example.com).tar.gz'
    const now = (new Date()).toISOString().replace(/:|T/g,'-').replace(/\..*/,'');
    const encryptionSuffix = backup.encryptionVersion ? '.enc' : '';
    const filename = `app-backup-${now} (${app.fqdn}).tar.gz${encryptionSuffix}`;

    return { stream: ps, filename };
}
|
|
|
|
// Marks apps for restore (from their most recent backup) or fresh install (when
// no backup exists). Tasks are created unscheduled; autoRestartTasks runs them
// once the platform is ready.
async function restoreApps(apps, options, auditSource) {
    assert(Array.isArray(apps));
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    // drop errored apps (let them be 'repaired' by hand) and apps already pending
    // restore (safeguard against tasks being created non-stop if we crash on startup)
    const candidates = apps.filter(app =>
        app.installationState !== exports.ISTATE_ERROR &&
        app.installationState !== exports.ISTATE_PENDING_RESTORE);

    for (const app of candidates) {
        const [error, result] = await safe(backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1));
        const latest = (!error && result.length) ? result[0] : null;

        const installationState = latest ? exports.ISTATE_PENDING_RESTORE : exports.ISTATE_PENDING_INSTALL;
        const restoreConfig = latest ? { remotePath: latest.remotePath, backupFormat: latest.format } : null;
        const oldManifest = latest ? app.manifest : null;

        const task = {
            args: { restoreConfig, skipDnsSetup: options.skipDnsSetup, overwriteDns: true, oldManifest },
            values: {},
            scheduleNow: false, // task will be scheduled by autoRestartTasks when platform is ready
            requireNullTaskId: false // ignore existing stale taskId
        };

        debug(`restoreApps: marking ${app.fqdn} for restore using restore config ${JSON.stringify(restoreConfig)}`);

        const [addTaskError, taskId] = await safe(addTask(app.id, installationState, task, auditSource));
        if (addTaskError) debug(`restoreApps: error marking ${app.fqdn} for restore: ${JSON.stringify(addTaskError)}`);
        else debug(`restoreApps: marked ${app.id} for restore with taskId ${taskId}`);
    }
}
|
|
|
|
// Marks apps for re-configuration. options.scheduleNow controls whether the
// tasks start immediately or wait for the scheduler.
async function configureApps(apps, options, auditSource) {
    assert(Array.isArray(apps));
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    // drop errored apps (let them be 'repaired' by hand) and apps already pending
    // configure (safeguard against tasks being created non-stop if we crash on startup)
    const candidates = apps.filter(app =>
        app.installationState !== exports.ISTATE_ERROR &&
        app.installationState !== exports.ISTATE_PENDING_CONFIGURE);

    const scheduleNow = !!options.scheduleNow;

    for (const app of candidates) {
        debug(`configureApps: marking ${app.fqdn} for reconfigure (scheduleNow: ${scheduleNow})`);

        const task = {
            args: {},
            values: {},
            scheduleNow,
            requireNullTaskId: false // ignore existing stale taskId
        };

        const [addTaskError, taskId] = await safe(addTask(app.id, exports.ISTATE_PENDING_CONFIGURE, task, auditSource));
        if (addTaskError) debug(`configureApps: error marking ${app.fqdn} for configure: ${JSON.stringify(addTaskError)}`);
        else debug(`configureApps: marked ${app.id} for re-configure with taskId ${taskId}`);
    }
}
|
|
|
|
// Restarts every running app whose manifest uses one of the changed addons
// (e.g. after a database service update). Containers are stopped synchronously
// before the restart tasks are queued.
async function restartAppsUsingAddons(changedAddons, auditSource) {
    assert(Array.isArray(changedAddons));
    assert.strictEqual(typeof auditSource, 'object');

    let apps = await list();
    // TODO: This ends up restarting apps that have optional redis
    apps = apps.filter(app => app.manifest.addons && _.intersection(Object.keys(app.manifest.addons), changedAddons).length !== 0);
    apps = apps.filter(app => app.installationState !== exports.ISTATE_ERROR); // remove errored apps. let them be 'repaired' by hand
    apps = apps.filter(app => app.installationState !== exports.ISTATE_PENDING_RESTART); // safeguard against tasks being created non-stop restart if we crash on startup
    apps = apps.filter(app => app.runState !== exports.RSTATE_STOPPED); // don't start stopped apps

    for (const app of apps) {
        debug(`restartAppsUsingAddons: marking ${app.fqdn} for restart`);

        const task = {
            args: {},
            values: { runState: exports.RSTATE_RUNNING }
        };

        // stop apps before updating the databases because postgres will "lock" them preventing import
        const [stopError] = await safe(docker.stopContainers(app.id));
        if (stopError) debug(`restartAppsUsingAddons: error stopping ${app.fqdn}`, stopError);

        const [addTaskError, taskId] = await safe(addTask(app.id, exports.ISTATE_PENDING_RESTART, task, auditSource));
        if (addTaskError) debug(`restartAppsUsingAddons: error marking ${app.fqdn} for restart: ${JSON.stringify(addTaskError)}`);
        else debug(`restartAppsUsingAddons: marked ${app.id} for restart with taskId ${taskId}`);
    }
}
|
|
|
|
// auto-restart app tasks after a crash
|
|
async function schedulePendingTasks(auditSource) {
|
|
assert.strictEqual(typeof auditSource, 'object');
|
|
|
|
debug('schedulePendingTasks: scheduling app tasks');
|
|
|
|
const result = await list();
|
|
|
|
for (const app of result) {
|
|
if (!app.taskId) continue; // if not in any pending state, do nothing
|
|
|
|
debug(`schedulePendingTasks: schedule task for ${app.fqdn} ${app.id}: state=${app.installationState},taskId=${app.taskId}`);
|
|
|
|
await safe(scheduleTask(app.id, app.installationState, app.taskId, auditSource), { debug }); // ignore error
|
|
}
|
|
}
|
|
|
|
// Lists eventlog entries for this app, paged. No action filter is applied;
// searching by the app id matches every event referencing this app.
async function listEventlog(app, page, perPage) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof page, 'number');
    assert.strictEqual(typeof perPage, 'number');

    return eventlog.listPaged([], app.id, page, perPage);
}
|
|
|
|
// Collects the whole stream into a single utf8 string.
// Rejects with a BoxError(FS_ERROR) if the stream errors.
async function drainStream(stream) {
    return new Promise((resolve, reject) => {
        let data = '';
        stream.setEncoding('utf8');
        // bugfix: previously `reject(new BoxError.FS_ERROR, error.message)` — that treated the
        // FS_ERROR code constant as a constructor and passed the message as the unused second
        // argument to reject(), producing a TypeError instead of a proper BoxError rejection
        stream.on('error', (error) => reject(new BoxError(BoxError.FS_ERROR, error.message)));
        stream.on('data', function (d) { data += d; });
        stream.on('end', function () {
            resolve(data);
        });
    });
}
|
|
|
|
// Downloads a file or directory from the app's container.
// Returns { stream, filename, size } where size is 0 (unknown) for directories,
// which are streamed as a gzipped tarball.
async function downloadFile(app, filePath) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof filePath, 'string');

    // stat the path inside the container to learn its type and size
    const statExecId = await createExec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true });
    const statStream = await startExec(app, statExecId, { tty: true });
    const data = await drainStream(statStream);

    // output is '<file type>-<size>'; GNU stat file type names contain no '-'
    const parts = data.split('-');
    if (parts.length !== 2) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist');

    const type = parts[0];
    let filename, cmd, size;

    // bugfix: GNU stat reports 0-byte files as 'regular empty file'; previously such
    // files fell through to the 'only files or dirs' rejection and could not be downloaded
    if (type === 'regular file' || type === 'regular empty file') {
        cmd = [ 'cat', filePath ];
        size = parseInt(parts[1], 10);
        filename = path.basename(filePath);
        if (isNaN(size)) throw new BoxError(BoxError.NOT_FOUND, 'file does not exist');
    } else if (type === 'directory') {
        cmd = ['tar', 'zcf', '-', '-C', filePath, '.'];
        filename = path.basename(filePath) + '.tar.gz';
        size = 0; // unknown
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'only files or dirs can be downloaded');
    }

    const execId = await createExec(app, { cmd, tty: false });
    const inputStream = await startExec(app, execId, { tty: false });

    // transforms the docker multiplexed stream into a normal stream.
    // each frame: 1 byte stream type, 3 bytes padding, 4 bytes big-endian payload length, payload
    const stdoutStream = new TransformStream({
        transform: function (chunk, ignoredEncoding, callback) {
            this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk;

            for (;;) {
                if (this._buffer.length < 8) break; // header is 8 bytes

                const type = this._buffer.readUInt8(0);
                const len = this._buffer.readUInt32BE(4);

                if (this._buffer.length < (8 + len)) break; // not enough

                const payload = this._buffer.slice(8, 8 + len);

                this._buffer = this._buffer.slice(8+len); // consumed

                if (type === 1) this.push(payload); // type 1 is stdout; stderr frames are dropped
            }

            callback();
        }
    });

    inputStream.pipe(stdoutStream);

    return { stream: stdoutStream, filename, size };
}
|
|
|
|
// Streams a local file into the app's container at destFilePath, by piping it
// into `cat` running inside an exec session. Resolves when the write finishes.
async function uploadFile(app, sourceFilePath, destFilePath) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof sourceFilePath, 'string');
    assert.strictEqual(typeof destFilePath, 'string');

    // the built-in bash printf understands "%q" but not /usr/bin/printf.
    // ' gets replaced with '\'' . the first closes the quote and last one starts a new one
    const escapedDestFilePath = await shell.bash(`printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { encoding: 'utf8' });
    debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`);

    const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false });
    const destStream = await startExec(app, execId, { tty: false });

    return new Promise((resolve, reject) => {
        // once() guards against both streams erroring; only the first failure rejects
        const done = once(error => reject(new BoxError(BoxError.FS_ERROR, error.message)));

        const sourceStream = fs.createReadStream(sourceFilePath);
        sourceStream.on('error', done);
        destStream.on('error', done);

        destStream.on('finish', resolve);

        sourceStream.pipe(destStream);
    });
}
|
|
|
|
// Persists the app's config (and, best-effort, its custom icon) into the app
// data directory so it can be recovered later via loadConfig().
async function writeConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const configPath = path.join(paths.APPS_DATA_DIR, `${app.id}/config.json`);
    if (!safe.fs.writeFileSync(configPath, JSON.stringify(app, null, 4))) {
        throw new BoxError(BoxError.FS_ERROR, 'Error creating config.json: ' + safe.error.message);
    }

    // icon write is best-effort; errors from getIcons are ignored
    const [iconError, icons] = await safe(getIcons(app.id));
    if (!iconError && icons.icon) safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, `${app.id}/icon.png`), icons.icon);
}
|
|
|
|
// Restores select app settings (and icon) from the config.json previously
// written by writeConfig(), then updates the app record. Missing or unparsable
// config files are tolerated.
async function loadConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const configPath = path.join(paths.APPS_DATA_DIR, `${app.id}/config.json`);
    const appConfig = safe.JSON.parse(safe.fs.readFileSync(configPath));

    const data = appConfig
        ? _.pick(appConfig, 'memoryLimit', 'cpuQuota', 'enableBackup', 'reverseProxyConfig', 'env', 'servicesConfig', 'label', 'tags', 'enableAutomaticUpdate')
        : {};

    const icon = safe.fs.readFileSync(path.join(paths.APPS_DATA_DIR, `${app.id}/icon.png`));
    if (icon) data.icon = icon;

    await update(app.id, data);
}
|