12e073e8cf
mostly because code is being autogenerated by all the AI stuff using this prefix. it's also used in the stack trace.
2233 lines
87 KiB
JavaScript
'use strict';
|
|
|
|
// public interface of the services module. 'services' are the long-running
// infrastructure containers; 'addons' are the per-app concepts from the app
// manifest that map onto them.
exports = module.exports = {
    getServiceConfig,

    listServices,

    getServiceStatus,

    getServiceLogs,

    configureService,

    restartService,

    rebuildService,

    startAppServices,

    stopAppServices,

    startServices,

    moveDataDir, // localstorage specific command

    setupAddons,

    teardownAddons,

    backupAddons,

    restoreAddons,

    clearAddons,

    checkAddonsSupport,

    getEnvironment,

    getContainerNamesSync,

    getContainerDetails,

    // exported only for apptask.js to update immich pgvectors extension - can be removed later
    _postgreSqlNames: postgreSqlNames,

    // lifecycle states reported by getServiceStatus()
    SERVICE_STATUS_STARTING: 'starting', // container up, waiting for healthcheck
    SERVICE_STATUS_ACTIVE: 'active',
    SERVICE_STATUS_STOPPED: 'stopped',
    SERVICE_STATUS_DISABLED: 'disabled', // feature not supported
};
|
|
|
|
const addonConfigs = require('./addonconfigs.js'),
|
|
apps = require('./apps.js'),
|
|
assert = require('node:assert'),
|
|
blobs = require('./blobs.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
branding = require('./branding.js'),
|
|
constants = require('./constants.js'),
|
|
crypto = require('node:crypto'),
|
|
dashboard = require('./dashboard.js'),
|
|
debug = require('debug')('box:services'),
|
|
dig = require('./dig.js'),
|
|
docker = require('./docker.js'),
|
|
eventlog = require('./eventlog.js'),
|
|
fs = require('node:fs'),
|
|
hat = require('./hat.js'),
|
|
http = require('node:http'),
|
|
infra = require('./infra_version.js'),
|
|
logs = require('./logs.js'),
|
|
mail = require('./mail.js'),
|
|
mailServer = require('./mailserver.js'),
|
|
oidcClients = require('./oidcclients.js'),
|
|
os = require('node:os'),
|
|
path = require('node:path'),
|
|
paths = require('./paths.js'),
|
|
{ pipeline } = require('node:stream'),
|
|
promiseRetry = require('./promise-retry.js'),
|
|
safe = require('safetydance'),
|
|
semver = require('semver'),
|
|
settings = require('./settings.js'),
|
|
sftp = require('./sftp.js'),
|
|
shell = require('./shell.js')('services'),
|
|
superagent = require('@cloudron/superagent');
|
|
|
|
// shared no-op lifecycle handler for addon steps that require no action
const NOOP = async function (/*app, options*/) {};

// helper scripts invoked via shell.sudo() for privileged filesystem operations
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh');
const RMVOLUME_CMD = path.join(__dirname, 'scripts/rmvolume.sh');
const SETUPVOLUME_CMD = path.join(__dirname, 'scripts/setupvolume.sh');
const MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh');
|
|
|
|
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
// addons have 1-1 mapping with the manifest
// every addon implements the same lifecycle interface:
//   setup / teardown / backup / restore / clear, plus getDynamicEnvironment
//   for env vars that are computed at request time rather than stored in the db
const ADDONS = {
    turn: {
        setup: setupTurn,
        teardown: teardownTurn,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    email: {
        setup: setupEmail,
        teardown: teardownEmail,
        backup: NOOP,
        restore: setupEmail, // restore just re-runs setup
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    ldap: {
        setup: setupLdap,
        teardown: teardownLdap,
        backup: NOOP,
        restore: setupLdap, // restore just re-runs setup
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    localstorage: {
        setup: setupLocalStorage,
        teardown: teardownLocalStorage,
        backup: backupLocalStorage, // file data is already inside the app data dir; this only dumps configured sqlite databases
        restore: restoreLocalStorage,
        getDynamicEnvironment: NOOP,
        clear: clearLocalStorage,
    },
    mongodb: {
        setup: setupMongoDb,
        teardown: teardownMongoDb,
        backup: backupMongoDb,
        restore: restoreMongoDb,
        getDynamicEnvironment: NOOP,
        clear: clearMongodb,
    },
    mysql: {
        setup: setupMySql,
        teardown: teardownMySql,
        backup: backupMySql,
        restore: restoreMySql,
        getDynamicEnvironment: NOOP,
        clear: clearMySql,
    },
    postgresql: {
        setup: setupPostgreSql,
        teardown: teardownPostgreSql,
        backup: backupPostgreSql,
        restore: restorePostgreSql,
        getDynamicEnvironment: NOOP,
        clear: clearPostgreSql,
    },
    proxyAuth: {
        setup: setupProxyAuth,
        teardown: teardownProxyAuth,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP
    },
    recvmail: {
        setup: setupRecvMail,
        teardown: teardownRecvMail,
        backup: NOOP,
        restore: setupRecvMail, // restore just re-runs setup
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    redis: {
        setup: setupRedis,
        teardown: teardownRedis,
        backup: backupRedis,
        restore: restoreRedis,
        getDynamicEnvironment: NOOP,
        clear: clearRedis,
    },
    sendmail: {
        setup: setupSendMail,
        teardown: teardownSendMail,
        backup: NOOP,
        restore: setupSendMail, // restore just re-runs setup
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    scheduler: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    docker: {
        setup: setupDocker,
        teardown: teardownDocker,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    tls: {
        setup: setupTls,
        teardown: teardownTls,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    oauth: { // kept for backward compatibility. keep teardown for uninstall to work
        setup: NOOP,
        teardown: teardownOauth,
        backup: NOOP,
        restore: NOOP,
        getDynamicEnvironment: NOOP,
        clear: NOOP,
    },
    oidc: {
        setup: setupOidc,
        teardown: teardownOidc,
        backup: NOOP,
        restore: setupOidc, // restore just re-runs setup
        getDynamicEnvironment: getDynamicEnvironmentOidc,
        clear: NOOP,
    }
};
|
|
|
|
// services are actual containers that are running. addons are the concepts requested by app
// each entry provides: a status function, a restart function, and the
// defaultMemoryLimit used when no limit is configured (see getServiceStatus
// and applyMemoryLimit)
const SERVICES = {
    turn: {
        status: statusTurn,
        restart: docker.restartContainer.bind(null, 'turn'),
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    mail: {
        status: containerStatus.bind(null, 'mail'),
        restart: mailServer.restart,
        defaultMemoryLimit: mailServer.DEFAULT_MEMORY_LIMIT
    },
    mongodb: {
        status: statusMongodb,
        restart: restartMongodb,
        // databases scale with machine size: 256MB base plus 256MB per ~4GB of total RAM
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    mysql: {
        status: containerStatus.bind(null, 'mysql'),
        restart: docker.restartContainer.bind(null, 'mysql'),
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    postgresql: {
        status: containerStatus.bind(null, 'postgresql'),
        restart: docker.restartContainer.bind(null, 'postgresql'),
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    docker: {
        status: statusDocker,
        restart: restartDocker,
        defaultMemoryLimit: 0 // host service; no container memory limit applies
    },
    unbound: {
        status: statusUnbound,
        restart: restartUnbound,
        defaultMemoryLimit: 0 // host service; no container memory limit applies
    },
    sftp: {
        status: containerStatus.bind(null, 'sftp'),
        restart: docker.restartContainer.bind(null, 'sftp'),
        defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT
    },
    graphite: {
        status: statusGraphite,
        restart: restartGraphite,
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    nginx: {
        status: statusNginx,
        restart: restartNginx,
        defaultMemoryLimit: 0 // host service; no container memory limit applies
    }
};
|
|
|
|
// per-app service containers, keyed by addon name. the container name is
// `${name}-${appId}`. handlers take the app id ('instance') only — the old
// callback-style 'done' parameter was unused (all call sites in this file
// invoke these with just the instance and await the returned promise) and
// has been dropped.
const APP_SERVICES = {
    redis: {
        status: (instance) => containerStatus(`redis-${instance}`),
        start: (instance) => docker.startContainer(`redis-${instance}`),
        stop: (instance) => docker.stopContainer(`redis-${instance}`),
        restart: (instance) => docker.restartContainer(`redis-${instance}`),
        defaultMemoryLimit: 256 * 1024 * 1024
    }
};
|
|
|
|
// returns true when moving from existingImageRef to currentImageRef crosses
// a major version boundary of the image tag (which requires a data migration)
function requiresUpgrade(existingImageRef, currentImageRef) {
    const existingTag = docker.parseImageRef(existingImageRef).tag;
    const currentTag = docker.parseImageRef(currentImageRef).tag;

    // semver.parse returns null on invalid tags. it's better to crash below if tag is null
    return semver.parse(existingTag).major !== semver.parse(currentTag).major;
}
|
|
|
|
// checks /proc/cpuinfo for the AVX flag. mongodb 5 and above requires AVX
async function hasAVX() {
    const grepArgs = ['-q', 'avx', '/proc/cpuinfo']; // -q: exit status only
    const [grepError] = await safe(shell.spawn('grep', grepArgs, {}));
    return grepError ? false : true; // grep exits non-zero when the flag is absent
}
|
|
|
|
// returns the on-disk location of an addon's dump file for the given app,
// or undefined for addons that have no dump file
function dumpPath(addon, appId) {
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof appId, 'string');

    const dumpFiles = {
        postgresql: 'postgresqldump',
        mysql: 'mysqldump',
        mongodb: 'mongodbdump',
        redis: 'dump.rdb'
    };

    const dumpFile = dumpFiles[addon];
    if (!dumpFile) return undefined;

    return path.join(paths.APPS_DATA_DIR, appId, dumpFile);
}
|
|
|
|
// inspects a service container and returns { ip, token, state }: its IP on
// the 'cloudron' docker network, the auth token read from the env var named
// tokenEnvName, and the raw docker State object.
// throws INACTIVE when the container has no IP (not running/attached) and
// DOCKER_ERROR when the environment or token cannot be determined.
async function getContainerDetails(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    const result = await docker.inspect(containerName);

    const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.INACTIVE, `Error getting IP of ${containerName} service`);

    // extract the cloudron token for auth. Config.Env entries look like NAME=value
    const env = safe.query(result, 'Config.Env', null);
    if (!env) throw new BoxError(BoxError.DOCKER_ERROR, `Error inspecting environment of ${containerName} service`);
    const tmp = env.find(function (e) { return e.indexOf(tokenEnvName) === 0; });
    if (!tmp) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);
    const token = tmp.slice(tokenEnvName.length + 1); // +1 for the = sign
    if (!token) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);

    return { ip: ip, token: token, state: result.State };
}
|
|
|
|
// determines the runtime status of a service container by inspecting docker
// and probing the container's healthcheck endpoint on port 3000.
// returns { status } or { status, error } while stopped/starting, and
// { status, memoryUsed, memoryPercent, healthcheck } once the probe succeeds.
async function containerStatus(containerName) {
    assert.strictEqual(typeof containerName, 'string');

    const [error, container] = await safe(docker.inspect(containerName));
    if (error && error.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED }; // container does not exist at all
    if (error) throw error;

    // no IP on the cloudron network means the container is not up
    const ip = safe.query(container, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) return { status: exports.SERVICE_STATUS_STOPPED };
    const isRunning = container.State?.Running;

    const [networkError, response] = await safe(superagent.get(`http://${ip}:3000/healthcheck`)
        .timeout(20000)
        .ok(() => true)); // don't throw on non-2xx; the status code is checked below

    // an unreachable or unhealthy endpoint is reported as 'starting', not as a hard error
    if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${networkError.message}` };
    if (response.status !== 200 || !response.body.status) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}` };

    const result = await docker.getStats(containerName, { stream: false });
    const stats = result.memory_stats || { usage: 0, limit: 1 }; // limit of 1 avoids division by zero below

    return {
        status: isRunning ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit),
        healthcheck: response.body
    };
}
|
|
|
|
// returns the ids of all platform services plus one 'redis:<appId>' entry
// per installed app that uses the redis addon and has it enabled
async function listServices() {
    const ids = Object.keys(SERVICES);

    const allApps = await apps.list();
    for (const app of allApps) {
        const usesRedis = app.enableRedis && app.manifest.addons?.redis;
        if (usesRedis) ids.push(`redis:${app.id}`);
    }

    return ids;
}
|
|
|
|
// loads the global (non app-scoped) services config blob from settings
async function getConfig() {
    const stored = await settings.getJson(settings.SERVICES_CONFIG_KEY);
    return stored || {};
}
|
|
|
|
// id is either a plain service name ('mysql') or '<name>:<appId>' for
// app-scoped services. returns the stored config object ({} when unset).
async function getServiceConfig(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');

    if (instance) { // app-scoped service config lives on the app record
        const app = await apps.get(instance);
        if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');
        return app.servicesConfig[name] || {};
    }

    const servicesConfig = await getConfig();
    return servicesConfig[name] || {};
}
|
|
|
|
// resolves the status of a service by id ('mysql' or 'redis:<appId>').
// combines the container status (health, memory usage) with the stored
// service config; config.memoryLimit falls back to the service's default.
async function getServiceStatus(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance ] = id.split(':');
    let containerStatusFunc, service;

    if (instance) { // app-scoped service
        service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
        containerStatusFunc = service.status.bind(null, instance);
    } else if (SERVICES[name]) {
        service = SERVICES[name];
        containerStatusFunc = service.status;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    const result = {
        name: name,
        status: null,
        memoryUsed: 0,
        memoryPercent: 0,
        error: null,
        healthcheck: null,
        config: {}
    };

    const status = await containerStatusFunc();
    result.status = status.status;
    result.memoryUsed = status.memoryUsed;
    result.memoryPercent = status.memoryPercent;
    result.defaultMemoryLimit = service.defaultMemoryLimit;
    result.error = status.error || null;
    result.healthcheck = status.healthcheck || null;

    result.config = await getServiceConfig(id);

    // surface the effective memory limit even when none was configured
    if (!result.config.memoryLimit && service.defaultMemoryLimit) {
        result.config.memoryLimit = service.defaultMemoryLimit;
    }

    return result;
}
|
|
|
|
// persists the configuration of a service (global config in settings,
// app-scoped config on the app record) and applies it in the background:
// a change of recoveryMode triggers a container rebuild, otherwise only the
// memory limit is (re-)applied.
async function configureService(id, data, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance ] = id.split(':');
    let needsRebuild = false;

    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        const app = await apps.get(instance);
        if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

        const servicesConfig = app.servicesConfig;
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there
        servicesConfig[name] = data;

        await apps.update(instance, { servicesConfig });
    } else if (SERVICES[name]) {
        const servicesConfig = await getConfig();
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there

        servicesConfig[name] = data;

        await settings.setJson(settings.SERVICES_CONFIG_KEY, servicesConfig);
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`configureService: ${id} rebuild=${needsRebuild}`);

    // do this in background
    if (needsRebuild) {
        safe(rebuildService(id, auditSource), { debug });
    } else {
        safe(applyMemoryLimit(id), { debug });
    }

    await eventlog.add(eventlog.ACTION_SERVICE_CONFIGURE, auditSource, { id, data });
}
|
|
|
|
// returns a LogStream of a service's logs. docker/unbound logs come from
// journald, nginx from its log files, everything else from the per-container
// app.log under LOG_DIR. the caller must destroy() the returned stream to
// release the underlying child process.
async function getServiceLogs(id, options) {
    assert.strictEqual(typeof id, 'string');
    assert(options && typeof options === 'object');

    const [name, instance ] = id.split(':');

    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    } else if (!SERVICES[name]) {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    debug(`getServiceLogs: getting logs for ${name}`);

    let cp;

    if (name === 'docker' || name === 'unbound') {
        cp = logs.journalctl(name, options); // these log to the systemd journal
    } else if (name === 'nginx') {
        cp = logs.tail(['/var/log/nginx/access.log', '/var/log/nginx/error.log'], { lines: options.lines, follow: options.follow });
    } else {
        const containerName = APP_SERVICES[name] ? `${name}-${instance}` : name;
        cp = logs.tail([path.join(paths.LOG_DIR, containerName, 'app.log')], { lines: options.lines, follow: options.follow });
    }

    const logStream = new logs.LogStream({ format: options.format || 'json', source: name });
    logStream.on('close', () => cp.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close'

    cp.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// recreates a service's docker container in place. the start functions are
// called with { version: 'none' } so existing data is not purged. finishes
// by re-applying the memory limit in the background and logging an event.
async function rebuildService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    if (constants.TEST && !process.env.TEST_CREATE_INFRA) return; // tests don't create infra containers

    // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
    // passing an infra version of 'none' will not attempt to purge existing data
    const [name, instance] = id.split(':');

    switch (name) {
    case 'turn':
        await startTurn({ version: 'none' });
        break;
    case 'mongodb':
        await startMongodb({ version: 'none' });
        break;
    case 'postgresql':
        await startPostgresql({ version: 'none' });
        break;
    case 'mysql':
        await startMysql({ version: 'none' });
        break;
    case 'sftp':
        await sftp.start({ version: 'none' });
        break;
    case 'graphite':
        await startGraphite({ version: 'none' });
        break;
    case 'mail':
        await mailServer.start({ version: 'none' });
        break;
    case 'redis': { // app-scoped: remove the app's redis container and set it up again
        await safe(shell.spawn('docker', ['rm', '-f', `redis-${instance}`], {})); // ignore error
        const app = await apps.get(instance);
        if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container
        break;
    }
    default:
        // nothing to rebuild for now.
    }

    safe(applyMemoryLimit(id), { debug }); // do this in background. ok to fail

    await eventlog.add(eventlog.ACTION_SERVICE_REBUILD, auditSource, { id });
}
|
|
|
|
// restarts a service container. id is 'name' for platform services or
// 'name:appId' for app-scoped services. records an eventlog entry when done.
async function restartService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance] = id.split(':');

    const registry = instance ? APP_SERVICES : SERVICES;
    const service = registry[name];
    if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

    if (instance) {
        await service.restart(instance);
    } else {
        await service.restart();
    }

    await eventlog.add(eventlog.ACTION_SERVICE_RESTART, auditSource, { id });
}
|
|
|
|
// in the future, we can refcount and lazy start global services
// starts the app-scoped service containers (currently just redis) of an app.
// errors are logged but not propagated so starting the app is not blocked.
async function startAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;
    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue;

        const [error] = await safe(APP_SERVICES[addon].start(instance)); // assume addons name is service name
        // error ignored because we don't want "start app" to error. user can fix it from Services
        if (error) debug(`startAppServices: ${addon}:${instance}. %o`, error);
    }
}
|
|
|
|
// in the future, we can refcount and stop global services as well
// stops the app-scoped service containers (currently just redis) of an app.
// errors are logged but not propagated so stopping the app is not blocked.
async function stopAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;
    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue;

        const [error] = await safe(APP_SERVICES[addon].stop(instance)); // assume addons name is service name
        // error ignored because we don't want "stop app" to error. user can fix it from Services
        if (error) debug(`stopAppServices: ${addon}:${instance}. %o`, error);
    }
}
|
|
|
|
// waits until a service container's authenticated healthcheck endpoint
// reports healthy, retrying up to 20 times with 15s intervals (~5 minutes).
// throws ADDONS_ERROR if the container never becomes healthy.
async function waitForContainer(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    debug(`Waiting for ${containerName}`);

    const result = await getContainerDetails(containerName, tokenEnvName);

    await promiseRetry({ times: 20, interval: 15000, debug }, async () => {
        const [networkError, response] = await safe(superagent.get(`http://${result.ip}:3000/healthcheck?access_token=${result.token}`)
            .timeout(20000)
            .ok(() => true)); // don't throw on non-2xx; checked below

        if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${networkError.message}`);
        if (response.status !== 200 || !response.body.status) throw new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}`);
    });
}
|
|
|
|
// runs the setup step of every addon requested by the app. setup is
// idempotent and must not lose existing data when re-run.
async function setupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('setupAddons: Setting up %j', Object.keys(addons));

    for (const name of Object.keys(addons)) {
        const handlers = ADDONS[name];
        if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${name}`);

        debug(`setupAddons: setting up addon ${name} with options ${JSON.stringify(addons[name])}`);

        await handlers.setup(app, addons[name]);
    }
}
|
|
|
|
// runs the destructive teardown step of every addon requested by the app.
// data the addons stored for the app is lost.
async function teardownAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('teardownAddons: Tearing down %j', Object.keys(addons));

    for (const name of Object.keys(addons)) {
        const handlers = ADDONS[name];
        if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${name}`);

        debug(`teardownAddons: Tearing down addon ${name} with options ${JSON.stringify(addons[name])}`);

        await handlers.teardown(app, addons[name]);
    }
}
|
|
|
|
// runs the backup step of every addon requested by the app. addons with no
// state of their own use the NOOP handler.
async function backupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('backupAddons');

    if (!addons) return;

    debug('backupAddons: backing up %j', Object.keys(addons));

    for (const name of Object.keys(addons)) {
        const handlers = ADDONS[name];
        if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${name}`);

        await handlers.backup(app, addons[name]);
    }
}
|
|
|
|
// runs the clear step of every addon requested by the app (empties addon
// data without tearing down the addon itself).
async function clearAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('clearAddons');

    if (!addons) return;

    debug('clearAddons: clearing %j', Object.keys(addons));

    for (const name of Object.keys(addons)) {
        const handlers = ADDONS[name];
        if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${name}`);

        await handlers.clear(app, addons[name]);
    }
}
|
|
|
|
// runs the restore step of every addon requested by the app, re-importing
// data from the dump files created by backupAddons.
async function restoreAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('restoreAddons');

    if (!addons) return;

    debug('restoreAddons: restoring %j', Object.keys(addons));

    for (const name of Object.keys(addons)) {
        const handlers = ADDONS[name];
        if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${name}`);

        await handlers.restore(app, addons[name]);
    }
}
|
|
|
|
// provisions the addon for the app and re-imports its data from the dumps
// on disk. clear runs before restore in case a previous restore crashed midway.
async function importAppDatabase(app, addon) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof addon, 'string');

    const handlers = ADDONS[addon];
    if (!handlers) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

    const options = app.manifest.addons[addon];

    await handlers.setup(app, options);
    await handlers.clear(app, options); // clear in case we crashed in a restore
    await handlers.restore(app, options);
}
|
|
|
|
// re-imports the given addon's data for every installed app that uses it
// (used after the addon container was recreated). apps whose import fails
// are marked errored; finally removes the 'exported-<addon>' checkpoint file
// written by exportDatabase.
async function importDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`importDatabase: Importing ${addon}`);

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon

        debug(`importDatabase: Importing addon ${addon} of app ${app.id}`);

        const [error] = await safe(importAppDatabase(app, addon));
        if (!error) continue;

        debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored. %o`, error);
        // FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
        // not clear, if repair workflow should be part of addon or per-app
        await safe(apps.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }));
    }

    safe.fs.unlinkSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`)); // clean up for future migrations
}
|
|
|
|
// exports the given addon's data (one dump per app using it) in preparation
// of recreating the addon container. writes an 'exported-<addon>' checkpoint
// file so a crashed/restarted update does not export twice, then removes the
// container and its data directory.
async function exportDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`exportDatabase: Exporting ${addon}`);

    if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) {
        debug(`exportDatabase: Already exported addon ${addon} in previous run`);
        return;
    }

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon
        if (app.installationState === apps.ISTATE_ERROR) continue; // missing db causes crash in old app addon containers

        debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);

        const [error] = await safe(ADDONS[addon].backup(app, app.manifest.addons[addon]));
        if (error) {
            debug(`exportDatabase: Error exporting ${addon} of app ${app.id}. %o`, error);
            // for errored apps, we can ignore if export had an error
            // (note: unreachable since errored apps are skipped at the top of the loop; kept for safety)
            if (app.installationState === apps.ISTATE_ERROR) continue;
            throw error;
        }
    }

    safe.fs.writeFileSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8');
    if (safe.error) throw new BoxError(BoxError.FS_ERROR, 'Error writing export checkpoint file'); // fix: 'new' was missing, matching every other BoxError throw site

    // note: after this point, we are restart safe. it's ok if the box code crashes at this point
    await shell.spawn('docker', ['rm', '-f', addon], {}); // what if db writes something when quitting ...
    await shell.sudo([ RMADDONDIR_CMD, addon ], {}); // ready to start afresh
}
|
|
|
|
// applies the configured memory limit (falling back to the service's
// defaultMemoryLimit) to a service container via docker update. mongodb is
// skipped on CPUs without AVX (mongodb 5+ requires AVX, see hasAVX).
async function applyMemoryLimit(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');
    let containerName, memoryLimit;
    const serviceConfig = await getServiceConfig(id);

    if (instance) { // app-scoped service container, named '<service>-<appId>'
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        containerName = `${name}-${instance}`;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES[name].defaultMemoryLimit;
    } else if (SERVICES[name]) {
        if (name === 'mongodb' && !await hasAVX()) {
            debug('applyMemoryLimit: skipping mongodb because CPU does not have AVX');
            return;
        }

        containerName = name;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES[name].defaultMemoryLimit;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`applyMemoryLimit: ${containerName} ${JSON.stringify(serviceConfig)}`);

    await docker.update(containerName, memoryLimit);
}
|
|
|
|
// starts/creates service containers on platform startup. a changed infra
// version recreates everything; otherwise only services whose image changed
// are (re-)created. db containers start unlimited and are scaled down to
// their configured memory limit afterwards in the background.
async function startServices(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const startFuncs = [];

    // always start addons on any infra change, regardless of minor or major update
    if (existingInfra.version !== infra.version) {
        debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
        startFuncs.push(
            mailServer.start, // start this first to reduce email downtime
            startTurn,
            startMysql,
            startPostgresql,
            startMongodb,
            startRedis,
            startGraphite,
            sftp.start,
        );
    } else {
        assert.strictEqual(typeof existingInfra.images, 'object');

        // same infra version: only recreate services whose image ref changed
        if (infra.images.mail !== existingInfra.images.mail) startFuncs.push(mailServer.start); // start this first to reduce email downtime
        if (infra.images.turn !== existingInfra.images.turn) startFuncs.push(startTurn);
        if (infra.images.mysql !== existingInfra.images.mysql) startFuncs.push(startMysql);
        if (infra.images.postgresql !== existingInfra.images.postgresql) startFuncs.push(startPostgresql);
        if (infra.images.mongodb !== existingInfra.images.mongodb) startFuncs.push(startMongodb);
        if (infra.images.redis !== existingInfra.images.redis) startFuncs.push(startRedis);
        if (infra.images.graphite !== existingInfra.images.graphite) startFuncs.push(startGraphite);
        if (infra.images.sftp !== existingInfra.images.sftp) startFuncs.push(sftp.start);

        debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
    }

    for (const func of startFuncs) {
        await func(existingInfra);
    }

    // we always start db containers with unlimited memory. we then scale them down per configuration
    for (const id of [ 'mysql', 'postgresql', 'mongodb' ]) {
        safe(applyMemoryLimit(id), { debug }); // no waiting. and it's ok if applying service configs fails
    }
}
|
|
|
|
// builds the 'NAME=value' environment array for an app's container from the
// addon configs stored in the db plus any dynamic values computed at runtime
// (e.g. based on the dashboard domain) which are not persisted.
async function getEnvironment(app) {
    assert.strictEqual(typeof app, 'object');

    // values for the environment come from the addonConfigs db
    const storedConfigs = await addonConfigs.getByAppId(app.id);

    // collect into an object so names stay unique; dynamic values below may
    // overwrite previously stored static ones
    const env = {};
    for (const entry of storedConfigs) env[entry.name] = entry.value;

    for (const addon of Object.keys(app.manifest.addons || {})) {
        const dynamicConfigs = await ADDONS[addon].getDynamicEnvironment(app, {});
        if (dynamicConfigs) Object.assign(env, dynamicConfigs);
    }

    return Object.keys(env).map((name) => name + '=' + env[name]);
}
|
|
|
|
// computes the names of auxiliary containers (beyond the main app container)
// created for the given addons. currently only scheduler tasks have them.
function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return [];

    const names = [];

    if ('scheduler' in addons) {
        // names here depend on how scheduler.js creates containers
        for (const taskName of Object.keys(addons.scheduler)) {
            names.push(app.id + '-' + taskName);
        }
    }

    return names;
}
|
|
|
|
// creates the app's storage data directory via the setupvolume.sh helper.
// safe to call repeatedly; existing data is preserved.
async function setupLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupLocalStorage');

    const storageDir = await apps.getStorageDir(app);

    const [setupError] = await safe(shell.sudo([ SETUPVOLUME_CMD, storageDir ], {}));
    if (!setupError) return;

    throw new BoxError(BoxError.FS_ERROR, `Error creating app storage data dir: ${setupError.message}`);
}
|
|
|
|
// clears the app's storage data directory via the clearvolume.sh helper
async function clearLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('clearLocalStorage');

    const storageDir = await apps.getStorageDir(app);

    const [clearError] = await safe(shell.sudo([ CLEARVOLUME_CMD, storageDir ], {}));
    if (clearError) throw new BoxError(BoxError.FS_ERROR, clearError);
}
|
|
|
|
// removes the app's storage data directory via the rmvolume.sh helper
async function teardownLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownLocalStorage');

    const storageDir = await apps.getStorageDir(app);

    const [rmError] = await safe(shell.sudo([ RMVOLUME_CMD, storageDir ], {}));
    if (rmError) throw new BoxError(BoxError.FS_ERROR, rmError);

    // sqlite files are automatically cleared
}
|
|
|
|
// Dumps each sqlite database listed in options.paths (paths inside the app's
// /app/data) to <APPS_DATA_DIR>/<appId>/<basename>.sqlite on the host.
// The dump runs sqlite3 in a throwaway container of the app's own image so it
// works even while the app container is restarting.
async function backupSqlite(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up sqlite');

    const volumeDataDir = await apps.getStorageDir(app);

    // we use .dump instead of .backup because it's more portable across sqlite versions
    for (const p of options.paths) {
        // dump file is named after the db file, with a .sqlite extension
        const outputFile = path.join(paths.APPS_DATA_DIR, app.id, path.basename(p, path.extname(p)) + '.sqlite');

        // we could use docker exec but it may not work if app is restarting
        const cmd = `sqlite3 ${p} ".dump"`;
        const runCmd = `docker run --rm --name=sqlite-${app.id} \
            --net cloudron \
            -v ${volumeDataDir}:/app/data \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run ${app.manifest.dockerImage} ${cmd} > ${outputFile}`;

        await shell.bash(runCmd, { encoding: 'utf8' });
    }
}
|
|
|
|
// Backs up localstorage contents that need a logical dump (currently only sqlite).
async function backupLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!options.sqlite) return;

    await backupSqlite(app, options.sqlite);
}
|
|
|
|
// Restores each sqlite database listed in options.paths from the dump files
// created by backupSqlite(), by feeding the dump into sqlite3 running in a
// throwaway container of the app's own image.
async function restoreSqlite(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Restoring sqlite');

    const volumeDataDir = await apps.getStorageDir(app);

    // can also restore using .read <filename>
    for (const p of options.paths) {
        // dump file naming must match backupSqlite()
        const inputFile = path.join(paths.APPS_DATA_DIR, app.id, path.basename(p, path.extname(p)) + '.sqlite');

        const cmd = `sqlite3 ${p}`;
        const runCmd = `docker run --rm --name=sqlite-${app.id} \
            --net cloudron \
            -v ${volumeDataDir}:/app/data \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run ${app.manifest.dockerImage} ${cmd} < ${inputFile}`;

        await shell.bash(runCmd, { encoding: 'utf8' });
    }
}
|
|
|
|
// Restores localstorage contents that need a logical restore (currently only sqlite).
async function restoreLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!options.sqlite) return;

    await restoreSqlite(app, options.sqlite);
}
|
|
|
|
// Publishes the TURN/STUN connection details into this app's addon config.
// When the optional turn addon is disabled for the app, any existing config is removed.
async function setupTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (app.manifest.addons.turn.optional && !app.enableTurn) return await addonConfigs.unset(app.id, 'turn');

    // the secret is generated and persisted by startTurn()
    const turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) throw new BoxError(BoxError.ADDONS_ERROR, 'Turn secret is missing');

    const { fqdn: dashboardFqdn } = await dashboard.getLocation();

    const values = {
        CLOUDRON_STUN_SERVER: dashboardFqdn,
        CLOUDRON_STUN_PORT: '3478',
        CLOUDRON_STUN_TLS_PORT: '5349',
        CLOUDRON_TURN_SERVER: dashboardFqdn,
        CLOUDRON_TURN_PORT: '3478',
        CLOUDRON_TURN_TLS_PORT: '5349',
        CLOUDRON_TURN_SECRET: turnSecret
    };

    debug('Setting up TURN');

    await addonConfigs.set(app.id, 'turn', Object.entries(values).map(([ name, value ]) => ({ name, value })));
}
|
|
|
|
// (Re)creates the shared TURN/STUN service container from infra.images.turn.
// Generates and persists the cloudron-wide turn secret on first run. In
// recovery mode the container just sleeps so an operator can exec into it.
async function startTurn(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const serviceConfig = await getServiceConfig('turn');
    const image = infra.images.turn;
    const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit;
    const { fqdn:realm } = await dashboard.getLocation(); // the turn realm is the dashboard domain

    // this secret is handed to apps via setupTurn(); create once and persist
    let turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) {
        debug('startTurn: generating turn secret');
        turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
        await blobs.setString(blobs.ADDON_TURN_SECRET, turnSecret);
    }

    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
    const verboseLogs = serviceConfig.recoveryMode ? 'true' : '';

    // docker's userland proxy spins a process for every mapped port. this makes exposing port ranges unviable
    // despite --userland-proxy=false, docker create a firewall rule for each port in a range. this takes over 20s to start/stop containers
    // constants.TURN_PORT, constants.TURN_TLS_PORT, constants.TURN_UDP_PORT_START, constants.TURN_UDP_PORT_END are exposed automatically in host mode
    // https://github.com/moby/moby/issues/8356 and https://github.com/moby/moby/issues/14856 https://github.com/moby/moby/issues/36214
    const runCmd = `docker run --restart=unless-stopped -d --name=turn \
        --hostname turn \
        --net host \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=turn \
        -m ${memoryLimit} \
        --memory-swap -1 \
        -e CLOUDRON_TURN_SECRET=${turnSecret} \
        -e CLOUDRON_REALM=${realm} \
        -e CLOUDRON_VERBOSE_LOGS=${verboseLogs} \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startTurn: stopping and deleting previous turn container');
    await docker.stopContainer('turn');
    await docker.deleteContainer('turn');

    debug('startTurn: starting turn container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    // after an image upgrade, drop the old image to reclaim disk space
    if (existingInfra.version !== 'none' && existingInfra.images.turn !== image) await docker.deleteImage(existingInfra.images.turn);
}
|
|
|
|
// Removes all CLOUDRON_TURN_*/CLOUDRON_STUN_* addon config for this app.
async function teardownTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down TURN');
    await addonConfigs.unset(app.id, 'turn');
}
|
|
|
|
// Publishes the internal email (SMTP/IMAP/Sieve) endpoints and the list of
// mail-enabled domains into this app's addon config.
async function setupEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const domains = await mail.listDomains();
    const enabledDomains = domains.filter((d) => d.enabled).map((d) => d.domain).join(',');
    const { fqdn } = await mailServer.getLocation();

    // note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
    const values = {
        CLOUDRON_EMAIL_SMTP_SERVER: 'mail',
        CLOUDRON_EMAIL_SMTP_PORT: '2525',
        CLOUDRON_EMAIL_SMTPS_PORT: '2465',
        CLOUDRON_EMAIL_STARTTLS_PORT: '2587',
        CLOUDRON_EMAIL_IMAP_SERVER: 'mail',
        CLOUDRON_EMAIL_IMAPS_PORT: '9993',
        CLOUDRON_EMAIL_IMAP_PORT: '9393',
        CLOUDRON_EMAIL_SIEVE_SERVER: 'mail',
        CLOUDRON_EMAIL_SIEVE_PORT: '4190', // starttls
        CLOUDRON_EMAIL_DOMAIN: app.domain,
        CLOUDRON_EMAIL_DOMAINS: enabledDomains,
        CLOUDRON_EMAIL_SERVER_HOST: fqdn, // this is also a hint to reconfigure on mail server name change
        CLOUDRON_EMAIL_LDAP_MAILBOXES_BASE_DN: 'ou=mailboxes,dc=cloudron'
    };

    debug('Setting up Email');

    await addonConfigs.set(app.id, 'email', Object.entries(values).map(([ name, value ]) => ({ name, value })));
}
|
|
|
|
// Removes all CLOUDRON_EMAIL_* addon config for this app.
async function teardownEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down Email');
    await addonConfigs.unset(app.id, 'email');
}
|
|
|
|
// Publishes the LDAP endpoint and base DNs into this app's addon config.
// Skipped entirely when SSO is disabled for the app.
async function setupLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!app.sso) return;

    const values = {
        CLOUDRON_LDAP_SERVER: '172.18.0.1',
        CLOUDRON_LDAP_HOST: '172.18.0.1', // to keep things in sync with the database _HOST vars
        CLOUDRON_LDAP_PORT: String(constants.LDAP_PORT),
        CLOUDRON_LDAP_URL: `ldap://172.18.0.1:${constants.LDAP_PORT}`,
        CLOUDRON_LDAP_USERS_BASE_DN: 'ou=users,dc=cloudron',
        CLOUDRON_LDAP_GROUPS_BASE_DN: 'ou=groups,dc=cloudron',
        CLOUDRON_LDAP_BIND_DN: `cn=${app.id},ou=apps,dc=cloudron`,
        CLOUDRON_LDAP_BIND_PASSWORD: hat(4 * 128) // this is ignored
    };

    debug('Setting up LDAP');

    await addonConfigs.set(app.id, 'ldap', Object.entries(values).map(([ name, value ]) => ({ name, value })));
}
|
|
|
|
// Removes all CLOUDRON_LDAP_* addon config for this app.
async function teardownLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down LDAP');
    await addonConfigs.unset(app.id, 'ldap');
}
|
|
|
|
// Publishes outbound mail (SMTP) credentials into this app's addon config.
// Reuses an existing password if present so credentials stay stable across
// reconfigurations. When the optional mailbox is disabled, config is removed.
async function setupSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up SendMail');

    if (app.manifest.addons.sendmail.optional && !app.enableMailbox) return await addonConfigs.unset(app.id, 'sendmail');

    const existingPassword = await addonConfigs.getByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length

    // this is also a hint to reconfigure on mail server name change
    const smtpServer = options.requiresValidCertificate ? (await mailServer.getLocation()).fqdn : 'mail';

    const mailbox = `${app.mailboxName}@${app.mailboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_SMTP_SERVER', value: smtpServer },
        { name: 'CLOUDRON_MAIL_SMTP_PORT', value: '2525' },
        { name: 'CLOUDRON_MAIL_SMTPS_PORT', value: '2465' },
        { name: 'CLOUDRON_MAIL_STARTTLS_PORT', value: '2587' },
        { name: 'CLOUDRON_MAIL_SMTP_USERNAME', value: mailbox },
        { name: 'CLOUDRON_MAIL_SMTP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_FROM', value: mailbox },
        { name: 'CLOUDRON_MAIL_DOMAIN', value: app.mailboxDomain }
    ];

    if (app.manifest.addons.sendmail.supportsDisplayName) env.push({ name: 'CLOUDRON_MAIL_FROM_DISPLAY_NAME', value: app.mailboxDisplayName });

    debug('Setting sendmail addon config to %j', env);
    await addonConfigs.set(app.id, 'sendmail', env);
}
|
|
|
|
// Removes all sendmail (CLOUDRON_MAIL_SMTP_*) addon config for this app.
async function teardownSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down sendmail');
    await addonConfigs.unset(app.id, 'sendmail');
}
|
|
|
|
// Publishes inbound mail (IMAP/POP3) credentials into this app's addon config.
// Reuses an existing password if present. When the inbox is disabled, any
// stale config is removed instead.
async function setupRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupRecvMail: setting up recvmail');

    if (!app.enableInbox) return await addonConfigs.unset(app.id, 'recvmail');

    const existingPassword = await addonConfigs.getByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length

    const inboxAddress = `${app.inboxName}@${app.inboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_IMAP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_MAIL_IMAP_PORT', value: '9393' },
        { name: 'CLOUDRON_MAIL_IMAPS_PORT', value: '9993' },
        { name: 'CLOUDRON_MAIL_POP3_PORT', value: '9595' },
        { name: 'CLOUDRON_MAIL_POP3S_PORT', value: '9995' },
        { name: 'CLOUDRON_MAIL_IMAP_USERNAME', value: inboxAddress },
        { name: 'CLOUDRON_MAIL_IMAP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_TO', value: inboxAddress },
        { name: 'CLOUDRON_MAIL_TO_DOMAIN', value: app.inboxDomain }
    ];

    debug('setupRecvMail: setting recvmail addon config to %j', env);
    await addonConfigs.set(app.id, 'recvmail', env);
}
|
|
|
|
// Removes all recvmail (CLOUDRON_MAIL_IMAP_*/POP3) addon config for this app.
async function teardownRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownRecvMail: tearing down recvmail');
    await addonConfigs.unset(app.id, 'recvmail');
}
|
|
|
|
// Derives the mysql database/user name for an app id. The id is md5-hashed to
// strip the "-" characters and truncated because mysql usernames max out at 16 chars.
function mysqlDatabaseName(appId) {
    assert.strictEqual(typeof appId, 'string');

    const digest = crypto.createHash('md5').update(appId).digest('hex'); // get rid of "-"
    return digest.substring(0, 16); // max length of mysql usernames is 16
}
|
|
|
|
// (Re)creates the shared mysql service container from infra.images.mysql.
// If the image bump requires a dump/reload upgrade, all databases are exported
// before the old container is replaced and re-imported once the new one is
// healthy. In recovery mode the container just sleeps for operator access.
async function startMysql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const image = infra.images.mysql;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128); // access token for the container's REST management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mysql, image);

    if (upgrading) {
        debug('startMysql: mysql will be upgraded');
        await exportDatabase('mysql');
    }

    const serviceConfig = await getServiceConfig('mysql');
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=unless-stopped -d --name=mysql \
        --hostname mysql \
        --net cloudron \
        --net-alias mysql \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=mysql \
        --ip ${constants.MYSQL_SERVICE_IPv4} \
        -e CLOUDRON_MYSQL_TOKEN=${cloudronToken} \
        -e CLOUDRON_MYSQL_ROOT_HOST=172.18.0.1 \
        -e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
        -v ${dataDir}/mysql:/var/lib/mysql \
        --label isCloudronManaged=true \
        --cap-add SYS_NICE \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startMysql: stopping and deleting previous mysql container');
    await docker.stopContainer('mysql');
    await docker.deleteContainer('mysql');

    debug('startMysql: starting mysql container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    if (!serviceConfig.recoveryMode) {
        // wait until the service announces its token, then reload dumps if upgrading
        await waitForContainer('mysql', 'CLOUDRON_MYSQL_TOKEN');
        if (upgrading) await importDatabase('mysql');
    }

    // after an image upgrade, drop the old image to reclaim disk space
    if (existingInfra.version !== 'none' && existingInfra.images.mysql !== image) await docker.deleteImage(existingInfra.images.mysql);
}
|
|
|
|
// Provisions a mysql database (or database prefix) for the app via the mysql
// service's REST API and publishes the credentials into the app's addon config.
// Reuses an existing password if present so credentials stay stable.
async function setupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up mysql');

    const existingPassword = await addonConfigs.getByName(app.id, 'mysql', '%MYSQL_PASSWORD');

    const name = mysqlDatabaseName(app.id); // doubles as database, prefix and username

    const data = {
        database: name,
        prefix: name,
        username: name,
        password: existingPassword || hat(4 * 48) // see box#362 for password length
    };

    const details = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const [networkError, response] = await safe(superagent.post(`http://${details.ip}:3000/${resource}?access_token=${details.token}`)
        .send(data)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mysql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mysql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_MYSQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_MYSQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_MYSQL_HOST', value: 'mysql' },
        { name: 'CLOUDRON_MYSQL_PORT', value: '3306' }
    ];

    if (options.multipleDatabases) {
        env.push({ name: 'CLOUDRON_MYSQL_DATABASE_PREFIX', value: `${data.prefix}_` });
    } else {
        env.push(
            { name: 'CLOUDRON_MYSQL_URL', value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
            { name: 'CLOUDRON_MYSQL_DATABASE', value: data.database }
        );
    }

    debug('Setting mysql addon config to %j', env);
    await addonConfigs.set(app.id, 'mysql', env);
}
|
|
|
|
// Drops all data in the app's mysql database (or prefix) without removing the
// database/user itself, via the mysql service's REST API.
async function clearMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);

    const details = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const url = `http://${details.ip}:3000/${resource}/${database}/clear?access_token=${details.token}`;

    const [networkError, response] = await safe(superagent.post(url).ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mysql. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Deletes the app's mysql database (or prefix) and user via the mysql
// service's REST API, then removes the app's mysql addon config.
async function teardownMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);
    const username = database; // username and database name are the same hash

    const details = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const url = `http://${details.ip}:3000/${resource}/${database}?access_token=${details.token}&username=${username}`;

    const [networkError, response] = await safe(superagent.del(url).ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'mysql');
}
|
|
|
|
// POSTs to `url` and streams the 200 response body into `filename`. Used to
// download database dumps from the addon service containers.
// Rejects with BoxError on network errors, non-200 responses, pipe errors,
// truncated responses, or after a 4 hour connect/idle timeout.
function pipeRequestToFile(url, filename) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof filename, 'string');

    return new Promise((resolve, reject) => {
        const writeStream = fs.createWriteStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest
        request.setTimeout(4 * 60 * 60 * 1000, () => {
            debug('pipeRequestToFile: timeout - connect or post-connect idle timeout');
            request.destroy(); // connect OR post-connect idle timeout
            reject(new Error('Request timedout'));
        });

        // fix: messages previously contained the literal text "$(unknown)" because
        // shell-style $() does not interpolate inside a JS template literal
        request.on('error', (error) => reject(new BoxError(BoxError.NETWORK_ERROR, `Could not pipe ${url} to ${filename}: ${error.message}`))); // network error, dns error
        request.on('response', (response) => {
            debug(`pipeRequestToFile: connected with status code ${response.statusCode}`);
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${url} to ${filename}: status ${response.statusCode}`));

            // pipeline() handles backpressure and destroys both streams on error
            pipeline(response, writeStream, (error) => {
                if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping ${url} to ${filename}: ${error.message}`));

                // response.complete guards against a truncated transfer that ended without an error
                if (!response.complete) return reject(new BoxError(BoxError.ADDONS_ERROR, `Response not complete when piping ${url} to ${filename}`));
                resolve();
            });
        });
        request.end(); // make the request
    });
}
|
|
|
|
// Streams `filename` as the body of a POST to `url`. Used to upload database
// dumps to the addon service containers for restore. Resolves only once the
// server replies 200; rejects on pipe errors, non-200 responses, or after a
// 4 hour connect/idle timeout.
function pipeFileToRequest(filename, url) {
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof url, 'string');

    return new Promise((resolve, reject) => {
        const readStream = fs.createReadStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest
        request.setTimeout(4 * 60 * 60 * 1000, () => {
            debug('pipeFileToRequest: timeout - connect or post-connect idle timeout');
            request.destroy();
            reject(new Error('Request timedout'));
        });
        request.on('response', (response) => {
            debug(`pipeFileToRequest: request completed with status code ${response.statusCode}`);
            response.resume(); // drain the response
            // fix: messages previously contained the literal text "$(unknown)" because
            // shell-style $() does not interpolate inside a JS template literal
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${filename} to ${url}: status ${response.statusCode} complete ${response.complete}`));

            resolve();
        });

        debug(`pipeFileToRequest: piping ${filename} to ${url}`);
        // pipeline() ends the request when the file is fully sent and destroys
        // both streams on error (which also surfaces request/network errors here)
        pipeline(readStream, request, function (error) {
            if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping file ${filename} to request ${url}: ${error.message}`));
            debug(`pipeFileToRequest: piped ${filename} to ${url}`); // now we have to wait for 'response' above
        });
    });
}
|
|
|
|
// Streams a dump of the app's mysql database (or prefix) from the mysql
// service's REST API into the app's dump file on disk.
async function backupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up mysql');

    const database = mysqlDatabaseName(app.id);
    const details = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    await pipeRequestToFile(`http://${details.ip}:3000/${resource}/${database}/backup?access_token=${details.token}`, dumpPath('mysql', app.id));
}
|
|
|
|
// Streams the app's on-disk mysql dump back into the mysql service's REST API.
async function restoreMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('restoreMySql');

    const database = mysqlDatabaseName(app.id);
    const details = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    await pipeFileToRequest(dumpPath('mysql', app.id), `http://${details.ip}:3000/${resource}/${database}/restore?access_token=${details.token}`);
}
|
|
|
|
// Derives the postgresql database and user names for an app id.
// "-" is stripped (not valid in unquoted identifiers) and the values are
// prefixed so they never start with a digit.
function postgreSqlNames(appId) {
    assert.strictEqual(typeof appId, 'string'); // validate args like the sibling helpers do

    const suffix = appId.replace(/-/g, '');
    return { database: `db${suffix}`, username: `user${suffix}` };
}
|
|
|
|
// (Re)creates the shared postgresql service container from infra.images.postgresql.
// If the image bump requires a dump/reload upgrade, all databases are exported
// before the old container is replaced and re-imported once the new one is
// healthy. In recovery mode the container just sleeps for operator access.
async function startPostgresql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const image = infra.images.postgresql;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128); // access token for the container's REST management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.postgresql, image);

    if (upgrading) {
        debug('startPostgresql: postgresql will be upgraded');
        await exportDatabase('postgresql');
    }

    const serviceConfig = await getServiceConfig('postgresql');
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=unless-stopped -d --name=postgresql \
        --hostname postgresql \
        --net cloudron \
        --net-alias postgresql \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=postgresql \
        --ip ${constants.POSTGRESQL_SERVICE_IPv4} \
        --shm-size=128M \
        -e CLOUDRON_POSTGRESQL_ROOT_PASSWORD=${rootPassword} \
        -e CLOUDRON_POSTGRESQL_TOKEN=${cloudronToken} \
        -v ${dataDir}/postgresql:/var/lib/postgresql \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startPostgresql: stopping and deleting previous postgresql container');
    await docker.stopContainer('postgresql');
    await docker.deleteContainer('postgresql');

    debug('startPostgresql: starting postgresql container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    if (!serviceConfig.recoveryMode) {
        // wait until the service announces its token, then reload dumps if upgrading
        await waitForContainer('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
        if (upgrading) await importDatabase('postgresql');
    }

    // after an image upgrade, drop the old image to reclaim disk space
    if (existingInfra.version !== 'none' && existingInfra.images.postgresql !== image) await docker.deleteImage(existingInfra.images.postgresql);
}
|
|
|
|
// Provisions a postgresql database for the app via the postgresql service's
// REST API and publishes the credentials into the app's addon config.
// Reuses an existing password if present so credentials stay stable.
async function setupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up postgresql');

    const { database, username } = postgreSqlNames(app.id);

    const existingPassword = await addonConfigs.getByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD');

    const data = {
        database,
        username,
        password: existingPassword || hat(4 * 128),
        locale: options.locale || 'C'
    };

    const details = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const [networkError, response] = await safe(superagent.post(`http://${details.ip}:3000/databases?access_token=${details.token}`)
        .send(data)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up postgresql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up postgresql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_POSTGRESQL_URL', value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
        { name: 'CLOUDRON_POSTGRESQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_POSTGRESQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_POSTGRESQL_HOST', value: 'postgresql' },
        { name: 'CLOUDRON_POSTGRESQL_PORT', value: '5432' },
        { name: 'CLOUDRON_POSTGRESQL_DATABASE', value: data.database }
    ];

    debug('Setting postgresql addon config to %j', env);
    await addonConfigs.set(app.id, 'postgresql', env);
}
|
|
|
|
// Drops all data in the app's postgresql database (recreating it with the
// configured locale) via the postgresql service's REST API.
async function clearPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Clearing postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const locale = options.locale || 'C';

    const details = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const url = `http://${details.ip}:3000/databases/${database}/clear?access_token=${details.token}&username=${username}&locale=${locale}`;
    const [networkError, response] = await safe(superagent.post(url).ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Deletes the app's postgresql database and user via the postgresql service's
// REST API, then removes the app's postgresql addon config.
async function teardownPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const { database, username } = postgreSqlNames(app.id);

    const details = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const url = `http://${details.ip}:3000/databases/${database}?access_token=${details.token}&username=${username}`;
    const [networkError, response] = await safe(superagent.del(url).ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down postgresql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'postgresql');
}
|
|
|
|
// Streams a dump of the app's postgresql database from the postgresql
// service's REST API into the app's dump file on disk.
async function backupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up postgresql');

    const { database } = postgreSqlNames(app.id);
    const details = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    await pipeRequestToFile(`http://${details.ip}:3000/databases/${database}/backup?access_token=${details.token}`, dumpPath('postgresql', app.id));
}
|
|
|
|
// Streams the app's on-disk postgresql dump back into the postgresql
// service's REST API.
async function restorePostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Restore postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const details = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    await pipeFileToRequest(dumpPath('postgresql', app.id), `http://${details.ip}:3000/databases/${database}/restore?access_token=${details.token}&username=${username}`);
}
|
|
|
|
// (Re)creates the shared mongodb service container from infra.images.mongodb.
// If the image bump requires a dump/reload upgrade, databases are exported
// first and re-imported after the new container is healthy. On CPUs without
// AVX the old container is removed and mongodb is left stopped (modern
// mongodb builds require AVX).
async function startMongodb(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const image = infra.images.mongodb;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128); // access token for the container's REST management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mongodb, image);

    if (upgrading) {
        debug('startMongodb: mongodb will be upgraded');
        await exportDatabase('mongodb');
    }

    const serviceConfig = await getServiceConfig('mongodb');
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=unless-stopped -d --name=mongodb \
        --hostname mongodb \
        --net cloudron \
        --net-alias mongodb \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=mongodb \
        --ip ${constants.MONGODB_SERVICE_IPv4} \
        -e CLOUDRON_MONGODB_ROOT_PASSWORD=${rootPassword} \
        -e CLOUDRON_MONGODB_TOKEN=${cloudronToken} \
        -v ${dataDir}/mongodb:/var/lib/mongodb \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startMongodb: stopping and deleting previous mongodb container');
    await docker.stopContainer('mongodb');
    await docker.deleteContainer('mongodb');

    // note: checked after the old container is removed, so mongodb stays down on such CPUs
    if (!await hasAVX()) {
        debug('startMongodb: not starting mongodb because CPU does not have AVX');
        return;
    }

    debug('startMongodb: starting mongodb container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    if (!serviceConfig.recoveryMode) {
        // wait until the service announces its token, then reload dumps if upgrading
        await waitForContainer('mongodb', 'CLOUDRON_MONGODB_TOKEN');
        if (upgrading) await importDatabase('mongodb');
    }

    // after an image upgrade, drop the old image to reclaim disk space
    if (existingInfra.version !== 'none' && existingInfra.images.mongodb !== image) await docker.deleteImage(existingInfra.images.mongodb);
}
|
|
|
|
// Provisions a mongodb database for the app via the mongodb service's REST API
// and publishes the credentials into the app's addon config. Reuses an existing
// password/database name if present so credentials stay stable.
async function setupMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up mongodb');

    if (!await hasAVX()) throw new BoxError(BoxError.ADDONS_ERROR, 'Error setting up mongodb. CPU has no AVX support');

    const existingPassword = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_PASSWORD');
    const existingDatabase = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');

    const data = {
        // 16 bytes. keep this short, so as to not overflow the 127 byte index length in MongoDB < 4.4
        database: existingDatabase || hat(8 * 8),
        username: app.id,
        password: existingPassword || hat(4 * 128),
        oplog: !!options.oplog
    };

    const details = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const [networkError, response] = await safe(superagent.post(`http://${details.ip}:3000/databases?access_token=${details.token}`)
        .send(data)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mongodb: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mongodb. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_MONGODB_URL', value: `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
        { name: 'CLOUDRON_MONGODB_USERNAME', value: data.username },
        { name: 'CLOUDRON_MONGODB_PASSWORD', value: data.password },
        { name: 'CLOUDRON_MONGODB_HOST', value: 'mongodb' },
        { name: 'CLOUDRON_MONGODB_PORT', value: '27017' },
        { name: 'CLOUDRON_MONGODB_DATABASE', value: data.database }
    ];

    // oplog tailing authenticates against the app database but reads the "local" db
    if (options.oplog) env.push({ name: 'CLOUDRON_MONGODB_OPLOG_URL', value: `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });

    debug('Setting mongodb addon config to %j', env);
    await addonConfigs.set(app.id, 'mongodb', env);
}
|
|
|
|
// Empties the app's mongodb database (keeps the database itself) via the
// service container's REST API. Throws NOT_FOUND when no database was provisioned.
async function clearMongodb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!await hasAVX()) throw new BoxError(BoxError.ADDONS_ERROR, 'Error clearing mongodb. CPU has no AVX support');

    const { ip, token } = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error clearing mongodb. No database');

    const clearUrl = `http://${ip}:3000/databases/${database}/clear?access_token=${token}`;
    const [networkError, response] = await safe(superagent.post(clearUrl).ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mongodb: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mongodb. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Drops the app's mongodb database via the service container's REST API and
// removes the stored addon config. No-op when no database was ever provisioned.
async function teardownMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!await hasAVX()) throw new BoxError(BoxError.ADDONS_ERROR, 'Error tearing down mongodb. CPU has no AVX support');

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) return; // nothing was provisioned for this app

    const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/databases/${database}?access_token=${result.token}`)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb. Status code: ${response.status} message: ${response.body.message}`);

    // fix: this was a floating promise (missing await), so config-removal errors
    // were silently lost; all sibling teardown functions await this call
    await addonConfigs.unset(app.id, 'mongodb');
}
|
|
|
|
// Streams a dump of the app's mongodb database from the service container's
// REST API into the dump file used by backups.
async function backupMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up mongodb');

    if (!await hasAVX()) throw new BoxError(BoxError.ADDONS_ERROR, 'Error backing up mongodb. CPU has no AVX support');

    const { ip, token } = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error backing up mongodb. No database');

    const backupUrl = `http://${ip}:3000/databases/${database}/backup?access_token=${token}`;
    await pipeRequestToFile(backupUrl, dumpPath('mongodb', app.id));
}
|
|
|
|
// Streams the app's mongodb dump file back into the database via the service
// container's REST API.
async function restoreMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('restoreMongoDb');

    if (!await hasAVX()) throw new BoxError(BoxError.ADDONS_ERROR, 'Error restoring mongodb. CPU has no AVX support');

    const { ip, token } = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error restoring mongodb. No database');

    const restoreUrl = `http://${ip}:3000/databases/${database}/restore?access_token=${token}`;
    await pipeFileToRequest(dumpPath('mongodb', app.id), restoreUrl);
}
|
|
|
|
// Reports mongodb service status. On CPUs without AVX the service cannot run
// at all, so it is reported as disabled rather than stopped.
async function statusMongodb() {
    const avxSupported = await hasAVX();
    if (!avxSupported) return { status: exports.SERVICE_STATUS_DISABLED };

    return await containerStatus('mongodb');
}
|
|
|
|
// Restarts the mongodb container. Errors out when the CPU lacks AVX since the
// service is disabled on such hosts.
async function restartMongodb() {
    const avxSupported = await hasAVX();
    if (!avxSupported) throw new BoxError(BoxError.ADDONS_ERROR, 'MongoDB is disabled. CPU has no AVX support');

    return await docker.restartContainer('mongodb');
}
|
|
|
|
// (Re-)creates the graphite container used for metrics storage.
// existingInfra - previously recorded infra version/images, used to detect upgrades.
// On an incompatible image upgrade the graphite data dir is removed (no migration).
async function startGraphite(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const serviceConfig = await getServiceConfig('graphite');
    const image = infra.images.graphite;
    const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024; // default 256MB

    // 'none' means fresh install; otherwise compare images to decide if this is an upgrade
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite, image);

    if (upgrading) debug('startGraphite: graphite will be upgraded');

    // recovery mode runs the container writable with a sleeping shell so an admin can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // port 2003 is used by collectd
    const runCmd = `docker run --restart=unless-stopped -d --name=graphite \
        --hostname graphite \
        --net cloudron \
        --net-alias graphite \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=graphite \
        -m ${memoryLimit} \
        --memory-swap -1 \
        --ip ${constants.GRAPHITE_SERVICE_IPv4} \
        -p 127.0.0.1:2003:2003 \
        -v ${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startGraphite: stopping and deleting previous graphite container');
    await docker.stopContainer('graphite');
    await docker.deleteContainer('graphite');

    // on upgrade the on-disk data is wiped before starting the new image
    if (upgrading) await shell.sudo([ RMADDONDIR_CMD, 'graphite' ], {});

    debug('startGraphite: starting graphite container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    // any image change (even compatible ones) leaves the old image unused; clean it up
    if (existingInfra.version !== 'none' && existingInfra.images.graphite !== image) await docker.deleteImage(existingInfra.images.graphite);

    // restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
    setTimeout(async () => await safe(shell.sudo([ RESTART_SERVICE_CMD, 'collectd' ], {})), 60000);
}
|
|
|
|
// Enables the proxyAuth addon for an app: sets the CLOUDRON_PROXY_AUTH env
// marker and provisions a dedicated OpenID client used by the auth proxy.
// No-op when the app has no SSO or does not declare proxyAuth in its manifest.
async function setupProxyAuth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up proxyAuth');

    if (!(app.sso && app.manifest.addons && app.manifest.addons.proxyAuth)) return;

    await addonConfigs.set(app.id, 'proxyauth', [ { name: 'CLOUDRON_PROXY_AUTH', value: '1' } ]);

    debug('Creating OpenID client for proxyAuth');

    const proxyAuthClientId = `${app.id}-proxyauth`;
    const existing = await oidcClients.get(proxyAuthClientId);

    const client = {
        id: proxyAuthClientId,
        secret: existing ? existing.secret : hat(4 * 128), // ensure we keep the secret
        loginRedirectUri: `https://${app.fqdn}/callback`,
        logoutRedirectUri: '',
        tokenSignatureAlgorithm: 'RS256',
        name: 'ProxyAuth Addon',
        appId: app.id
    };

    if (existing) await oidcClients.update(client.id, client);
    else await oidcClients.add(client);
}
|
|
|
|
// Removes the proxyAuth addon config and its OpenID client. A missing client
// is tolerated since the addon may never have been fully set up.
async function teardownProxyAuth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    await addonConfigs.unset(app.id, 'proxyauth');

    debug('Deleting OpenID client for proxyAuth');

    const [delError] = await safe(oidcClients.del(`${app.id}-proxyauth`));
    if (delError && delError.reason !== BoxError.NOT_FOUND) throw delError;
}
|
|
|
|
// Exposes the docker proxy endpoint to the app via CLOUDRON_DOCKER_HOST.
async function setupDocker(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up docker');

    // 172.18.0.1 is the host's address as seen from containers on the bridge network
    await addonConfigs.set(app.id, 'docker', [
        { name: 'CLOUDRON_DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` }
    ]);
}
|
|
|
|
// Removes the app's docker addon config.
async function teardownDocker(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    await addonConfigs.unset(app.id, 'docker');
}
|
|
|
|
// Re-creates the per-app redis containers after an infra/image change.
// Unlike the shared services, redis runs one container per app that uses the
// addon; on an incompatible image upgrade each database is dumped first and
// re-imported after all containers are back up.
async function startRedis(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const image = infra.images.redis;
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis, image);

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!app.manifest.addons || !('redis' in app.manifest.addons)) continue; // app doesn't use the addon

        const redisName = `redis-${app.id}`;

        // dump before the old container is destroyed; importDatabase() below restores it
        if (upgrading) await backupRedis(app, {});

        debug(`startRedis: stopping and deleting previous redis container ${redisName}`);
        await docker.stopContainer(redisName);
        await docker.deleteContainer(redisName);

        debug(`startRedis: starting redis container ${redisName}`);
        await setupRedis(app, app.manifest.addons.redis); // starts the container
    }

    if (upgrading) await importDatabase('redis');

    // clean up the now-unused old image
    if (existingInfra.version !== 'none' && existingInfra.images.redis !== image) await docker.deleteImage(existingInfra.images.redis);
}
|
|
|
|
// Provisions (or re-uses) the app's dedicated redis container and stores the
// connection env vars as addon config. When the app is not running, the
// container is only created, not started; when started, waits for the
// container's healthcheck endpoint before returning.
async function setupRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const redisName = 'redis-' + app.id;

    // optional addon the user has turned off: drop any stale config and bail
    const disabled = app.manifest.addons.redis.optional && !app.enableRedis;
    if (disabled) return await addonConfigs.unset(app.id, 'redis');

    // keep the existing password so reconfiguration does not break the app
    const existingPassword = await addonConfigs.getByName(app.id, 'redis', '%REDIS_PASSWORD');

    const redisPassword = options.noPassword ? '' : (existingPassword || hat(4 * 48)); // see box#362 for password length
    const redisServiceToken = hat(4 * 48);

    // Compute redis memory limit based on app's memory limit (this is arbitrary)
    const memoryLimit = app.servicesConfig['redis']?.memoryLimit || APP_SERVICES['redis'].defaultMemoryLimit;

    // recovery mode runs the container writable with a sleeping shell
    const recoveryMode = app.servicesConfig['redis']?.recoveryMode || false;
    const readOnly = !recoveryMode ? '--read-only' : '';
    const cmd = recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    const isAppRunning = app.runState === apps.RSTATE_RUNNING; // when app is not running, only create the container

    const image = infra.images.redis;
    const label = app.fqdn; // note: do not add appId label because this interferes with app container deletion logic
    const runCmd = `docker ${isAppRunning ? 'run -d' : 'create'} --restart=unless-stopped --name=${redisName} \
        --hostname ${redisName} \
        --label=location=${label} \
        --net cloudron \
        --net-alias ${redisName} \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=${redisName} \
        -m ${memoryLimit} \
        --memory-swap -1 \
        -e CLOUDRON_REDIS_PASSWORD=${redisPassword} \
        -e CLOUDRON_REDIS_TOKEN=${redisServiceToken} \
        -v ${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    const env = [
        { name: 'CLOUDRON_REDIS_URL', value: 'redis://default:' + redisPassword + '@redis-' + app.id }, // https://github.com/redis/node-redis/issues/1591
        { name: 'CLOUDRON_REDIS_PASSWORD', value: redisPassword },
        { name: 'CLOUDRON_REDIS_HOST', value: redisName },
        { name: 'CLOUDRON_REDIS_PORT', value: '6379' }
    ];

    await addonConfigs.set(app.id, 'redis', env);

    // only create the container when one does not already exist
    const [inspectError, result] = await safe(docker.inspect(redisName));
    if (inspectError) {
        await shell.bash(runCmd, { encoding: 'utf8' });
    } else { // fast path
        debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
    }

    if (isAppRunning && !recoveryMode) await waitForContainer(`redis-${app.id}`, 'CLOUDRON_REDIS_TOKEN');
}
|
|
|
|
// Flushes the app's redis data via the redis container's REST API.
// No-op when the optional addon is not enabled for this app.
async function clearRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Clearing redis');

    if (app.manifest.addons.redis.optional && !app.enableRedis) return; // addon disabled

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');

    const clearUrl = `http://${ip}:3000/clear?access_token=${token}`;
    const [networkError, response] = await safe(superagent.post(clearUrl).ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing redis: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing redis. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Removes an app's redis container, its data directory, its logs and the
// stored addon config. Log removal is best-effort; data removal is not.
async function teardownRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    await docker.deleteContainer(`redis-${app.id}`);

    const [error] = await safe(shell.sudo([ RMADDONDIR_CMD, 'redis', app.id ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${error.message}`);

    // safe.error reflects the outcome of the immediately preceding safe.fs call
    safe.fs.rmSync(path.join(paths.LOG_DIR, `redis-${app.id}`), { recursive: true, force: true });
    if (safe.error) debug('teardownRedis: cannot cleanup logs: %o', safe.error);

    await addonConfigs.unset(app.id, 'redis');
}
|
|
|
|
// Streams a dump of the app's redis data into the backup dump file.
// No-op when the optional addon is not enabled for this app.
async function backupRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (app.manifest.addons.redis.optional && !app.enableRedis) return; // addon disabled

    debug('Backing up redis');

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
    await pipeRequestToFile(`http://${ip}:3000/backup?access_token=${token}`, dumpPath('redis', app.id));
}
|
|
|
|
// Streams the app's redis dump file back into the redis container.
// No-op when the optional addon is not enabled for this app.
async function restoreRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (app.manifest.addons.redis.optional && !app.enableRedis) return; // addon disabled

    debug('Restoring redis');

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
    await pipeFileToRequest(dumpPath('redis', app.id), `http://${ip}:3000/restore?access_token=${token}`);
}
|
|
|
|
// Creates the per-app tls directory. Throws FS_ERROR when creation fails.
async function setupTls(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const tlsDir = `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`;
    const created = safe.fs.mkdirSync(tlsDir, { recursive: true });
    if (!created) {
        debug('Error creating tls directory');
        throw new BoxError(BoxError.FS_ERROR, safe.error.message);
    }
}
|
|
|
|
// Removes the per-app tls directory. Best-effort: errors are swallowed by safe.fs.
async function teardownTls(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    safe.fs.rmSync(`${paths.PLATFORM_DATA_DIR}/tls/${app.id}`, { recursive: true, force: true });
}
|
|
|
|
// Reports the turn container's status and memory use. A running container
// without --read-only (recovery mode) is reported as 'starting'.
async function statusTurn() {
    const [inspectError, container] = await safe(docker.inspect('turn'));
    if (inspectError) {
        if (inspectError.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
        throw inspectError;
    }

    const statsResult = await docker.getStats(container.Id, { stream: false });
    const memory = statsResult.memory_stats || { usage: 0, limit: 1 }; // limit 1 avoids division by zero

    let status;
    if (!container.State.Running) status = exports.SERVICE_STATUS_STOPPED;
    else if (container.HostConfig.ReadonlyRootfs) status = exports.SERVICE_STATUS_ACTIVE;
    else status = exports.SERVICE_STATUS_STARTING;

    return {
        status,
        memoryUsed: memory.usage,
        memoryPercent: parseInt(100 * memory.usage / memory.limit)
    };
}
|
|
|
|
// Reports docker daemon health via a ping.
async function statusDocker() {
    const [pingError] = await safe(docker.ping());
    if (pingError) return { status: exports.SERVICE_STATUS_STOPPED };
    return { status: exports.SERVICE_STATUS_ACTIVE };
}
|
|
|
|
// Restarts the docker systemd unit. Failures are logged, not thrown.
async function restartDocker() {
    const [restartError] = await safe(shell.sudo([ RESTART_SERVICE_CMD, 'docker' ], {}));
    if (restartError) debug(`restartDocker: error restarting docker. ${restartError.message}`);
}
|
|
|
|
// Reports unbound status: the systemd unit must be active AND a test lookup
// must actually resolve before the service counts as active.
async function statusUnbound() {
    const [unitError] = await safe(shell.spawn('systemctl', ['is-active', 'unbound'], {}));
    if (unitError) return { status: exports.SERVICE_STATUS_STOPPED };

    const [digError, digResult] = await safe(dig.resolve('ipv4.api.cloudron.io', 'A', { timeout: 10000 }));
    const resolved = !digError && Array.isArray(digResult) && digResult.length !== 0;
    if (resolved) return { status: exports.SERVICE_STATUS_ACTIVE };

    debug('statusUnbound: unbound is up, but failed to resolve ipv4.api.cloudron.io . %o %j', digError, digResult);
    return { status: exports.SERVICE_STATUS_STARTING };
}
|
|
|
|
// Restarts the unbound systemd unit. Failures are logged, not thrown.
async function restartUnbound() {
    const [error] = await safe(shell.sudo([ RESTART_SERVICE_CMD, 'unbound' ], {}));
    // fix: log prefix previously said 'restartDocker:' (copy/paste error)
    if (error) debug(`restartUnbound: error restarting unbound. ${error.message}`);
}
|
|
|
|
// Reports nginx status via systemd; is-active exits non-zero when not running.
async function statusNginx() {
    const [unitError] = await safe(shell.spawn('systemctl', ['is-active', 'nginx'], {}));
    if (unitError) return { status: exports.SERVICE_STATUS_STOPPED };
    return { status: exports.SERVICE_STATUS_ACTIVE };
}
|
|
|
|
// Restarts the nginx systemd unit. Failures are logged, not thrown.
async function restartNginx() {
    const [error] = await safe(shell.sudo([ RESTART_SERVICE_CMD, 'nginx' ], {}));
    // fix: message previously said 'error restarting unbound' (copy/paste error)
    if (error) debug(`restartNginx: error restarting nginx. ${error.message}`);
}
|
|
|
|
// Health check for the graphite container: inspects the container, then probes
// the graphite-web dashboard over HTTP before reporting the service active.
async function statusGraphite() {
    const [error, container] = await safe(docker.inspect('graphite'));
    if (error && error.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
    if (error) throw error;

    const ip = safe.query(container, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.INACTIVE, 'Error getting IP of graphite service');

    // a failing HTTP probe means the container exists but graphite-web is not up yet
    const [networkError, response] = await safe(superagent.get(`http://${ip}:8000/graphite-web/dashboard`)
        .timeout(20000)
        .ok(() => true));

    if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${networkError.message}` };
    if (response.status !== 200) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.status} message: ${response.body.message}` };

    const result = await docker.getStats('graphite', { stream: false });
    const stats = result.memory_stats || { usage: 0, limit: 1 }; // limit 1 avoids division by zero

    return {
        status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit)
    };
}
|
|
|
|
// Restarts the graphite container and, a minute later, collectd so metrics
// submission resumes once graphite is back up (no graphite healthcheck exists).
async function restartGraphite() {
    await docker.restartContainer('graphite');

    setTimeout(async () => {
        const [error] = await safe(shell.sudo([ RESTART_SERVICE_CMD, 'collectd' ], {}));
        // fix: log message previously said 'collected' instead of 'collectd'
        if (error) debug(`restartGraphite: error restarting collectd. ${error.message}`);
    }, 60000);
}
|
|
|
|
// Removes the app's oauth addon config.
async function teardownOauth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownOauth');

    await addonConfigs.unset(app.id, 'oauth');
}
|
|
|
|
// Provisions the app's OIDC client using redirect URIs and signature algorithm
// from the addon options. No-op for apps without SSO. Idempotent: an existing
// client is updated and its secret preserved.
async function setupOidc(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!app.sso) return;

    debug('Setting up OIDC');

    const oidcAddonClientId = `${app.id}-oidc`;
    const [error, existing] = await safe(oidcClients.get(oidcAddonClientId));
    if (error) throw error;

    // ensure we keep the secret
    const client = {
        id: oidcAddonClientId,
        secret: existing ? existing.secret : hat(4 * 128),
        loginRedirectUri: options.loginRedirectUri || '',
        logoutRedirectUri: options.logoutRedirectUri || '',
        tokenSignatureAlgorithm: options.tokenSignatureAlgorithm || 'RS256',
        name: 'OIDC Addon',
        appId: app.id
    };

    if (existing) await oidcClients.update(client.id, client);
    else await oidcClients.add(client);
}
|
|
|
|
// Deletes the app's OIDC client; an already-missing client is tolerated.
async function teardownOidc(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down OIDC');

    const [delError] = await safe(oidcClients.del(`${app.id}-oidc`));
    if (delError && delError.reason !== BoxError.NOT_FOUND) throw delError;
}
|
|
|
|
// Returns the OIDC env vars injected into the app's container, or {} for apps
// without SSO. Throws NOT_FOUND when the OIDC client has not been allocated yet.
async function getDynamicEnvironmentOidc(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    // fix: check sso before the dashboard lookup; previously getLocation() was
    // awaited (and could fail) even for non-SSO apps whose result is discarded
    if (!app.sso) return {};

    const { fqdn: dashboardFqdn } = await dashboard.getLocation();

    const oidcAddonClientId = `${app.id}-oidc`;
    const client = await oidcClients.get(oidcAddonClientId);
    if (!client) throw new BoxError(BoxError.NOT_FOUND, `OIDC client for ${app.id} has not been allocated yet`); // happens with overzealous scheduler logic

    return {
        CLOUDRON_OIDC_DISCOVERY_URL: `https://${dashboardFqdn}/openid/.well-known/openid-configuration`,
        CLOUDRON_OIDC_ISSUER: `https://${dashboardFqdn}/openid`,
        CLOUDRON_OIDC_AUTH_ENDPOINT: `https://${dashboardFqdn}/openid/auth`,
        CLOUDRON_OIDC_TOKEN_ENDPOINT: `https://${dashboardFqdn}/openid/token`,
        CLOUDRON_OIDC_KEYS_ENDPOINT: `https://${dashboardFqdn}/openid/jwks`,
        CLOUDRON_OIDC_PROFILE_ENDPOINT: `https://${dashboardFqdn}/openid/me`,
        // following is only available if rpInitiatedLogout would be enabled https://github.com/panva/node-oidc-provider/blob/main/docs/README.md#featuresrpinitiatedlogout
        // CLOUDRON_OIDC_LOGOUT_URL: `https://${dashboardFqdn}/openid/session/end`

        CLOUDRON_OIDC_CLIENT_ID: client.id,
        CLOUDRON_OIDC_CLIENT_SECRET: client.secret,

        CLOUDRON_OIDC_PROVIDER_NAME: await branding.getCloudronName()
    };
}
|
|
|
|
// Returns a BoxError describing why the given addons cannot run on this host,
// or null when all of them are supported. Does not throw for unsupported addons.
async function checkAddonsSupport(addons) {
    assert.strictEqual(typeof addons, 'object');

    if (addons.mongodb) {
        const avxSupported = await hasAVX();
        if (!avxSupported) return new BoxError(BoxError.BAD_FIELD, 'This app requires MongoDB, but MongoDB is disabled because the CPU does not support AVX');
    }

    return null;
}
|
|
|
|
// Moves an app's localstorage data between volumes. targetVolumeId and
// targetVolumePrefix of null select the default storage location. No-op when
// source and target resolve to the same directory.
async function moveDataDir(app, targetVolumeId, targetVolumePrefix) {
    assert.strictEqual(typeof app, 'object');
    assert.ok(app.manifest.addons.localstorage, 'should have local storage addon');
    assert(targetVolumeId === null || typeof targetVolumeId === 'string');
    assert(targetVolumePrefix === null || typeof targetVolumePrefix === 'string');

    const resolvedSourceDir = await apps.getStorageDir(app);
    const targetApp = Object.assign({}, app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix });
    const resolvedTargetDir = await apps.getStorageDir(targetApp);

    debug(`moveDataDir: migrating data from ${resolvedSourceDir} to ${resolvedTargetDir}`);

    if (resolvedSourceDir === resolvedTargetDir) return;

    const [moveError] = await safe(shell.sudo([ MV_VOLUME_CMD, resolvedSourceDir, resolvedTargetDir ], {}));
    if (moveError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error migrating data directory: ${moveError.message}`);
}
|