9c8f78a059 already fixed many of the cert issues.
However, some issues were caught in the CI:
* Previously, the TLS addon had to be rebuilt and not just restarted when certs changed. For this reason, we now
move things to a directory instead of mounting individual files. This way the container only needs a restart.
* Cleanups must be driven by the database and not the filesystem. Otherwise, when files are deleted
on disk or after a restore, the certs are left dangling forever in the db.
* Separate the db cert logic and disk cert logic. This way we can sync as many times as we want and whenever we want.
1924 lines
75 KiB
JavaScript
1924 lines
75 KiB
JavaScript
'use strict';
|
|
|
|
// Public interface of the services module: lifecycle of platform service
// containers, addon setup/teardown/backup/restore for apps, and helpers to
// introspect the containers backing them.
exports = module.exports = {
    listServices,
    getServiceStatus,
    getServiceLogs,

    configureService,
    restartService,
    rebuildService,

    startAppServices,
    stopAppServices,

    startServices,

    setupAddons,
    teardownAddons,
    backupAddons,
    restoreAddons,
    clearAddons,

    getEnvironment,
    getContainerNamesSync,

    getContainerDetails,

    // service status values reported by getServiceStatus()
    SERVICE_STATUS_STARTING: 'starting', // container up, waiting for healthcheck
    SERVICE_STATUS_ACTIVE: 'active',
    SERVICE_STATUS_STOPPED: 'stopped'
};
|
|
|
|
const addonConfigs = require('./addonconfigs.js'),
|
|
apps = require('./apps.js'),
|
|
assert = require('assert'),
|
|
blobs = require('./blobs.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
crypto = require('crypto'),
|
|
debug = require('debug')('box:services'),
|
|
docker = require('./docker.js'),
|
|
eventlog = require('./eventlog.js'),
|
|
fs = require('fs'),
|
|
hat = require('./hat.js'),
|
|
http = require('http'),
|
|
infra = require('./infra_version.js'),
|
|
LogStream = require('./log-stream.js'),
|
|
mail = require('./mail.js'),
|
|
os = require('os'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
{ pipeline } = require('stream'),
|
|
promiseRetry = require('./promise-retry.js'),
|
|
safe = require('safetydance'),
|
|
semver = require('semver'),
|
|
settings = require('./settings.js'),
|
|
sftp = require('./sftp.js'),
|
|
shell = require('./shell.js'),
|
|
spawn = require('child_process').spawn,
|
|
superagent = require('superagent'),
|
|
system = require('./system.js');
|
|
|
|
// Default no-op lifecycle handler used by addons that need no action for a
// particular step (setup/teardown/backup/restore/clear).
const NOOP = async function (/*app, options*/) {};

// Privileged helper scripts (run via sudo) shipped alongside this module.
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh');
const MKDIRVOLUME_CMD = path.join(__dirname, 'scripts/mkdirvolume.sh');
|
|
|
|
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
// Per-addon lifecycle handlers. Each entry provides setup/teardown/backup/
// restore/clear; NOOP marks steps that require no action for that addon.
// Note that several addons use setupX as their restore handler.
const ADDONS = {
    turn: {
        setup: setupTurn,
        teardown: teardownTurn,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    email: {
        setup: setupEmail,
        teardown: teardownEmail,
        backup: NOOP,
        restore: setupEmail,
        clear: NOOP,
    },
    ldap: {
        setup: setupLdap,
        teardown: teardownLdap,
        backup: NOOP,
        restore: setupLdap,
        clear: NOOP,
    },
    localstorage: {
        setup: setupLocalStorage,
        teardown: teardownLocalStorage,
        backup: NOOP, // no backup because it's already inside app data
        restore: NOOP,
        clear: clearLocalStorage,
    },
    mongodb: {
        setup: setupMongoDb,
        teardown: teardownMongoDb,
        backup: backupMongoDb,
        restore: restoreMongoDb,
        clear: clearMongodb,
    },
    mysql: {
        setup: setupMySql,
        teardown: teardownMySql,
        backup: backupMySql,
        restore: restoreMySql,
        clear: clearMySql,
    },
    postgresql: {
        setup: setupPostgreSql,
        teardown: teardownPostgreSql,
        backup: backupPostgreSql,
        restore: restorePostgreSql,
        clear: clearPostgreSql,
    },
    proxyAuth: {
        setup: setupProxyAuth,
        teardown: teardownProxyAuth,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    recvmail: {
        setup: setupRecvMail,
        teardown: teardownRecvMail,
        backup: NOOP,
        restore: setupRecvMail,
        clear: NOOP,
    },
    redis: {
        setup: setupRedis,
        teardown: teardownRedis,
        backup: backupRedis,
        restore: restoreRedis,
        clear: clearRedis,
    },
    sendmail: {
        setup: setupSendMail,
        teardown: teardownSendMail,
        backup: NOOP,
        restore: setupSendMail,
        clear: NOOP,
    },
    scheduler: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP,
    },
    docker: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP,
    },
    tls: {
        setup: setupTls,
        teardown: teardownTls,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP,
    },
    oauth: { // kept for backward compatibility. keep teardown for uninstall to work
        setup: NOOP,
        teardown: teardownOauth,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP,
    }
};
|
|
|
|
// services are actual containers that are running. addons are the concepts requested by app
// Each entry provides a status() probe, a restart() action and a
// defaultMemoryLimit in bytes. The default limit is used by getServiceStatus()
// and applyMemoryLimit() when no limit is configured.
const SERVICES = {
    turn: {
        status: statusTurn,
        restart: docker.restartContainer.bind(null, 'turn'),
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    mail: {
        status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),
        restart: mail.restartMail,
        defaultMemoryLimit: mail.DEFAULT_MEMORY_LIMIT
    },
    mongodb: {
        status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'),
        restart: docker.restartContainer.bind(null, 'mongodb'),
        // database limits scale with total RAM: 256MB base + 256MB per ~4GB of RAM
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    mysql: {
        status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'),
        restart: docker.restartContainer.bind(null, 'mysql'),
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    postgresql: {
        status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'),
        restart: docker.restartContainer.bind(null, 'postgresql'),
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
    },
    docker: {
        status: statusDocker,
        restart: restartDocker,
        defaultMemoryLimit: 0
    },
    unbound: {
        status: statusUnbound,
        restart: restartUnbound,
        defaultMemoryLimit: 0
    },
    sftp: {
        status: sftp.status,
        restart: docker.restartContainer.bind(null, 'sftp'),
        defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT
    },
    graphite: {
        status: statusGraphite,
        restart: restartGraphite,
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    nginx: {
        status: statusNginx,
        restart: restartNginx,
        defaultMemoryLimit: 0
    }
};
|
|
|
|
// Per-app service containers (one container per app, named `<service>-<instance>`
// where instance is the app id). All lifecycle functions return promises and
// every call site in this file awaits them; the previous vestigial callback
// `done` parameters were never supplied by any caller and have been removed.
const APP_SERVICES = {
    redis: {
        status: (instance) => containerStatus(`redis-${instance}`, 'CLOUDRON_REDIS_TOKEN'),
        start: (instance) => docker.startContainer(`redis-${instance}`),
        stop: (instance) => docker.stopContainer(`redis-${instance}`),
        restart: (instance) => docker.restartContainer(`redis-${instance}`),
        defaultMemoryLimit: 150 * 1024 * 1024
    }
};
|
|
|
|
// Parses a docker image tag of the form '<repository>:<version>[@<algo>:<digest>]'.
// Returns { repository, version, digest } where version is a semver object
// (or null if unparseable) and digest may be undefined when absent.
// Uses slice() instead of the deprecated String.prototype.substr().
function parseImageTag(tag) {
    const repository = tag.split(':', 1)[0];
    const afterRepository = tag.slice(repository.length + 1); // '<version>[@<algo>:<digest>]'
    const version = afterRepository.split('@', 1)[0];
    const digest = afterRepository.slice(version.length + 1).split(':', 2)[1]; // part after '<algo>:'

    return { repository, version: semver.parse(version), digest };
}
|
|
|
|
// An image upgrade (with data migration) is required when the image's major
// version changed between the existing and the current tag.
function requiresUpgrade(existingTag, currentTag) {
    const existing = parseImageTag(existingTag);
    const current = parseImageTag(currentTag);

    return existing.version.major !== current.version.major;
}
|
|
|
|
// paths for dumps
// Returns the on-disk dump location for an app's database addon, or undefined
// for addons that have no dump file.
function dumpPath(addon, appId) {
    const dumpFilenames = {
        postgresql: 'postgresqldump',
        mysql: 'mysqldump',
        mongodb: 'mongodbdump',
        redis: 'dump.rdb'
    };

    const filename = dumpFilenames[addon];
    return filename ? path.join(paths.APPS_DATA_DIR, appId, filename) : undefined;
}
|
|
|
|
// Inspects a service container and returns { ip, token, state }: the IP on the
// 'cloudron' docker network, the access token taken from the env var named
// tokenEnvName, and the raw docker State object. Throws INACTIVE when the
// container has no IP and DOCKER_ERROR when env/token extraction fails.
async function getContainerDetails(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    const inspectData = await docker.inspect(containerName);

    const ip = safe.query(inspectData, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.INACTIVE, `Error getting IP of ${containerName} service`);

    // extract the cloudron token for auth
    const env = safe.query(inspectData, 'Config.Env', null);
    if (!env) throw new BoxError(BoxError.DOCKER_ERROR, `Error inspecting environment of ${containerName} service`);

    const tokenEntry = env.find((e) => e.startsWith(tokenEnvName));
    if (!tokenEntry) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);

    const token = tokenEntry.slice(tokenEnvName.length + 1); // +1 for the = sign
    if (!token) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);

    return { ip, token, state: inspectData.State };
}
|
|
|
|
// Probes a service container. Returns SERVICE_STATUS_STOPPED when the
// container is missing/inactive, SERVICE_STATUS_STARTING (with an error string)
// while the healthcheck does not yet pass, and otherwise the docker run state
// together with memory usage and the healthcheck payload.
async function containerStatus(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    const [error, addonDetails] = await safe(getContainerDetails(containerName, tokenEnvName));
    if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return { status: exports.SERVICE_STATUS_STOPPED };
    if (error) throw error;

    const [networkError, response] = await safe(superagent.get(`http://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`)
        .timeout(20000)
        .ok(() => true));

    if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${networkError.message}` };
    // use response.status in the message as well, consistent with the check above
    // and with waitForContainer() (the message previously read response.statusCode)
    if (response.status !== 200 || !response.body.status) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}` };

    const result = await docker.memoryUsage(containerName);
    const stats = result.memory_stats || { usage: 0, limit: 1 };

    return {
        status: addonDetails.state.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit),
        healthcheck: response.body
    };
}
|
|
|
|
// Lists all known service ids: global services by name plus one
// 'redis:<appId>' entry for every app that uses the redis addon.
async function listServices() {
    const serviceIds = Object.keys(SERVICES);

    const allApps = await apps.list();
    for (const app of allApps) {
        if (app.manifest.addons?.redis) serviceIds.push(`redis:${app.id}`);
    }

    return serviceIds;
}
|
|
|
|
// Returns the stored config object for a service id. Global services
// ('<name>') are stored in settings; per-app services ('<name>:<appId>') in
// the app record. Returns an empty object when nothing is configured.
async function getServiceConfig(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');

    if (instance) { // per-app service
        const app = await apps.get(instance);
        if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

        return app.servicesConfig[name] || {};
    }

    const servicesConfig = await settings.getServicesConfig();
    return servicesConfig[name] || {};
}
|
|
|
|
// Returns the status record for a service id: name, status, memory usage,
// last error, healthcheck payload and effective config (with the default
// memory limit filled in when none is configured).
async function getServiceStatus(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');
    let containerStatusFunc, service;

    if (instance) { // per-app service ('<name>:<appId>')
        service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
        containerStatusFunc = service.status.bind(null, instance);
    } else if (SERVICES[name]) {
        service = SERVICES[name];
        containerStatusFunc = service.status;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    const result = await containerStatusFunc();

    const tmp = {
        name,
        status: result.status,
        // stopped/starting services report no memory stats; keep the 0 default
        // instead of overwriting it with undefined
        memoryUsed: result.memoryUsed ?? 0,
        memoryPercent: result.memoryPercent ?? 0,
        error: result.error || null,
        healthcheck: result.healthcheck || null,
        config: await getServiceConfig(id)
    };

    if (!tmp.config.memoryLimit && service.defaultMemoryLimit) {
        tmp.config.memoryLimit = service.defaultMemoryLimit;
    }

    return tmp;
}
|
|
|
|
// Persists the configuration for a service (global services in settings,
// per-app services on the app record) and applies it in the background: a
// change of 'recoveryMode' triggers a container rebuild, otherwise only the
// memory limit is (re-)applied. Always records an eventlog entry.
async function configureService(id, data, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance ] = id.split(':');
    let needsRebuild = false;

    if (instance) { // per-app service ('<name>:<appId>')
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        const app = await apps.get(instance);
        if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

        const servicesConfig = app.servicesConfig;
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there
        servicesConfig[name] = data;

        await apps.update(instance, { servicesConfig });
    } else if (SERVICES[name]) {
        const servicesConfig = await settings.getServicesConfig();
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there
        servicesConfig[name] = data;

        await settings.setServicesConfig(servicesConfig);
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`configureService: ${id} rebuild=${needsRebuild}`);

    // do this in background
    if (needsRebuild) {
        safe(rebuildService(id, auditSource), { debug });
    } else {
        safe(applyMemoryLimit(id), { debug });
    }

    await eventlog.add(eventlog.ACTION_SERVICE_CONFIGURE, auditSource, { id, data });
}
|
|
|
|
// Returns a LogStream for the given service id. docker/unbound are read from
// journald, nginx from its access/error logs and everything else from the
// per-container app.log. Closing the returned stream kills the child process.
// The previously duplicated tail-argument construction for the nginx and
// default branches is now shared.
async function getServiceLogs(id, options) {
    assert.strictEqual(typeof id, 'string');
    assert(options && typeof options === 'object');

    assert.strictEqual(typeof options.lines, 'number'); // -1 means 'all'
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    const [name, instance] = id.split(':');

    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    } else if (!SERVICES[name]) {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    debug(`Getting logs for ${name}`);

    const { lines, follow } = options;
    const format = options.format || 'json';

    let cmd;
    const args = [];

    if (name === 'docker' || name === 'unbound') {
        // docker and unbound use journald
        cmd = 'journalctl';

        args.push('--lines=' + (lines === -1 ? 'all' : lines));
        args.push(`--unit=${name}`);
        args.push('--no-pager');
        args.push('--output=short-iso');

        if (follow) args.push('--follow');
    } else {
        cmd = '/usr/bin/tail';

        args.push('--lines=' + (lines === -1 ? '+1' : lines));
        if (follow) args.push('--follow', '--retry', '--quiet'); // same as -F. to make it work if file doesn't exist, --quiet to not output file headers, which are no logs

        if (name === 'nginx') {
            args.push('/var/log/nginx/access.log');
            args.push('/var/log/nginx/error.log');
        } else {
            const containerName = APP_SERVICES[name] ? `${name}-${instance}` : name;
            args.push(path.join(paths.LOG_DIR, containerName, 'app.log'));
        }
    }

    const cp = spawn(cmd, args);

    const logStream = new LogStream({ format, source: name });
    logStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process

    cp.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// Recreates the docker container of a service without changing the platform
// infra version. Redis containers are removed and re-created via setupRedis;
// other services are re-started through their start function.
async function rebuildService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
    // passing an infra version of 'none' will not attempt to purge existing data
    const [name, instance] = id.split(':');

    if (name === 'redis') {
        await shell.promises.exec('removeRedis', `docker rm -f redis-${instance} || true`);
        const app = await apps.get(instance);
        if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container
    } else {
        const starters = new Map([
            [ 'turn', startTurn ],
            [ 'mongodb', startMongodb ],
            [ 'postgresql', startPostgresql ],
            [ 'mysql', startMysql ],
            [ 'sftp', sftp.start ],
            [ 'graphite', startGraphite ],
            [ 'mail', mail.startMail ]
        ]);

        const startFunc = starters.get(name);
        if (startFunc) await startFunc({ version: 'none' }); // nothing to rebuild for other services
    }

    safe(applyMemoryLimit(id), { debug }); // do this in background. ok to fail

    await eventlog.add(eventlog.ACTION_SERVICE_REBUILD, auditSource, { id });
}
|
|
|
|
// Restarts the container(s) backing a service id and records an eventlog
// entry. Per-app ids ('<name>:<appId>') restart that app's instance.
async function restartService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance] = id.split(':');

    if (instance) {
        const service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        await service.restart(instance);
    } else {
        const service = SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        await service.restart();
    }

    await eventlog.add(eventlog.ACTION_SERVICE_RESTART, auditSource, { id });
}
|
|
|
|
// in the future, we can refcount and lazy start global services
// Starts all per-app service containers (currently only redis) for the app.
// Errors are logged, not thrown, so that starting the app itself does not
// fail; the user can fix the service from the Services view.
async function startAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;
    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue; // assume addons name is service name

        const [error] = await safe(APP_SERVICES[addon].start(instance));
        if (error) debug(`startAppServices: ${addon}:${instance}`, error);
    }
}
|
|
|
|
// in the future, we can refcount and stop global services as well
// Stops all per-app service containers (currently only redis) for the app.
// Errors are logged, not thrown, so that stopping the app itself does not
// fail; the user can fix the service from the Services view.
async function stopAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;
    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue; // assume addons name is service name

        const [error] = await safe(APP_SERVICES[addon].stop(instance));
        if (error) debug(`stopAppServices: ${addon}:${instance}`, error);
    }
}
|
|
|
|
// Polls the healthcheck endpoint of a service container until it answers with
// HTTP 200 and a truthy body.status (10 attempts, 15s apart). Throws
// ADDONS_ERROR when the healthcheck never passes.
async function waitForContainer(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    debug(`Waiting for ${containerName}`);

    const { ip, token } = await getContainerDetails(containerName, tokenEnvName);

    await promiseRetry({ times: 10, interval: 15000, debug }, async () => {
        const [networkError, response] = await safe(superagent.get(`http://${ip}:3000/healthcheck?access_token=${token}`)
            .timeout(5000)
            .ok(() => true));

        if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${networkError.message}`);
        if (response.status !== 200 || !response.body.status) throw new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}`);
    });
}
|
|
|
|
// Runs the setup handler of every addon the app requests. No-op when addons
// is falsy; throws NOT_FOUND for unknown addon names.
async function setupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('setupAddons: Setting up %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        debug(`setupAddons: setting up addon ${addon} with options ${JSON.stringify(options)}`);

        await ADDONS[addon].setup(app, options);
    }
}
|
|
|
|
// Runs the (destructive) teardown handler of every addon the app requests.
// No-op when addons is falsy; throws NOT_FOUND for unknown addon names.
async function teardownAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('teardownAddons: Tearing down %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        debug(`teardownAddons: Tearing down addon ${addon} with options ${JSON.stringify(options)}`);

        await ADDONS[addon].teardown(app, options);
    }
}
|
|
|
|
// Runs the backup handler of every addon the app requests. No-op when addons
// is falsy; throws NOT_FOUND for unknown addon names.
async function backupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('backupAddons');

    if (!addons) return;

    debug('backupAddons: backing up %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].backup(app, options);
    }
}
|
|
|
|
// Runs the clear handler (wipe data, keep addon) of every addon the app
// requests. No-op when addons is falsy; throws NOT_FOUND for unknown names.
async function clearAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('clearAddons');

    if (!addons) return;

    debug('clearAddons: clearing %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].clear(app, options);
    }
}
|
|
|
|
// Runs the restore handler of every addon the app requests. No-op when addons
// is falsy; throws NOT_FOUND for unknown addon names.
async function restoreAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('restoreAddons');

    if (!addons) return;

    debug('restoreAddons: restoring %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].restore(app, options);
    }
}
|
|
|
|
// Re-imports a single addon's data for one app: setup, then clear (in case a
// previous restore crashed midway), then restore from the dump.
async function importAppDatabase(app, addon) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof addon, 'string');

    if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

    const handlers = ADDONS[addon];
    const options = app.manifest.addons[addon];

    await handlers.setup(app, options);
    await handlers.clear(app, options); // clear in case we crashed in a restore
    await handlers.restore(app, options);
}
|
|
|
|
// Imports the given addon's data for every app that uses it (used after a
// database container migration). Apps whose import fails are marked errored;
// finally the export checkpoint file is removed for future migrations.
async function importDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`importDatabase: Importing ${addon}`);

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!(addon in app.manifest.addons)) continue; // app doesn't use the addon

        debug(`importDatabase: Importing addon ${addon} of app ${app.id}`);

        const [error] = await safe(importAppDatabase(app, addon));
        if (error) {
            debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored`, error);
            // FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
            // not clear, if repair workflow should be part of addon or per-app
            await safe(apps.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }));
        }
    }

    safe.fs.unlinkSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`)); // clean up for future migrations
}
|
|
|
|
// Dumps the data of every app using `addon` to disk, writes a checkpoint file
// and then removes the addon container and its data directory so it can be
// re-created fresh. Re-run safe: the checkpoint file short-circuits the
// export after a crash/restart.
async function exportDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`exportDatabase: Exporting ${addon}`);

    if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) {
        debug(`exportDatabase: Already exported addon ${addon} in previous run`);
        return;
    }

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon
        if (app.installationState === apps.ISTATE_ERROR) continue; // missing db causes crash in old app addon containers

        debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);

        const [error] = await safe(ADDONS[addon].backup(app, app.manifest.addons[addon]));
        if (error) {
            debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
            // for errored apps, we can ignore if export had an error
            // NOTE(review): errored apps are already skipped above, so this check looks redundant — confirm
            if (app.installationState === apps.ISTATE_ERROR) continue;
            throw error;
        }
    }

    safe.fs.writeFileSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8');
    if (safe.error) throw new BoxError(BoxError.FS_ERROR, 'Error writing export checkpoint file'); // 'new' was missing here; all other sites use 'new BoxError'
    // note: after this point, we are restart safe. it's ok if the box code crashes at this point
    await shell.promises.exec(`exportDatabase - remove${addon}`, `docker rm -f ${addon}`); // what if db writes something when quitting ...
    await shell.promises.sudo(`exportDatabase - removeAddonDir${addon}`, [ RMADDONDIR_CMD, addon ], {}); // ready to start afresh
}
|
|
|
|
// Applies the configured memory limit of a service id to its container,
// falling back to the service's default limit when none is configured.
async function applyMemoryLimit(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');
    const serviceConfig = await getServiceConfig(id);

    let containerName, memoryLimit;
    if (instance) { // per-app service ('<name>:<appId>')
        const service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        containerName = `${name}-${instance}`;
        memoryLimit = serviceConfig?.memoryLimit || service.defaultMemoryLimit;
    } else if (SERVICES[name]) {
        containerName = name;
        memoryLimit = serviceConfig?.memoryLimit || SERVICES[name].defaultMemoryLimit;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`applyMemoryLimit: ${containerName} ${JSON.stringify(serviceConfig)}`);

    const memory = await system.getMemoryAllocation(memoryLimit);
    await docker.update(containerName, memory, memoryLimit);
}
|
|
|
|
// (Re-)creates platform service containers on box startup. On any infra
// version change all services are started; otherwise only those whose image
// tag changed. Database containers start unlimited and are then scaled down
// to their configured memory limit in the background.
async function startServices(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    // mail comes first to reduce email downtime
    const serviceStarters = [
        [ 'mail', mail.startMail ],
        [ 'turn', startTurn ],
        [ 'mysql', startMysql ],
        [ 'postgresql', startPostgresql ],
        [ 'mongodb', startMongodb ],
        [ 'redis', startRedis ],
        [ 'graphite', startGraphite ],
        [ 'sftp', sftp.start ]
    ];

    const startFuncs = [];

    // always start addons on any infra change, regardless of minor or major update
    if (existingInfra.version !== infra.version) {
        debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
        startFuncs.push(...serviceStarters.map(([ , startFunc ]) => startFunc));
    } else {
        assert.strictEqual(typeof existingInfra.images, 'object');

        for (const [ image, startFunc ] of serviceStarters) {
            if (infra.images[image].tag !== existingInfra.images[image].tag) startFuncs.push(startFunc);
        }

        debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
    }

    for (const func of startFuncs) {
        await func(existingInfra);
    }

    // we always start db containers with unlimited memory. we then scale them down per configuration
    for (const id of [ 'mysql', 'postgresql', 'mongodb' ]) {
        safe(applyMemoryLimit(id), { debug }); // no waiting. and it's ok if applying service configs fails
    }
}
|
|
|
|
// Returns the addon environment of an app as an array of 'NAME=value'
// strings, including the docker proxy host when the docker addon is used.
async function getEnvironment(app) {
    assert.strictEqual(typeof app, 'object');

    const configs = await addonConfigs.getByAppId(app.id);

    if (app.manifest.addons['docker']) configs.push({ name: 'CLOUDRON_DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` });

    return configs.map((e) => e.name + '=' + e.value);
}
|
|
|
|
// Returns the names of auxiliary docker containers belonging to an app.
// Currently only the scheduler addon creates extra containers, named
// '<appId>-<taskName>'. Returns an empty array when addons is falsy.
function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return [];

    let names = [];
    for (const addon in addons) {
        if (addon !== 'scheduler') continue;
        // names here depend on how scheduler.js creates containers
        names = names.concat(Object.keys(addons.scheduler).map((taskName) => `${app.id}-${taskName}`));
    }

    return names;
}
|
|
|
|
// Creates the app's data volume directory via the privileged mkdirvolume
// helper. Safe to call repeatedly; throws FS_ERROR on failure.
async function setupLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupLocalStorage');

    const volumeDataDir = await apps.getStorageDir(app);

    const [mkdirError] = await safe(shell.promises.sudo('createVolume', [ MKDIRVOLUME_CMD, volumeDataDir ], {}));
    if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `Error creating app storage data dir: ${mkdirError.message}`);
}
|
|
|
|
// Empties the app's data volume directory (keeps the directory itself) via
// the privileged clearvolume helper. Throws FS_ERROR on failure.
async function clearLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('clearLocalStorage');

    const volumeDataDir = await apps.getStorageDir(app);
    // wrap in safe() to get the [error] tuple (as setupLocalStorage does);
    // previously the raw promise result was destructured, which is not the
    // error-tuple convention used elsewhere in this file
    const [error] = await safe(shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, 'clear', volumeDataDir ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);
}
|
|
|
|
// Removes the app's data volume directory entirely via the privileged
// clearvolume helper. Throws FS_ERROR on failure.
async function teardownLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownLocalStorage');

    const volumeDataDir = await apps.getStorageDir(app);
    // wrap in safe() to get the [error] tuple (as setupLocalStorage does);
    // previously the raw promise result was destructured, which is not the
    // error-tuple convention used elsewhere in this file
    const [error] = await safe(shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, 'rmdir', volumeDataDir ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);
}
|
|
|
|
// Exposes the TURN/STUN connection details (server, ports, shared secret) to
// the app via addon config environment variables. Requires the turn secret
// blob to exist (created by startTurn).
async function setupTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) throw new BoxError(BoxError.ADDONS_ERROR, 'Turn secret is missing');

    // STUN and TURN share the standard ports: 3478 plain, 5349 tls
    const PLAIN_PORT = '3478';
    const TLS_PORT = '5349';

    const env = [
        { name: 'CLOUDRON_STUN_SERVER', value: settings.dashboardFqdn() },
        { name: 'CLOUDRON_STUN_PORT', value: PLAIN_PORT },
        { name: 'CLOUDRON_STUN_TLS_PORT', value: TLS_PORT },
        { name: 'CLOUDRON_TURN_SERVER', value: settings.dashboardFqdn() },
        { name: 'CLOUDRON_TURN_PORT', value: PLAIN_PORT },
        { name: 'CLOUDRON_TURN_TLS_PORT', value: TLS_PORT },
        { name: 'CLOUDRON_TURN_SECRET', value: turnSecret }
    ];

    debug('Setting up TURN');

    await addonConfigs.set(app.id, 'turn', env);
}
|
|
|
|
// Starts (or restarts) the platform TURN/STUN service container.
// existingInfra: previously recorded infra state; unused here but kept for
//   signature parity with the other start* service functions.
// The turn secret is generated once and persisted as a blob so addon configs
// that reference it (setupTurn) stay valid across restarts.
// Fix: corrected typo in the debug message ('generting' -> 'generating').
async function startTurn(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const serviceConfig = await getServiceConfig('turn');
    const tag = infra.images.turn.tag;
    const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit;
    const memory = await system.getMemoryAllocation(memoryLimit);
    const realm = settings.dashboardFqdn();

    let turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) {
        debug('startTurn: generating turn secret');
        turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
        await blobs.setString(blobs.ADDON_TURN_SECRET, turnSecret);
    }

    // in recovery mode the container is writable and just sleeps so an operator can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // this exports 3478/tcp, 5349/tls and 50000-51000/udp. note that this runs on the host network because docker's userland proxy
    // is spun for every port. we can disable this in some future release with --userland-proxy=false
    // https://github.com/moby/moby/issues/8356 and https://github.com/moby/moby/issues/14856
    const runCmd = `docker run --restart=always -d --name="turn" \
        --hostname turn \
        --net host \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=turn \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_TURN_SECRET="${turnSecret}" \
        -e CLOUDRON_REALM="${realm}" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    // always recreate the container so config/image changes take effect
    await shell.promises.exec('stopTurn', 'docker stop turn || true');
    await shell.promises.exec('removeTurn', 'docker rm -f turn || true');
    await shell.promises.exec('startTurn', runCmd);
}
|
|
|
|
// Removes the app's turn addon configs from the database.
async function teardownTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down TURN');
    await addonConfigs.unset(app.id, 'turn');
}
|
|
|
|
// Publishes platform mail service connection details for apps using the
// full email addon. The domain list only contains mail-enabled domains.
async function setupEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const mailDomains = await mail.listDomains();
    const mailInDomains = mailDomains.filter((d) => d.enabled).map((d) => d.domain).join(',');

    // note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
    const env = [
        { name: 'CLOUDRON_EMAIL_SMTP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_SMTP_PORT', value: '2525' },
        { name: 'CLOUDRON_EMAIL_SMTPS_PORT', value: '2465' },
        { name: 'CLOUDRON_EMAIL_STARTTLS_PORT', value: '2587' },
        { name: 'CLOUDRON_EMAIL_IMAP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_IMAPS_PORT', value: '9993' },
        { name: 'CLOUDRON_EMAIL_IMAP_PORT', value: '9393' },
        { name: 'CLOUDRON_EMAIL_SIEVE_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_SIEVE_PORT', value: '4190' }, // starttls
        { name: 'CLOUDRON_EMAIL_DOMAIN', value: app.domain },
        { name: 'CLOUDRON_EMAIL_DOMAINS', value: mailInDomains },
        { name: 'CLOUDRON_EMAIL_SERVER_HOST', value: settings.mailFqdn() }, // this is also a hint to reconfigure on mail server name change
        { name: 'CLOUDRON_EMAIL_LDAP_MAILBOXES_BASE_DN', value: 'ou=mailboxes,dc=cloudron' }
    ];

    debug('Setting up Email');
    await addonConfigs.set(app.id, 'email', env);
}
|
|
|
|
// Removes the app's email addon configs from the database.
async function teardownEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down Email');
    await addonConfigs.unset(app.id, 'email');
}
|
|
|
|
// Publishes LDAP connection env vars for an SSO-enabled app.
// Apps without sso get no ldap config at all (silent no-op).
async function setupLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!app.sso) return;

    const env = [
        { name: 'CLOUDRON_LDAP_SERVER', value: '172.18.0.1' },
        { name: 'CLOUDRON_LDAP_HOST', value: '172.18.0.1' }, // to keep things in sync with the database _HOST vars
        { name: 'CLOUDRON_LDAP_PORT', value: '' + constants.LDAP_PORT },
        { name: 'CLOUDRON_LDAP_URL', value: 'ldap://172.18.0.1:' + constants.LDAP_PORT },
        { name: 'CLOUDRON_LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
        { name: 'CLOUDRON_LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
        { name: 'CLOUDRON_LDAP_BIND_DN', value: `cn=${app.id},ou=apps,dc=cloudron` },
        { name: 'CLOUDRON_LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
    ];

    debug('Setting up LDAP');
    await addonConfigs.set(app.id, 'ldap', env);
}
|
|
|
|
// Removes the app's ldap addon configs from the database.
async function teardownLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down LDAP');
    await addonConfigs.unset(app.id, 'ldap');
}
|
|
|
|
// Publishes SMTP credentials for the app's outbound mailbox.
// When the sendmail addon is optional and the mailbox is disabled, any
// previous config is replaced with an empty set.
async function setupSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up SendMail');

    if (app.manifest.addons.sendmail.optional && !app.enableMailbox) return await addonConfigs.set(app.id, 'sendmail', []);

    // re-use a previously provisioned password so reconfigure is idempotent
    const existingPassword = await addonConfigs.getByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length
    const mailboxAddress = `${app.mailboxName}@${app.mailboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_SMTP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_MAIL_SMTP_PORT', value: '2525' },
        { name: 'CLOUDRON_MAIL_SMTPS_PORT', value: '2465' },
        { name: 'CLOUDRON_MAIL_STARTTLS_PORT', value: '2587' },
        { name: 'CLOUDRON_MAIL_SMTP_USERNAME', value: mailboxAddress },
        { name: 'CLOUDRON_MAIL_SMTP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_FROM', value: mailboxAddress },
        { name: 'CLOUDRON_MAIL_DOMAIN', value: app.mailboxDomain }
    ];

    if (app.manifest.addons.sendmail.supportsDisplayName) env.push({ name: 'CLOUDRON_MAIL_FROM_DISPLAY_NAME', value: app.mailboxDisplayName });

    debug('Setting sendmail addon config to %j', env);
    await addonConfigs.set(app.id, 'sendmail', env);
}
|
|
|
|
// Removes the app's sendmail addon configs from the database.
async function teardownSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Tearing down sendmail');
    await addonConfigs.unset(app.id, 'sendmail');
}
|
|
|
|
// Publishes IMAP/POP3 credentials for the app's inbox.
// When the inbox is disabled, any previous config is replaced with an empty set.
async function setupRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupRecvMail: setting up recvmail');

    if (!app.enableInbox) return await addonConfigs.set(app.id, 'recvmail', []);

    // re-use a previously provisioned password so reconfigure is idempotent
    const existingPassword = await addonConfigs.getByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length
    const inboxAddress = `${app.inboxName}@${app.inboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_IMAP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_MAIL_IMAP_PORT', value: '9393' },
        { name: 'CLOUDRON_MAIL_IMAPS_PORT', value: '9993' },
        { name: 'CLOUDRON_MAIL_POP3_PORT', value: '9595' },
        { name: 'CLOUDRON_MAIL_POP3S_PORT', value: '9995' },
        { name: 'CLOUDRON_MAIL_IMAP_USERNAME', value: inboxAddress },
        { name: 'CLOUDRON_MAIL_IMAP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_TO', value: inboxAddress },
        { name: 'CLOUDRON_MAIL_TO_DOMAIN', value: app.inboxDomain }
    ];

    debug('setupRecvMail: setting recvmail addon config to %j', env);
    await addonConfigs.set(app.id, 'recvmail', env);
}
|
|
|
|
// Removes the app's recvmail addon configs from the database.
async function teardownRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownRecvMail: tearing down recvmail');
    await addonConfigs.unset(app.id, 'recvmail');
}
|
|
|
|
// Derives a deterministic MySQL database/user name from an app id.
// The id is md5-hashed to strip the dashes and the hex digest is truncated
// to 16 characters (the maximum length of a mysql username).
function mysqlDatabaseName(appId) {
    assert.strictEqual(typeof appId, 'string');

    const digest = crypto.createHash('md5').update(appId).digest('hex');
    return digest.substring(0, 16);
}
|
|
|
|
// Starts (or restarts) the platform mysql service container.
// existingInfra: the previously recorded infra state; its image tag is compared
//   against the current one to decide whether a dump/reimport upgrade is needed.
// NOTE: root password and token are regenerated on every (re)start and handed
// to the container via environment variables.
async function startMysql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const tag = infra.images.mysql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128);

    // 'none' means first-time infra setup; otherwise compare image tags
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mysql.tag, tag);

    if (upgrading) {
        debug('startMysql: mysql will be upgraded');
        await exportDatabase('mysql'); // dump data before the old container goes away
    }

    const serviceConfig = await getServiceConfig('mysql');
    // recovery mode: container is left writable and just sleeps so an operator can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=always -d --name="mysql" \
        --hostname mysql \
        --net cloudron \
        --net-alias mysql \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=mysql \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_MYSQL_TOKEN=${cloudronToken} \
        -e CLOUDRON_MYSQL_ROOT_HOST=172.18.0.1 \
        -e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
        -v "${dataDir}/mysql:/var/lib/mysql" \
        --label isCloudronManaged=true \
        --cap-add SYS_NICE \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    // always recreate the container so new env vars/image take effect
    await shell.promises.exec('stopMysql', 'docker stop mysql || true');
    await shell.promises.exec('removeMysql', 'docker rm -f mysql || true');
    await shell.promises.exec('startMysql', runCmd);

    if (!serviceConfig.recoveryMode) {
        await waitForContainer('mysql', 'CLOUDRON_MYSQL_TOKEN'); // block until healthcheck token responds
        if (upgrading) await importDatabase('mysql'); // reimport the dump taken above
    }
}
|
|
|
|
// Provisions a mysql database (or database prefix) for the app via the mysql
// service's REST API and stores the connection env vars in the addon configs.
// options.multipleDatabases selects the 'prefixes' API variant.
// Throws BoxError.ADDONS_ERROR on network failure or non-201 response.
async function setupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up mysql');

    // re-use a previously provisioned password so reconfigure is idempotent
    const existingPassword = await addonConfigs.getByName(app.id, 'mysql', '%MYSQL_PASSWORD');
    const name = mysqlDatabaseName(app.id); // shared by database, prefix and username

    const data = {
        database: name,
        prefix: name,
        username: name,
        password: existingPassword || hat(4 * 48) // see box#362 for password length
    };

    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const endpoint = options.multipleDatabases ? 'prefixes' : 'databases';

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/` + endpoint + `?access_token=${result.token}`)
        .send(data)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mysql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mysql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_MYSQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_MYSQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_MYSQL_HOST', value: 'mysql' },
        { name: 'CLOUDRON_MYSQL_PORT', value: '3306' }
    ];

    if (options.multipleDatabases) {
        env.push({ name: 'CLOUDRON_MYSQL_DATABASE_PREFIX', value: `${data.prefix}_` });
    } else {
        env.push(
            { name: 'CLOUDRON_MYSQL_URL', value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
            { name: 'CLOUDRON_MYSQL_DATABASE', value: data.database }
        );
    }

    debug('Setting mysql addon config to %j', env);
    await addonConfigs.set(app.id, 'mysql', env);
}
|
|
|
|
// Empties the app's mysql database (or prefix) without dropping it.
// Throws BoxError.ADDONS_ERROR on network failure or non-200 response.
async function clearMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const endpoint = options.multipleDatabases ? 'prefixes' : 'databases';

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/` + endpoint + `/${database}/clear?access_token=${result.token}`)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mysql. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Drops the app's mysql database (or prefix) and its user, then removes the
// stored addon configs. Throws BoxError.ADDONS_ERROR on service failure.
async function teardownMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);
    const username = database; // the mysql user shares the derived name
    const endpoint = options.multipleDatabases ? 'prefixes' : 'databases';

    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');

    const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/` + endpoint + `/${database}?access_token=${result.token}&username=${username}`)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'mysql');
}
|
|
|
|
// Issues a POST to `url` and streams the (200) response body into `filename`.
// Used to download database dumps from addon service containers.
// Rejects with BoxError.NETWORK_ERROR on connection/DNS failure and with
// BoxError.ADDONS_ERROR on bad status, pipe error or a truncated response.
// Fix: error/debug messages contained the literal text '$(unknown)' instead of
// interpolating the filename ('$()' is not template interpolation in JS).
async function pipeRequestToFile(url, filename) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof filename, 'string');

    return new Promise((resolve, reject) => {
        const writeStream = fs.createWriteStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest
        request.setTimeout(600000, () => {
            debug('pipeRequestToFile: timeout - connect or post-connect idle timeout');
            request.destroy(); // connect OR post-connect idle timeout
            reject(new Error('Request timedout'));
        });

        request.on('error', (error) => reject(new BoxError(BoxError.NETWORK_ERROR, `Could not pipe ${url} to ${filename}: ${error.message}`))); // network error, dns error
        request.on('response', (response) => {
            debug(`pipeRequestToFile: connected with status code ${response.statusCode}`);
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${url} to ${filename}: status ${response.statusCode}`));

            pipeline(response, writeStream, (error) => {
                if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping ${url} to ${filename}: ${error.message}`));

                // response.complete is false when the connection died mid-transfer
                if (!response.complete) return reject(new BoxError(BoxError.ADDONS_ERROR, `Response not complete when piping ${url} to ${filename}`));
                resolve();
            });
        });
        request.end(); // make the request
    });
}
|
|
|
|
// Streams `filename` as the POST body of a request to `url`.
// Used to upload database dumps to addon service containers for restore.
// Resolves only after the server replied with 200; rejects on timeout,
// pipe error or unexpected status.
// Fix: messages contained the literal text '$(unknown)' instead of
// interpolating the filename; the pipeline error message now also carries
// the underlying error's message.
async function pipeFileToRequest(filename, url) {
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof url, 'string');

    return new Promise((resolve, reject) => {
        const readStream = fs.createReadStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest
        request.setTimeout(600000, () => {
            debug('pipeFileToRequest: timeout - connect or post-connect idle timeout');
            request.destroy();
            reject(new Error('Request timedout'));
        });
        request.on('response', (response) => {
            debug(`pipeFileToRequest: request completed with status code ${response.statusCode}`);
            response.resume(); // drain the response
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${filename} to ${url}: status ${response.statusCode} complete ${response.complete}`));

            resolve();
        });

        debug(`pipeFileToRequest: piping ${filename} to ${url}`);
        pipeline(readStream, request, function (error) {
            if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping file ${filename} to request ${url}: ${error.message}`));
            debug(`pipeFileToRequest: piped ${filename} to ${url}`); // now we have to wait for 'response' above
        });
    });
}
|
|
|
|
// Downloads a dump of the app's mysql database (or prefix) into the dump path.
async function backupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up mysql');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const endpoint = options.multipleDatabases ? 'prefixes' : 'databases';

    const url = `http://${result.ip}:3000/` + endpoint + `/${database}/backup?access_token=${result.token}`;
    await pipeRequestToFile(url, dumpPath('mysql', app.id));
}
|
|
|
|
// Uploads a previously taken dump of the app's mysql database (or prefix)
// back into the mysql service.
async function restoreMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('restoreMySql');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const endpoint = options.multipleDatabases ? 'prefixes' : 'databases';

    const url = `http://${result.ip}:3000/` + endpoint + `/${database}/restore?access_token=${result.token}`;
    await pipeFileToRequest(dumpPath('mysql', app.id), url);
}
|
|
|
|
// Derives the PostgreSQL database and user names for an app.
// Dashes are stripped from the app id before use in the identifiers.
// Fix: added the typeof assert that every sibling name helper
// (e.g. mysqlDatabaseName) has, and stopped mutating the parameter.
function postgreSqlNames(appId) {
    assert.strictEqual(typeof appId, 'string');

    const id = appId.replace(/-/g, '');
    return { database: `db${id}`, username: `user${id}` };
}
|
|
|
|
// Starts (or restarts) the platform postgresql service container.
// existingInfra: the previously recorded infra state; its image tag is compared
//   against the current one to decide whether a dump/reimport upgrade is needed.
// NOTE: root password and token are regenerated on every (re)start and handed
// to the container via environment variables.
async function startPostgresql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const tag = infra.images.postgresql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128);

    // 'none' means first-time infra setup; otherwise compare image tags
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.postgresql.tag, tag);

    if (upgrading) {
        debug('startPostgresql: postgresql will be upgraded');
        await exportDatabase('postgresql'); // dump data before the old container goes away
    }

    const serviceConfig = await getServiceConfig('postgresql');
    // recovery mode: container is left writable and just sleeps so an operator can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=always -d --name="postgresql" \
        --hostname postgresql \
        --net cloudron \
        --net-alias postgresql \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=postgresql \
        --dns 172.18.0.1 \
        --dns-search=. \
        --shm-size=128M \
        -e CLOUDRON_POSTGRESQL_ROOT_PASSWORD="${rootPassword}" \
        -e CLOUDRON_POSTGRESQL_TOKEN="${cloudronToken}" \
        -v "${dataDir}/postgresql:/var/lib/postgresql" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    // always recreate the container so new env vars/image take effect
    await shell.promises.exec('stopPostgresql', 'docker stop postgresql || true');
    await shell.promises.exec('removePostgresql', 'docker rm -f postgresql || true');
    await shell.promises.exec('startPostgresql', runCmd);

    if (!serviceConfig.recoveryMode) {
        await waitForContainer('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'); // block until healthcheck token responds
        if (upgrading) await importDatabase('postgresql'); // reimport the dump taken above
    }
}
|
|
|
|
// Creates the app's postgresql role and database via the service's REST API
// and persists the resulting connection env vars in the addon configs.
// Throws BoxError.ADDONS_ERROR on network failure or non-201 response.
async function setupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up postgresql');

    const { database, username } = postgreSqlNames(app.id);

    // re-use a previously provisioned password so reconfigure is idempotent
    const existingPassword = await addonConfigs.getByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD');

    const data = {
        database,
        username,
        password: existingPassword || hat(4 * 128),
        locale: options.locale || 'C'
    };

    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases?access_token=${result.token}`)
        .send(data)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up postgresql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up postgresql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_POSTGRESQL_URL', value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
        { name: 'CLOUDRON_POSTGRESQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_POSTGRESQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_POSTGRESQL_HOST', value: 'postgresql' },
        { name: 'CLOUDRON_POSTGRESQL_PORT', value: '5432' },
        { name: 'CLOUDRON_POSTGRESQL_DATABASE', value: data.database }
    ];

    debug('Setting postgresql addon config to %j', env);
    await addonConfigs.set(app.id, 'postgresql', env);
}
|
|
|
|
// Empties the app's postgresql database without dropping it.
// Throws BoxError.ADDONS_ERROR on network failure or non-200 response.
async function clearPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Clearing postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const locale = options.locale || 'C';
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}&locale=${locale}`)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Drops the app's postgresql database and role, then removes the stored
// addon configs. Throws BoxError.ADDONS_ERROR on service failure.
async function teardownPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const { database, username } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/databases/${database}?access_token=${result.token}&username=${username}`)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down postgresql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'postgresql');
}
|
|
|
|
// Downloads a dump of the app's postgresql database into the dump path.
async function backupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up postgresql');

    const { database } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const url = `http://${result.ip}:3000/databases/${database}/backup?access_token=${result.token}`;
    await pipeRequestToFile(url, dumpPath('postgresql', app.id));
}
|
|
|
|
// Uploads a previously taken dump of the app's postgresql database back
// into the postgresql service.
async function restorePostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Restore postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');

    const url = `http://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}&username=${username}`;
    await pipeFileToRequest(dumpPath('postgresql', app.id), url);
}
|
|
|
|
// Starts (or restarts) the platform mongodb service container.
// existingInfra: the previously recorded infra state; its image tag is compared
//   against the current one to decide whether a dump/reimport upgrade is needed.
// NOTE: root password and token are regenerated on every (re)start and handed
// to the container via environment variables.
async function startMongodb(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const tag = infra.images.mongodb.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128);
    const cloudronToken = hat(8 * 128);

    // 'none' means first-time infra setup; otherwise compare image tags
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mongodb.tag, tag);

    if (upgrading) {
        debug('startMongodb: mongodb will be upgraded');
        await exportDatabase('mongodb'); // dump data before the old container goes away
    }

    const serviceConfig = await getServiceConfig('mongodb');
    // recovery mode: container is left writable and just sleeps so an operator can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=always -d --name="mongodb" \
        --hostname mongodb \
        --net cloudron \
        --net-alias mongodb \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=mongodb \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_MONGODB_ROOT_PASSWORD="${rootPassword}" \
        -e CLOUDRON_MONGODB_TOKEN="${cloudronToken}" \
        -v "${dataDir}/mongodb:/var/lib/mongodb" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    // always recreate the container so new env vars/image take effect
    await shell.promises.exec('stopMongodb', 'docker stop mongodb || true');
    await shell.promises.exec('removeMongodb', 'docker rm -f mongodb || true');
    await shell.promises.exec('startMongodb', runCmd);

    if (!serviceConfig.recoveryMode) {
        await waitForContainer('mongodb', 'CLOUDRON_MONGODB_TOKEN'); // block until healthcheck token responds
        if (upgrading) await importDatabase('mongodb'); // reimport the dump taken above
    }
}
|
|
|
|
// Provisions a mongodb database for the app via the service's REST API and
// persists the resulting connection env vars in the addon configs.
// options.oplog additionally exposes an oplog tailing URL.
// Throws BoxError.ADDONS_ERROR on network failure or non-201 response.
async function setupMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up mongodb');

    // re-use any previously provisioned password/database so reconfigure is idempotent
    const existingPassword = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_PASSWORD');
    const existingDatabase = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');

    // 16 bytes. keep this short, so as to not overflow the 127 byte index length in MongoDB < 4.4
    const database = existingDatabase || hat(8 * 8);

    const data = {
        database,
        username: app.id,
        password: existingPassword || hat(4 * 128),
        oplog: !!options.oplog
    };

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases?access_token=${result.token}`)
        .send(data)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mongodb: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mongodb. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_MONGODB_URL', value: `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
        { name: 'CLOUDRON_MONGODB_USERNAME', value: data.username },
        { name: 'CLOUDRON_MONGODB_PASSWORD', value: data.password },
        { name: 'CLOUDRON_MONGODB_HOST', value: 'mongodb' },
        { name: 'CLOUDRON_MONGODB_PORT', value: '27017' },
        { name: 'CLOUDRON_MONGODB_DATABASE', value: data.database }
    ];

    if (options.oplog) {
        env.push({ name: 'CLOUDRON_MONGODB_OPLOG_URL', value: `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });
    }

    debug('Setting mongodb addon config to %j', env);
    await addonConfigs.set(app.id, 'mongodb', env);
}
|
|
|
|
// Empties the app's mongodb database without dropping it.
// Throws BoxError.NOT_FOUND when no database was ever provisioned and
// BoxError.ADDONS_ERROR on service failure.
async function clearMongodb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error clearing mongodb. No database');

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}`)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mongodb: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mongodb. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Drops the app's mongodb database, then removes the stored addon configs.
// A missing database record means nothing was provisioned -> silent no-op.
// Throws BoxError.ADDONS_ERROR on service failure.
// Fix: the final addonConfigs.unset() was not awaited — a floating promise
// whose failure would be silently lost (every other teardown awaits it).
async function teardownMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) return;

    const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/databases/${database}?access_token=${result.token}`)
        .ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'mongodb');
}
|
|
|
|
// Downloads a dump of the app's mongodb database into the dump path.
// Throws BoxError.NOT_FOUND when no database was ever provisioned.
async function backupMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up mongodb');

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error backing up mongodb. No database');

    const url = `http://${result.ip}:3000/databases/${database}/backup?access_token=${result.token}`;
    await pipeRequestToFile(url, dumpPath('mongodb', app.id));
}
|
|
|
|
// Uploads a previously taken dump of the app's mongodb database back into
// the mongodb service. Throws BoxError.NOT_FOUND when no database record exists.
async function restoreMongoDb(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('restoreMongoDb');

    const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');

    const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
    if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error restoring mongodb. No database');

    const url = `http://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}`;
    await pipeFileToRequest(dumpPath('mongodb', app.id), url);
}
|
|
|
|
// Starts (or restarts) the platform graphite (metrics) service container.
// existingInfra: the previously recorded infra state; on an image upgrade the
//   graphite data directory is wiped (metrics are not migrated).
async function startGraphite(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const serviceConfig = await getServiceConfig('graphite');
    const tag = infra.images.graphite.tag;
    const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024; // 256MB default when unconfigured
    const memory = await system.getMemoryAllocation(memoryLimit);

    // 'none' means first-time infra setup; otherwise compare image tags
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);

    if (upgrading) debug('startGraphite: graphite will be upgraded');

    // recovery mode: container is left writable and just sleeps so an operator can exec in
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // port 2003 is used by collectd
    const runCmd = `docker run --restart=always -d --name="graphite" \
        --hostname graphite \
        --net cloudron \
        --net-alias graphite \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=graphite \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -p 127.0.0.1:2003:2003 \
        -v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    // always recreate the container; on upgrade the data dir is removed as well
    await shell.promises.exec('stopGraphite', 'docker stop graphite || true');
    await shell.promises.exec('removeGraphite', 'docker rm -f graphite || true');
    if (upgrading) await shell.promises.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {});
    await shell.promises.exec('startGraphite', runCmd);

    // restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
    setTimeout(async () => await safe(shell.promises.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {})), 60000);
}
|
|
|
|
// Records the proxyAuth addon env for the app, when the app uses SSO and requests proxyAuth.
async function setupProxyAuth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up proxyAuth');

    const wantsProxyAuth = app.sso && app.manifest.addons && app.manifest.addons.proxyAuth;
    if (!wantsProxyAuth) return;

    await addonConfigs.set(app.id, 'proxyauth', [ { name: 'CLOUDRON_PROXY_AUTH', value: '1' } ]);
}
|
|
|
|
// Removes any recorded proxyAuth addon configuration for the app.
async function teardownProxyAuth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    await addonConfigs.unset(app.id, 'proxyauth');
}
|
|
|
|
// Recreates the per-app redis containers on platform start. When the redis image
// tag changed incompatibly, each redis is backed up first and re-imported afterwards.
async function startRedis(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const tag = infra.images.redis.tag;
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis.tag, tag);

    for (const app of await apps.list()) {
        if (!('redis' in app.manifest.addons)) continue; // app doesn't use the addon

        const containerName = `redis-${app.id}`;

        if (upgrading) await backupRedis(app, {});

        // redis will backup as part of signal handling; '|| true' tolerates a missing container
        await shell.promises.exec('stopRedis', `docker stop ${containerName} || true`);
        await shell.promises.exec('removeRedis', `docker rm -f ${containerName} || true`);

        await setupRedis(app, app.manifest.addons.redis); // starts the container
    }

    if (upgrading) await importDatabase('redis');
}
|
|
|
|
// Ensures that app's addon redis container is running. Can be called when named container already exists/running
// Generates (or re-uses) the redis password, starts the container if missing and
// records the app's CLOUDRON_REDIS_* addon env, then waits for the healthcheck.
async function setupRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const redisName = 'redis-' + app.id;

    // re-use an existing password so reconfiguring does not invalidate app credentials
    const existingPassword = await addonConfigs.getByName(app.id, 'redis', '%REDIS_PASSWORD');

    const redisPassword = options.noPassword ? '' : (existingPassword || hat(4 * 48)); // see box#362 for password length
    const redisServiceToken = hat(4 * 48); // token for the addon's management API on port 3000

    // Compute redis memory limit based on app's memory limit (this is arbitrary)
    const memoryLimit = app.servicesConfig['redis']?.memoryLimit || APP_SERVICES['redis'].defaultMemoryLimit;
    const memory = await system.getMemoryAllocation(memoryLimit);

    // in recovery mode the rootfs stays writable and the container just sleeps, for debugging
    const recoveryMode = app.servicesConfig['redis']?.recoveryMode || false;
    const readOnly = !recoveryMode ? '--read-only' : '';
    const cmd = recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    const tag = infra.images.redis.tag;
    const label = app.fqdn;
    // note that we do not add appId label because this interferes with the stop/start app logic
    const runCmd = `docker run --restart=always -d --name=${redisName} \
        --hostname ${redisName} \
        --label=location=${label} \
        --net cloudron \
        --net-alias ${redisName} \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag="${redisName}" \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
        -e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
        -v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${tag} ${cmd}`;

    // env vars exposed to the app container
    const env = [
        { name: 'CLOUDRON_REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
        { name: 'CLOUDRON_REDIS_PASSWORD', value: redisPassword },
        { name: 'CLOUDRON_REDIS_HOST', value: redisName },
        { name: 'CLOUDRON_REDIS_PORT', value: '6379' }
    ];

    // inspect failure implies the container does not exist yet; otherwise re-use it as-is
    const [inspectError, result] = await safe(docker.inspect(redisName));
    if (inspectError) {
        await shell.promises.exec('startRedis', runCmd);
    } else { // fast path
        debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
    }

    if (!recoveryMode) {
        await addonConfigs.set(app.id, 'redis', env);
        await waitForContainer('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
    }
}
|
|
|
|
// Wipes all data in the app's redis via the addon's management API.
async function clearRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Clearing redis');

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');

    const clearUrl = `http://${ip}:3000/clear?access_token=${token}`;
    const [networkError, response] = await safe(superagent.post(clearUrl).ok(() => true));

    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing redis: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing redis. Status code: ${response.status} message: ${response.body.message}`);
}
|
|
|
|
// Removes the app's redis container, its data directory, its logs and its addon config.
async function teardownRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    await docker.deleteContainer(`redis-${app.id}`);

    const [rmError] = await safe(shell.promises.sudo('removeVolume', [ RMADDONDIR_CMD, 'redis', app.id ], {}));
    if (rmError) throw new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${rmError.message}`);

    // log removal is best-effort
    safe.fs.rmSync(path.join(paths.LOG_DIR, `redis-${app.id}`), { recursive: true, force: true });
    if (safe.error) debug('cannot cleanup logs:', safe.error);

    await addonConfigs.unset(app.id, 'redis');
}
|
|
|
|
// Dumps the app's redis data (via the addon's HTTP API) into the local dump file.
async function backupRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up redis');

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
    await pipeRequestToFile(`http://${ip}:3000/backup?access_token=${token}`, dumpPath('redis', app.id));
}
|
|
|
|
// Restores the app's redis data by streaming the local dump file to the addon's HTTP API.
async function restoreRedis(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Restoring redis');

    const { ip, token } = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
    await pipeFileToRequest(dumpPath('redis', app.id), `http://${ip}:3000/restore?access_token=${token}`);
}
|
|
|
|
// Creates the per-app tls directory where the app's certs are placed.
async function setupTls(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const tlsDir = `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`;
    const created = safe.fs.mkdirSync(tlsDir, { recursive: true });
    if (!created) {
        debug('Error creating tls directory');
        throw new BoxError(BoxError.FS_ERROR, safe.error.message);
    }
}
|
|
|
|
// Removes the per-app tls directory (best-effort; 'force' suppresses missing-dir errors).
async function teardownTls(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const tlsDir = `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`;
    safe.fs.rmSync(tlsDir, { recursive: true, force: true });
}
|
|
|
|
// Reports the turn service status and memory usage. A read-only rootfs signals the
// container finished starting up (it is made read-only after initialization).
async function statusTurn() {
    const [inspectError, container] = await safe(docker.inspect('turn'));
    if (inspectError) {
        if (inspectError.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
        throw inspectError;
    }

    const usage = await docker.memoryUsage(container.Id);
    const stats = usage.memory_stats || { usage: 0, limit: 1 };

    let status = exports.SERVICE_STATUS_STOPPED;
    if (container.State.Running) {
        status = container.HostConfig.ReadonlyRootfs ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STARTING;
    }

    return {
        status,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit)
    };
}
|
|
|
|
// Reports whether the docker daemon responds to a ping.
async function statusDocker() {
    const [pingError] = await safe(docker.ping());
    if (pingError) return { status: exports.SERVICE_STATUS_STOPPED };
    return { status: exports.SERVICE_STATUS_ACTIVE };
}
|
|
|
|
// Restarts the docker systemd service (best-effort; failures are only logged).
async function restartDocker() {
    const [restartError] = await safe(shell.promises.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}));
    if (restartError) debug(`restartDocker: error restarting docker. ${restartError.message}`);
}
|
|
|
|
// Reports whether the unbound systemd service is active.
async function statusUnbound() {
    const [execError] = await safe(shell.promises.exec('statusUnbound', 'systemctl is-active unbound'));
    if (execError) return { status: exports.SERVICE_STATUS_STOPPED };
    return { status: exports.SERVICE_STATUS_ACTIVE };
}
|
|
|
|
// Restarts the unbound systemd service (best-effort; failures are only logged).
async function restartUnbound() {
    const [error] = await safe(shell.promises.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}));
    // log prefix fixed: previously said 'restartDocker:' (copy-paste slip)
    if (error) debug(`restartUnbound: error restarting unbound. ${error.message}`);
}
|
|
|
|
// Reports whether the nginx systemd service is active.
async function statusNginx() {
    const [execError] = await safe(shell.promises.exec('statusNginx', 'systemctl is-active nginx'));
    if (execError) return { status: exports.SERVICE_STATUS_STOPPED };
    return { status: exports.SERVICE_STATUS_ACTIVE };
}
|
|
|
|
// Restarts the nginx systemd service (best-effort; failures are only logged).
async function restartNginx() {
    const [error] = await safe(shell.promises.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}));
    // log message fixed: previously said 'error restarting unbound' (copy-paste slip)
    if (error) debug(`restartNginx: error restarting nginx. ${error.message}`);
}
|
|
|
|
// Reports the graphite service status. Health is determined by probing the
// graphite-web dashboard over the container's cloudron network address.
async function statusGraphite() {
    const [inspectError, container] = await safe(docker.inspect('graphite'));
    if (inspectError) {
        if (inspectError.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
        throw inspectError;
    }

    const ip = safe.query(container, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.INACTIVE, 'Error getting IP of graphite service');

    const dashboardUrl = `http://${ip}:8000/graphite-web/dashboard`;
    const [networkError, response] = await safe(superagent.get(dashboardUrl).timeout(20000).ok(() => true));

    if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${networkError.message}` };
    if (response.status !== 200) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.status} message: ${response.body.message}` };

    const usage = await docker.memoryUsage('graphite');
    const stats = usage.memory_stats || { usage: 0, limit: 1 };

    return {
        status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit)
    };
}
|
|
|
|
// Restarts the graphite container and, a minute later, collectd (which feeds
// disk stats into graphite and must reconnect after graphite comes back up).
async function restartGraphite() {
    await docker.restartContainer('graphite');

    setTimeout(async () => {
        const [error] = await safe(shell.promises.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}));
        // log message fixed: previously said 'collected' instead of 'collectd'
        if (error) debug(`restartGraphite: error restarting collectd. ${error.message}`);
    }, 60000);
}
|
|
|
|
// Removes any recorded oauth addon configuration for the app.
async function teardownOauth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownOauth');

    await addonConfigs.unset(app.id, 'oauth');
}
|