// cloudron-box/src/services.js
'use strict';

// Public interface: lifecycle management, status reporting and log access for
// platform service containers, plus addon setup/teardown/backup/restore for apps.
exports = module.exports = {
    listServices,
    getServiceStatus,
    getServiceLogs,
    configureService,
    restartService,
    rebuildService,

    startAppServices,
    stopAppServices,
    startServices,

    setupAddons,
    teardownAddons,
    backupAddons,
    restoreAddons,
    clearAddons,

    getEnvironment,
    getContainerNamesSync,
    getContainerDetails,

    // states reported by the per-service status functions
    SERVICE_STATUS_STARTING: 'starting', // container up, waiting for healthcheck
    SERVICE_STATUS_ACTIVE: 'active',
    SERVICE_STATUS_STOPPED: 'stopped'
};
const addonConfigs = require('./addonconfigs.js'),
    apps = require('./apps.js'),
    assert = require('assert'),
    blobs = require('./blobs.js'),
    BoxError = require('./boxerror.js'),
    constants = require('./constants.js'),
    crypto = require('crypto'),
    debug = require('debug')('box:services'),
    docker = require('./docker.js'),
    eventlog = require('./eventlog.js'),
    fs = require('fs'),
    hat = require('./hat.js'),
    http = require('http'),
    infra = require('./infra_version.js'),
    LogStream = require('./log-stream.js'),
    mail = require('./mail.js'),
    os = require('os'),
    path = require('path'),
    paths = require('./paths.js'),
    { pipeline } = require('stream'),
    promiseRetry = require('./promise-retry.js'),
    safe = require('safetydance'),
    semver = require('semver'),
    settings = require('./settings.js'),
    sftp = require('./sftp.js'),
    shell = require('./shell.js'),
    spawn = require('child_process').spawn,
    superagent = require('superagent'),
    system = require('./system.js');
// no-op lifecycle handler for addon phases that require no work
const NOOP = async function (/*app, options*/) {};

// helper scripts for privileged filesystem operations (run via shell/sudo)
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh');
const MKDIRVOLUME_CMD = path.join(__dirname, 'scripts/mkdirvolume.sh');
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
const ADDONS = {
    turn: {
        setup: setupTurn,
        teardown: teardownTurn,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    email: {
        setup: setupEmail,
        teardown: teardownEmail,
        backup: NOOP,
        restore: setupEmail,
        clear: NOOP
    },
    ldap: {
        setup: setupLdap,
        teardown: teardownLdap,
        backup: NOOP,
        restore: setupLdap,
        clear: NOOP
    },
    localstorage: {
        setup: setupLocalStorage,
        teardown: teardownLocalStorage,
        backup: NOOP, // no backup because it's already inside app data
        restore: NOOP,
        clear: clearLocalStorage
    },
    mongodb: {
        setup: setupMongoDb,
        teardown: teardownMongoDb,
        backup: backupMongoDb,
        restore: restoreMongoDb,
        clear: clearMongodb
    },
    mysql: {
        setup: setupMySql,
        teardown: teardownMySql,
        backup: backupMySql,
        restore: restoreMySql,
        clear: clearMySql
    },
    postgresql: {
        setup: setupPostgreSql,
        teardown: teardownPostgreSql,
        backup: backupPostgreSql,
        restore: restorePostgreSql,
        clear: clearPostgreSql
    },
    proxyAuth: {
        setup: setupProxyAuth,
        teardown: teardownProxyAuth,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    recvmail: {
        setup: setupRecvMail,
        teardown: teardownRecvMail,
        backup: NOOP,
        restore: setupRecvMail,
        clear: NOOP
    },
    redis: {
        setup: setupRedis,
        teardown: teardownRedis,
        backup: backupRedis,
        restore: restoreRedis,
        clear: clearRedis
    },
    sendmail: {
        setup: setupSendMail,
        teardown: teardownSendMail,
        backup: NOOP,
        restore: setupSendMail,
        clear: NOOP
    },
    scheduler: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    docker: {
        setup: NOOP,
        teardown: NOOP,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    tls: {
        setup: setupTls,
        teardown: teardownTls,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    },
    oauth: { // kept for backward compatibility. keep teardown for uninstall to work
        setup: NOOP,
        teardown: teardownOauth,
        backup: NOOP,
        restore: NOOP,
        clear: NOOP
    }
};
// services are actual containers that are running. addons are the concepts requested by app

// database containers get a larger default on bigger machines:
// 256MB base plus one 256MB slice per ~4GB of total RAM
const DATABASE_MEMORY_LIMIT = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024;

const SERVICES = {
    turn: {
        status: statusTurn,
        restart: docker.restartContainer.bind(null, 'turn'),
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    mail: {
        status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),
        restart: mail.restartMail,
        defaultMemoryLimit: mail.DEFAULT_MEMORY_LIMIT
    },
    mongodb: {
        status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'),
        restart: docker.restartContainer.bind(null, 'mongodb'),
        defaultMemoryLimit: DATABASE_MEMORY_LIMIT
    },
    mysql: {
        status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'),
        restart: docker.restartContainer.bind(null, 'mysql'),
        defaultMemoryLimit: DATABASE_MEMORY_LIMIT
    },
    postgresql: {
        status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'),
        restart: docker.restartContainer.bind(null, 'postgresql'),
        defaultMemoryLimit: DATABASE_MEMORY_LIMIT
    },
    docker: {
        status: statusDocker,
        restart: restartDocker,
        defaultMemoryLimit: 0 // host service, not memory-managed here
    },
    unbound: {
        status: statusUnbound,
        restart: restartUnbound,
        defaultMemoryLimit: 0 // host service, not memory-managed here
    },
    sftp: {
        status: sftp.status,
        restart: docker.restartContainer.bind(null, 'sftp'),
        defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT
    },
    graphite: {
        status: statusGraphite,
        restart: restartGraphite,
        defaultMemoryLimit: 256 * 1024 * 1024
    },
    nginx: {
        status: statusNginx,
        restart: restartNginx,
        defaultMemoryLimit: 0 // host service, not memory-managed here
    }
};
// per-app service instances; the container is named `<service>-<appId>`.
// Note: the stale `done` callback parameters from the pre-async era were
// removed — all call sites invoke these with only the instance argument.
const APP_SERVICES = {
    redis: {
        status: (instance) => containerStatus(`redis-${instance}`, 'CLOUDRON_REDIS_TOKEN'),
        start: (instance) => docker.startContainer(`redis-${instance}`),
        stop: (instance) => docker.stopContainer(`redis-${instance}`),
        restart: (instance) => docker.restartContainer(`redis-${instance}`),
        defaultMemoryLimit: 150 * 1024 * 1024
    }
};
// parses a docker image tag of the form 'repository:version@sha256:digest'.
// returns { repository, version (semver object or null if unparsable), digest
// (string or undefined when no digest is present) }.
// Uses slice() instead of the deprecated String.prototype.substr().
function parseImageTag(tag) {
    const repository = tag.split(':', 1)[0];
    const rest = tag.slice(repository.length + 1); // text after the first ':'
    const version = rest.split('@', 1)[0];
    const digest = rest.slice(version.length + 1).split(':', 2)[1]; // part after 'sha256:'
    return { repository, version: semver.parse(version), digest };
}
// a change in the image's semver major version means the service data needs an upgrade
function requiresUpgrade(existingTag, currentTag) {
    const existing = parseImageTag(existingTag);
    const current = parseImageTag(currentTag);
    return existing.version.major !== current.version.major;
}
// paths for dumps
// location of the database dump for an app's addon; undefined for addons without dumps
function dumpPath(addon, appId) {
    const dumpNames = {
        postgresql: 'postgresqldump',
        mysql: 'mysqldump',
        mongodb: 'mongodbdump',
        redis: 'dump.rdb'
    };

    if (!(addon in dumpNames)) return undefined; // matches the original fall-through

    return path.join(paths.APPS_DATA_DIR, appId, dumpNames[addon]);
}
// inspects a service container and returns its IP on the cloudron network, the
// auth token from its environment and the docker State object.
// throws INACTIVE when no IP is assigned, DOCKER_ERROR when env/token is missing.
async function getContainerDetails(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    const details = await docker.inspect(containerName);

    const ip = safe.query(details, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.INACTIVE, `Error getting IP of ${containerName} service`);

    // the cloudron token for auth is injected as '<TOKEN_ENV_NAME>=<value>'
    const env = safe.query(details, 'Config.Env', null);
    if (!env) throw new BoxError(BoxError.DOCKER_ERROR, `Error inspecting environment of ${containerName} service`);

    const entry = env.find((e) => e.startsWith(tokenEnvName));
    if (!entry) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);

    const token = entry.slice(tokenEnvName.length + 1); // +1 for the = sign
    if (!token) throw new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`);

    return { ip, token, state: details.State };
}
// reports the status of a platform container: one of the SERVICE_STATUS_*
// values plus memory usage and the healthcheck response body.
async function containerStatus(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    const [error, addonDetails] = await safe(getContainerDetails(containerName, tokenEnvName));
    if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return { status: exports.SERVICE_STATUS_STOPPED };
    if (error) throw error;

    const [networkError, response] = await safe(superagent.get(`http://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`)
        .timeout(20000)
        .ok(() => true));

    if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${networkError.message}` };
    // bugfix: use response.status (as in the guard and in waitForContainer);
    // response.statusCode was undefined here and printed 'undefined' in the message
    if (response.status !== 200 || !response.body.status) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}` };

    const result = await docker.memoryUsage(containerName);

    const stats = result.memory_stats || { usage: 0, limit: 1 }; // limit 1 avoids division by zero below

    return {
        status: addonDetails.state.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
        memoryUsed: stats.usage,
        memoryPercent: parseInt(100 * stats.usage / stats.limit),
        healthcheck: response.body
    };
}
// returns the ids of all platform services plus one 'redis:<appId>' entry for
// every app that uses the redis addon
async function listServices() {
    const serviceIds = Object.keys(SERVICES);

    const allApps = await apps.list();
    for (const app of allApps) {
        if (app.manifest.addons?.redis) serviceIds.push(`redis:${app.id}`);
    }

    return serviceIds;
}
// returns the user-provided configuration object of a service ({} when unset).
// platform services are stored in settings; per-app services on the app record.
async function getServiceConfig(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');

    if (!instance) { // platform service
        const servicesConfig = await settings.getServicesConfig();
        return servicesConfig[name] || {};
    }

    const app = await apps.get(instance);
    if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

    return app.servicesConfig[name] || {};
}
// collects the full status report of a service: health, memory usage and its
// effective configuration (falling back to the built-in default memory limit)
async function getServiceStatus(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');

    let service, statusFunc;
    if (instance) {
        service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
        statusFunc = service.status.bind(null, instance);
    } else if (SERVICES[name]) {
        service = SERVICES[name];
        statusFunc = service.status;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    const result = await statusFunc();

    const report = {
        name,
        status: result.status,
        memoryUsed: result.memoryUsed,
        memoryPercent: result.memoryPercent,
        error: result.error || null,
        healthcheck: result.healthcheck || null,
        config: await getServiceConfig(id)
    };

    // surface the built-in default when no explicit memory limit is configured
    if (!report.config.memoryLimit && service.defaultMemoryLimit) {
        report.config.memoryLimit = service.defaultMemoryLimit;
    }

    return report;
}
// persists a service's configuration. toggling recoveryMode requires the
// container to be recreated; otherwise only the memory limit is (re)applied.
// the rebuild/limit application runs in the background and may fail silently.
async function configureService(id, data, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance] = id.split(':');

    let needsRebuild = false;

    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        const app = await apps.get(instance);
        if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

        const servicesConfig = app.servicesConfig;
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there
        servicesConfig[name] = data;

        await apps.update(instance, { servicesConfig });
    } else if (SERVICES[name]) {
        const servicesConfig = await settings.getServicesConfig();
        needsRebuild = servicesConfig[name]?.recoveryMode != data.recoveryMode; // intentional != since 'recoveryMode' may or may not be there
        servicesConfig[name] = data;

        await settings.setServicesConfig(servicesConfig);
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`configureService: ${id} rebuild=${needsRebuild}`);

    // do this in background
    if (needsRebuild) {
        safe(rebuildService(id, auditSource), { debug });
    } else {
        safe(applyMemoryLimit(id), { debug });
    }

    await eventlog.add(eventlog.ACTION_SERVICE_CONFIGURE, auditSource, { id, data });
}
// returns a LogStream for a service's logs. docker/unbound log to journald;
// nginx has its own log files; everything else logs to LOG_DIR/<container>/app.log.
// closing the returned stream kills the underlying child process.
async function getServiceLogs(id, options) {
    assert.strictEqual(typeof id, 'string');
    assert(options && typeof options === 'object');
    assert.strictEqual(typeof options.lines, 'number');
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    const [name, instance] = id.split(':');

    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    } else if (!SERVICES[name]) {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    debug(`Getting logs for ${name}`);

    const { lines, follow } = options;
    const format = options.format || 'json';

    let cmd;
    const args = [];

    if (name === 'docker' || name === 'unbound') { // these use journald
        cmd = 'journalctl';
        args.push('--lines=' + (lines === -1 ? 'all' : lines));
        args.push(`--unit=${name}`);
        args.push('--no-pager');
        args.push('--output=short-iso');
        if (follow) args.push('--follow');
    } else {
        cmd = '/usr/bin/tail';
        args.push('--lines=' + (lines === -1 ? '+1' : lines));
        if (follow) args.push('--follow', '--retry', '--quiet'); // same as -F. to make it work if file doesn't exist, --quiet to not output file headers, which are no logs

        if (name === 'nginx') {
            args.push('/var/log/nginx/access.log');
            args.push('/var/log/nginx/error.log');
        } else {
            const containerName = APP_SERVICES[name] ? `${name}-${instance}` : name;
            args.push(path.join(paths.LOG_DIR, containerName, 'app.log'));
        }
    }

    const cp = spawn(cmd, args);

    const logStream = new LogStream({ format, source: name });
    logStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process

    cp.stdout.pipe(logStream);

    return logStream;
}
// recreates a service's docker container when it is missing or its config changed.
// this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
// passing an infra version of 'none' will not attempt to purge existing data
async function rebuildService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance] = id.split(':');

    switch (name) {
    case 'turn':
        await startTurn({ version: 'none' });
        break;
    case 'mongodb':
        await startMongodb({ version: 'none' });
        break;
    case 'postgresql':
        await startPostgresql({ version: 'none' });
        break;
    case 'mysql':
        await startMysql({ version: 'none' });
        break;
    case 'sftp':
        await sftp.start({ version: 'none' });
        break;
    case 'graphite':
        await startGraphite({ version: 'none' });
        break;
    case 'mail':
        await mail.startMail({ version: 'none' });
        break;
    case 'redis': {
        // per-app container; remove and recreate via the addon setup
        await shell.promises.exec('removeRedis', `docker rm -f redis-${instance} || true`);
        const app = await apps.get(instance);
        if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container
        break;
    }
    default:
        // nothing to rebuild for now.
        break;
    }

    safe(applyMemoryLimit(id), { debug }); // do this in background. ok to fail

    await eventlog.add(eventlog.ACTION_SERVICE_REBUILD, auditSource, { id });
}
// restarts a platform or per-app service container and records an eventlog entry
async function restartService(id, auditSource) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const [name, instance] = id.split(':');

    if (instance) {
        const service = APP_SERVICES[name];
        if (!service) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        await service.restart(instance);
    } else if (SERVICES[name]) {
        await SERVICES[name].restart();
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'Service not found');
    }

    await eventlog.add(eventlog.ACTION_SERVICE_RESTART, auditSource, { id });
}
// in the future, we can refcount and lazy start global services
// starts the per-app service containers of this app. errors are only logged so
// that "start app" does not fail; the user can fix the service from Services
async function startAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;

    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue; // addon has no backing container

        const [error] = await safe(APP_SERVICES[addon].start(instance)); // assume addons name is service name
        if (error) debug(`startAppServices: ${addon}:${instance}`, error);
    }
}
// in the future, we can refcount and stop global services as well
// stops the per-app service containers of this app. errors are only logged so
// that "stop app" does not fail; the user can fix the service from Services
async function stopAppServices(app) {
    assert.strictEqual(typeof app, 'object');

    const instance = app.id;

    for (const addon of Object.keys(app.manifest.addons || {})) {
        if (!(addon in APP_SERVICES)) continue; // addon has no backing container

        const [error] = await safe(APP_SERVICES[addon].stop(instance)); // assume addons name is service name
        if (error) debug(`stopAppServices: ${addon}:${instance}`, error);
    }
}
// polls the container's healthcheck endpoint (10 tries, 15s apart) until it
// answers 200 with a truthy status. throws ADDONS_ERROR when it never comes up.
async function waitForContainer(containerName, tokenEnvName) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');

    debug(`Waiting for ${containerName}`);

    const details = await getContainerDetails(containerName, tokenEnvName);

    await promiseRetry({ times: 10, interval: 15000, debug }, async () => {
        const [networkError, response] = await safe(superagent.get(`http://${details.ip}:3000/healthcheck?access_token=${details.token}`)
            .timeout(5000)
            .ok(() => true));

        if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${networkError.message}`);
        if (response.status !== 200 || !response.body.status) throw new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.status} message: ${response.body.message}`);
    });
}
// runs the setup handler of every addon the app requests. setup is idempotent
// (may be called again after a crashed configure) and must not lose data.
async function setupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('setupAddons: Setting up %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        debug(`setupAddons: setting up addon ${addon} with options ${JSON.stringify(options)}`);

        await ADDONS[addon].setup(app, options);
    }
}
// runs the teardown handler of every addon the app requests.
// teardown is destructive: any app data stored with the addon is lost.
async function teardownAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return;

    debug('teardownAddons: Tearing down %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        debug(`teardownAddons: Tearing down addon ${addon} with options ${JSON.stringify(options)}`);

        await ADDONS[addon].teardown(app, options);
    }
}
// runs the backup handler of every addon the app requests (dumps go under the
// app's data directory; see dumpPath)
async function backupAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('backupAddons');

    if (!addons) return;

    debug('backupAddons: backing up %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].backup(app, options);
    }
}
// runs the clear handler of every addon the app requests (drops addon data
// without removing the addon itself)
async function clearAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('clearAddons');

    if (!addons) return;

    debug('clearAddons: clearing %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].clear(app, options);
    }
}
// runs the restore handler of every addon the app requests (loads the dumps
// created by backupAddons)
async function restoreAddons(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    debug('restoreAddons');

    if (!addons) return;

    debug('restoreAddons: restoring %j', Object.keys(addons));

    for (const [addon, options] of Object.entries(addons)) {
        if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

        await ADDONS[addon].restore(app, options);
    }
}
// re-creates and re-populates a single app's database for the given addon
async function importAppDatabase(app, addon) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof addon, 'string');

    if (!(addon in ADDONS)) throw new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`);

    const options = app.manifest.addons[addon];

    await ADDONS[addon].setup(app, options);
    await ADDONS[addon].clear(app, options); // clear in case we crashed in a restore
    await ADDONS[addon].restore(app, options);
}
// imports the dumps of every app using this addon into a freshly created
// database container. apps whose import fails are marked as errored.
async function importDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`importDatabase: Importing ${addon}`);

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!(addon in app.manifest.addons)) continue; // app doesn't use the addon

        debug(`importDatabase: Importing addon ${addon} of app ${app.id}`);

        const [error] = await safe(importAppDatabase(app, addon));
        if (!error) continue;

        debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored`, error);
        // FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
        // not clear, if repair workflow should be part of addon or per-app
        await safe(apps.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }));
    }

    safe.fs.unlinkSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`)); // clean up for future migrations
}
// dumps the databases of all apps using this addon and then removes the addon
// container and its data dir, ready for re-creation. a checkpoint file makes
// the export idempotent across crashes/restarts.
async function exportDatabase(addon) {
    assert.strictEqual(typeof addon, 'string');

    debug(`exportDatabase: Exporting ${addon}`);

    if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) {
        debug(`exportDatabase: Already exported addon ${addon} in previous run`);
        return;
    }

    const allApps = await apps.list();

    for (const app of allApps) {
        if (!app.manifest.addons || !(addon in app.manifest.addons)) continue; // app doesn't use the addon
        if (app.installationState === apps.ISTATE_ERROR) continue; // missing db causes crash in old app addon containers

        debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);

        const [error] = await safe(ADDONS[addon].backup(app, app.manifest.addons[addon]));
        if (error) {
            debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
            // for errored apps, we can ignore if export had an error
            if (app.installationState === apps.ISTATE_ERROR) continue;
            throw error;
        }
    }

    safe.fs.writeFileSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8');
    // bugfix: 'new' was missing; invoking the BoxError class without it would
    // itself throw a TypeError instead of the intended error object
    if (safe.error) throw new BoxError(BoxError.FS_ERROR, 'Error writing export checkpoint file');

    // note: after this point, we are restart safe. it's ok if the box code crashes at this point
    await shell.promises.exec(`exportDatabase - remove${addon}`, `docker rm -f ${addon}`); // what if db writes something when quitting ...
    await shell.promises.sudo(`exportDatabase - removeAddonDir${addon}`, [ RMADDONDIR_CMD, addon ], {}); // ready to start afresh
}
// applies the configured memory limit (falling back to the service's built-in
// default) to the service's container via docker update
async function applyMemoryLimit(id) {
    assert.strictEqual(typeof id, 'string');

    const [name, instance] = id.split(':');

    const serviceConfig = await getServiceConfig(id);

    let containerName, memoryLimit;
    if (instance) {
        if (!APP_SERVICES[name]) throw new BoxError(BoxError.NOT_FOUND, 'Service not found');

        containerName = `${name}-${instance}`;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES[name].defaultMemoryLimit;
    } else if (SERVICES[name]) {
        containerName = name;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES[name].defaultMemoryLimit;
    } else {
        throw new BoxError(BoxError.NOT_FOUND, 'No such service');
    }

    debug(`applyMemoryLimit: ${containerName} ${JSON.stringify(serviceConfig)}`);

    const memory = await system.getMemoryAllocation(memoryLimit);

    await docker.update(containerName, memory, memoryLimit);
}
2021-08-25 19:41:46 -07:00
// Starts platform service containers after an infra change. On any infra version
// bump every service is recreated; otherwise only services whose image tag changed.
async function startServices(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    let startFuncs = [];

    // always start addons on any infra change, regardless of minor or major update
    if (existingInfra.version !== infra.version) {
        debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
        startFuncs = [
            mail.startMail, // start this first to reduce email downtime
            startTurn,
            startMysql,
            startPostgresql,
            startMongodb,
            startRedis,
            startGraphite,
            sftp.start
        ];
    } else {
        assert.strictEqual(typeof existingInfra.images, 'object');

        // same infra version: only (re)start services whose image tag changed
        const tagChanged = (name) => infra.images[name].tag !== existingInfra.images[name].tag;
        if (tagChanged('mail')) startFuncs.push(mail.startMail); // start this first to reduce email downtime
        if (tagChanged('turn')) startFuncs.push(startTurn);
        if (tagChanged('mysql')) startFuncs.push(startMysql);
        if (tagChanged('postgresql')) startFuncs.push(startPostgresql);
        if (tagChanged('mongodb')) startFuncs.push(startMongodb);
        if (tagChanged('redis')) startFuncs.push(startRedis);
        if (tagChanged('graphite')) startFuncs.push(startGraphite);
        if (tagChanged('sftp')) startFuncs.push(sftp.start);

        debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
    }

    for (const startFunc of startFuncs) {
        await startFunc(existingInfra); // sequential on purpose; order above matters
    }

    // we always start db containers with unlimited memory. we then scale them down per configuration
    for (const serviceId of [ 'mysql', 'postgresql', 'mongodb' ]) {
        safe(applyMemoryLimit(serviceId), { debug }); // no waiting. and it's ok if applying service configs fails
    }
}
// Returns the app's addon environment as an array of 'NAME=value' strings,
// suitable for passing to the app container.
async function getEnvironment(app) {
    assert.strictEqual(typeof app, 'object');

    const configs = await addonConfigs.getByAppId(app.id);
    // docker addon apps additionally get the proxied docker endpoint
    if (app.manifest.addons['docker']) configs.push({ name: 'CLOUDRON_DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` });

    return configs.map((entry) => `${entry.name}=${entry.value}`);
}
2015-11-02 11:20:50 -08:00
// Returns the container names that the given addons create for this app.
// Currently only the scheduler addon spawns extra containers (one per task).
function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return [];

    let names = [];
    for (const addon of Object.keys(addons)) {
        if (addon !== 'scheduler') continue;
        // names here depend on how scheduler.js creates containers
        names = names.concat(Object.keys(addons.scheduler).map((taskName) => `${app.id}-${taskName}`));
    }

    return names;
}
2021-08-25 19:41:46 -07:00
// Creates the app's local storage data directory (via sudo helper script).
async function setupLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupLocalStorage');

    const storageDir = await apps.getStorageDir(app);

    const [mkdirError] = await safe(shell.promises.sudo('createVolume', [ MKDIRVOLUME_CMD, storageDir ], {}));
    if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `Error creating app storage data dir: ${mkdirError.message}`);
}
2021-08-25 19:41:46 -07:00
// Empties the app's local storage data directory (keeps the directory itself).
// Throws BoxError.FS_ERROR if the clear command fails.
async function clearLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('clearLocalStorage');

    const volumeDataDir = await apps.getStorageDir(app);
    // wrap in safe() to get the [error] tuple; the raw sudo promise rejects on
    // failure and does not resolve to a tuple (see setupLocalStorage above)
    const [error] = await safe(shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, 'clear', volumeDataDir ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, `Error clearing app storage data dir: ${error.message}`);
}
2021-08-25 19:41:46 -07:00
// Removes the app's local storage data directory entirely.
// Throws BoxError.FS_ERROR if the removal command fails.
async function teardownLocalStorage(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('teardownLocalStorage');

    const volumeDataDir = await apps.getStorageDir(app);
    // wrap in safe() to get the [error] tuple; the raw sudo promise rejects on
    // failure and does not resolve to a tuple (see setupLocalStorage above)
    const [error] = await safe(shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, 'rmdir', volumeDataDir ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, `Error removing app storage data dir: ${error.message}`);
}
2021-08-25 19:41:46 -07:00
// Registers the 'turn' addon environment (STUN/TURN endpoints + shared secret) for an app.
// The secret itself is created by startTurn(); missing secret here is an error.
async function setupTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) throw new BoxError(BoxError.ADDONS_ERROR, 'Turn secret is missing');

    const serverFqdn = settings.dashboardFqdn(); // STUN and TURN are served from the dashboard host

    const env = [
        { name: 'CLOUDRON_STUN_SERVER', value: serverFqdn },
        { name: 'CLOUDRON_STUN_PORT', value: '3478' },
        { name: 'CLOUDRON_STUN_TLS_PORT', value: '5349' },
        { name: 'CLOUDRON_TURN_SERVER', value: serverFqdn },
        { name: 'CLOUDRON_TURN_PORT', value: '3478' },
        { name: 'CLOUDRON_TURN_TLS_PORT', value: '5349' },
        { name: 'CLOUDRON_TURN_SECRET', value: turnSecret }
    ];

    debug('Setting up TURN');

    await addonConfigs.set(app.id, 'turn', env);
}
2021-09-26 22:48:14 -07:00
// Recreates and starts the TURN/STUN service container.
// Generates and persists the shared TURN secret on first run.
// Fix: typo in the debug message ('generting' -> 'generating').
async function startTurn(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    const serviceConfig = await getServiceConfig('turn');
    const tag = infra.images.turn.tag;
    const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit;
    const memory = await system.getMemoryAllocation(memoryLimit);
    const realm = settings.dashboardFqdn();

    let turnSecret = await blobs.getString(blobs.ADDON_TURN_SECRET);
    if (!turnSecret) {
        debug('startTurn: generating turn secret');
        turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
        await blobs.setString(blobs.ADDON_TURN_SECRET, turnSecret);
    }

    // recovery mode: keep the fs writable and just sleep, so an admin can exec into the container
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // this exports 3478/tcp, 5349/tls and 50000-51000/udp. note that this runs on the host network because docker's userland proxy
    // is spun for every port. we can disable this in some future release with --userland-proxy=false
    // https://github.com/moby/moby/issues/8356 and https://github.com/moby/moby/issues/14856
    const runCmd = `docker run --restart=always -d --name="turn" \
        --hostname turn \
        --net host \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=turn \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_TURN_SECRET="${turnSecret}" \
        -e CLOUDRON_REALM="${realm}" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    await shell.promises.exec('stopTurn', 'docker stop turn || true');
    await shell.promises.exec('removeTurn', 'docker rm -f turn || true');
    await shell.promises.exec('startTurn', runCmd);
}
// Tears down the 'turn' addon for an app: removes the CLOUDRON_TURN_*/CLOUDRON_STUN_*
// environment entries registered by setupTurn(). Does not touch the turn container itself.
async function teardownTurn(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // unused; kept for the uniform addon teardown signature

    debug('Tearing down TURN');

    await addonConfigs.unset(app.id, 'turn');
}
2021-08-25 19:41:46 -07:00
// Registers the 'email' addon environment for an app: SMTP/IMAP/Sieve endpoints of
// the platform mail container plus the list of enabled mail domains.
async function setupEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const mailDomains = await mail.listDomains();
    const enabledDomains = mailDomains.filter((d) => d.enabled).map((d) => d.domain);
    const mailInDomains = enabledDomains.join(',');

    // note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
    const env = [
        { name: 'CLOUDRON_EMAIL_SMTP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_SMTP_PORT', value: '2525' },
        { name: 'CLOUDRON_EMAIL_SMTPS_PORT', value: '2465' },
        { name: 'CLOUDRON_EMAIL_STARTTLS_PORT', value: '2587' },
        { name: 'CLOUDRON_EMAIL_IMAP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_IMAPS_PORT', value: '9993' },
        { name: 'CLOUDRON_EMAIL_IMAP_PORT', value: '9393' },
        { name: 'CLOUDRON_EMAIL_SIEVE_SERVER', value: 'mail' },
        { name: 'CLOUDRON_EMAIL_SIEVE_PORT', value: '4190' }, // starttls
        { name: 'CLOUDRON_EMAIL_DOMAIN', value: app.domain },
        { name: 'CLOUDRON_EMAIL_DOMAINS', value: mailInDomains },
        { name: 'CLOUDRON_EMAIL_SERVER_HOST', value: settings.mailFqdn() }, // this is also a hint to reconfigure on mail server name change
        { name: 'CLOUDRON_EMAIL_LDAP_MAILBOXES_BASE_DN', value: 'ou=mailboxes,dc=cloudron' }
    ];

    debug('Setting up Email');

    await addonConfigs.set(app.id, 'email', env);
}
2021-08-25 19:41:46 -07:00
// Tears down the 'email' addon for an app: removes the CLOUDRON_EMAIL_* environment
// entries registered by setupEmail().
async function teardownEmail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // unused; kept for the uniform addon teardown signature

    debug('Tearing down Email');

    await addonConfigs.unset(app.id, 'email');
}
2021-08-25 19:41:46 -07:00
// Registers the 'ldap' addon environment for an app. Apps without single sign-on
// get no LDAP config at all.
async function setupLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    if (!app.sso) return;

    const ldapHost = '172.18.0.1'; // the box LDAP server listens on the docker bridge

    const env = [
        { name: 'CLOUDRON_LDAP_SERVER', value: ldapHost },
        { name: 'CLOUDRON_LDAP_HOST', value: ldapHost }, // to keep things in sync with the database _HOST vars
        { name: 'CLOUDRON_LDAP_PORT', value: String(constants.LDAP_PORT) },
        { name: 'CLOUDRON_LDAP_URL', value: `ldap://${ldapHost}:${constants.LDAP_PORT}` },
        { name: 'CLOUDRON_LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
        { name: 'CLOUDRON_LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
        { name: 'CLOUDRON_LDAP_BIND_DN', value: `cn=${app.id},ou=apps,dc=cloudron` },
        { name: 'CLOUDRON_LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
    ];

    debug('Setting up LDAP');

    await addonConfigs.set(app.id, 'ldap', env);
}
2021-08-25 19:41:46 -07:00
// Tears down the 'ldap' addon for an app: removes the CLOUDRON_LDAP_* environment
// entries registered by setupLdap(). Safe to call even if the app had no sso config.
async function teardownLdap(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // unused; kept for the uniform addon teardown signature

    debug('Tearing down LDAP');

    await addonConfigs.unset(app.id, 'ldap');
}
2021-08-25 19:41:46 -07:00
// Registers the 'sendmail' addon environment (outbound SMTP credentials) for an app.
// If the addon is optional and the user disabled the mailbox, an empty config is stored.
// An existing SMTP password is preserved across re-setups.
async function setupSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up SendMail');

    if (app.manifest.addons.sendmail.optional && !app.enableMailbox) return await addonConfigs.set(app.id, 'sendmail', []);

    const existingPassword = await addonConfigs.getByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length
    const mailbox = `${app.mailboxName}@${app.mailboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_SMTP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_MAIL_SMTP_PORT', value: '2525' },
        { name: 'CLOUDRON_MAIL_SMTPS_PORT', value: '2465' },
        { name: 'CLOUDRON_MAIL_STARTTLS_PORT', value: '2587' },
        { name: 'CLOUDRON_MAIL_SMTP_USERNAME', value: mailbox },
        { name: 'CLOUDRON_MAIL_SMTP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_FROM', value: mailbox },
        { name: 'CLOUDRON_MAIL_DOMAIN', value: app.mailboxDomain }
    ];
    if (app.manifest.addons.sendmail.supportsDisplayName) env.push({ name: 'CLOUDRON_MAIL_FROM_DISPLAY_NAME', value: app.mailboxDisplayName });

    debug('Setting sendmail addon config to %j', env);

    await addonConfigs.set(app.id, 'sendmail', env);
}
2021-08-25 19:41:46 -07:00
// Tears down the 'sendmail' addon for an app: removes the CLOUDRON_MAIL_* (SMTP)
// environment entries registered by setupSendMail().
async function teardownSendMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // unused; kept for the uniform addon teardown signature

    debug('Tearing down sendmail');

    await addonConfigs.unset(app.id, 'sendmail');
}
2021-08-25 19:41:46 -07:00
// Registers the 'recvmail' addon environment (IMAP/POP3 credentials) for an app.
// If the inbox is disabled, an empty config is stored. An existing IMAP password is
// preserved across re-setups.
async function setupRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('setupRecvMail: setting up recvmail');

    if (!app.enableInbox) return await addonConfigs.set(app.id, 'recvmail', []);

    const existingPassword = await addonConfigs.getByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD');
    const password = existingPassword || hat(4 * 48); // see box#565 for password length
    const inbox = `${app.inboxName}@${app.inboxDomain}`;

    const env = [
        { name: 'CLOUDRON_MAIL_IMAP_SERVER', value: 'mail' },
        { name: 'CLOUDRON_MAIL_IMAP_PORT', value: '9393' },
        { name: 'CLOUDRON_MAIL_IMAPS_PORT', value: '9993' },
        { name: 'CLOUDRON_MAIL_POP3_PORT', value: '9595' },
        { name: 'CLOUDRON_MAIL_POP3S_PORT', value: '9995' },
        { name: 'CLOUDRON_MAIL_IMAP_USERNAME', value: inbox },
        { name: 'CLOUDRON_MAIL_IMAP_PASSWORD', value: password },
        { name: 'CLOUDRON_MAIL_TO', value: inbox },
        { name: 'CLOUDRON_MAIL_TO_DOMAIN', value: app.inboxDomain },
    ];

    debug('setupRecvMail: setting recvmail addon config to %j', env);

    await addonConfigs.set(app.id, 'recvmail', env);
}
2021-08-25 19:41:46 -07:00
// Tears down the 'recvmail' addon for an app: removes the CLOUDRON_MAIL_* (IMAP/POP3)
// environment entries registered by setupRecvMail().
async function teardownRecvMail(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // unused; kept for the uniform addon teardown signature

    debug('teardownRecvMail: tearing down recvmail');

    await addonConfigs.unset(app.id, 'recvmail');
}
// Derives the per-app mysql database/user name: the first 16 hex chars of md5(appId).
// Hashing gets rid of the "-" in app ids; 16 is the max length of mysql usernames.
function mysqlDatabaseName(appId) {
    assert.strictEqual(typeof appId, 'string');

    return crypto.createHash('md5').update(appId).digest('hex').slice(0, 16);
}
2021-08-25 19:41:46 -07:00
// Recreates and starts the platform mysql container.
// If the image tag change requires a major upgrade (requiresUpgrade), all databases are
// exported to dump files first and re-imported after the new container is healthy.
async function startMysql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');
    const tag = infra.images.mysql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh credentials on every (re)start
    const cloudronToken = hat(8 * 128); // access token for the container's management API on port 3000
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mysql.tag, tag);

    if (upgrading) {
        debug('startMysql: mysql will be upgraded');
        await exportDatabase('mysql'); // dump all app databases before the old container goes away
    }

    const serviceConfig = await getServiceConfig('mysql');
    // recovery mode: keep the fs writable and just sleep so an admin can exec into the container
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=always -d --name="mysql" \
        --hostname mysql \
        --net cloudron \
        --net-alias mysql \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=mysql \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_MYSQL_TOKEN=${cloudronToken} \
        -e CLOUDRON_MYSQL_ROOT_HOST=172.18.0.1 \
        -e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
        -v "${dataDir}/mysql:/var/lib/mysql" \
        --label isCloudronManaged=true \
        --cap-add SYS_NICE \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    await shell.promises.exec('stopMysql', 'docker stop mysql || true');
    await shell.promises.exec('removeMysql', 'docker rm -f mysql || true');
    await shell.promises.exec('startMysql', runCmd);

    if (!serviceConfig.recoveryMode) {
        await waitForContainer('mysql', 'CLOUDRON_MYSQL_TOKEN'); // wait until the container publishes its token
        if (upgrading) await importDatabase('mysql');
    }
}
// Provisions a mysql database (or a database prefix when options.multipleDatabases)
// for an app via the mysql container's REST API and stores the CLOUDRON_MYSQL_* env.
// An existing password is preserved across re-setups.
async function setupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up mysql');

    const existingPassword = await addonConfigs.getByName(app.id, 'mysql', '%MYSQL_PASSWORD');
    const name = mysqlDatabaseName(app.id); // same value serves as database, prefix and username
    const data = {
        database: name,
        prefix: name,
        username: name,
        password: existingPassword || hat(4 * 48) // see box#362 for password length
    };

    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/${resource}?access_token=${result.token}`)
        .send(data)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mysql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mysql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_MYSQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_MYSQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_MYSQL_HOST', value: 'mysql' },
        { name: 'CLOUDRON_MYSQL_PORT', value: '3306' }
    ];
    if (options.multipleDatabases) {
        env.push({ name: 'CLOUDRON_MYSQL_DATABASE_PREFIX', value: `${data.prefix}_` });
    } else {
        env.push(
            { name: 'CLOUDRON_MYSQL_URL', value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
            { name: 'CLOUDRON_MYSQL_DATABASE', value: data.database }
        );
    }

    debug('Setting mysql addon config to %j', env);

    await addonConfigs.set(app.id, 'mysql', env);
}
2021-08-25 19:41:46 -07:00
// Clears (drops all tables of) the app's mysql database or database prefix via the
// mysql container's REST API.
async function clearMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const resource = options.multipleDatabases ? 'prefixes' : 'databases';

    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/${resource}/${database}/clear?access_token=${result.token}`)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mysql. Status code: ${response.status} message: ${response.body.message}`);
}
2021-08-25 19:41:46 -07:00
// Drops the app's mysql database (or prefix) and user via the mysql container's
// REST API, then removes the CLOUDRON_MYSQL_* environment entries.
async function teardownMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const database = mysqlDatabaseName(app.id);
    const username = database; // the user name mirrors the database name
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const resource = options.multipleDatabases ? 'prefixes' : 'databases';

    const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/${resource}/${database}?access_token=${result.token}&username=${username}`)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'mysql');
}
2021-08-25 19:41:46 -07:00
// POSTs to url and streams the 200 response body into filename. Used to dump an
// addon database into a local file. Rejects with BoxError.NETWORK_ERROR on
// connection errors and BoxError.ADDONS_ERROR on bad status / truncated streams.
// Fix: error messages interpolated a literal '$(unknown)' instead of the filename.
async function pipeRequestToFile(url, filename) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof filename, 'string');

    return new Promise((resolve, reject) => {
        const writeStream = fs.createWriteStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest

        request.setTimeout(600000, () => {
            debug('pipeRequestToFile: timeout - connect or post-connect idle timeout');
            request.destroy(); // connect OR post-connect idle timeout
            reject(new Error('Request timedout'));
        });

        request.on('error', (error) => reject(new BoxError(BoxError.NETWORK_ERROR, `Could not pipe ${url} to ${filename}: ${error.message}`))); // network error, dns error

        request.on('response', (response) => {
            debug(`pipeRequestToFile: connected with status code ${response.statusCode}`);
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${url} to ${filename}: status ${response.statusCode}`));

            // pipeline() propagates errors of either stream and destroys both on failure
            pipeline(response, writeStream, (error) => {
                if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping ${url} to ${filename}: ${error.message}`));
                if (!response.complete) return reject(new BoxError(BoxError.ADDONS_ERROR, `Response not complete when piping ${url} to ${filename}`));
                resolve();
            });
        });

        request.end(); // make the request
    });
}
// Streams filename as the body of a POST to url. Used to restore an addon database
// from a local dump file. Resolves only after a 200 response is received.
// Fix: error messages interpolated a literal '$(unknown)' instead of the filename,
// and the pipeline error message dropped the underlying error message.
async function pipeFileToRequest(filename, url) {
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof url, 'string');

    return new Promise((resolve, reject) => {
        const readStream = fs.createReadStream(filename);
        const request = http.request(url, { method: 'POST' }); // ClientRequest

        request.setTimeout(600000, () => {
            debug('pipeFileToRequest: timeout - connect or post-connect idle timeout');
            request.destroy();
            reject(new Error('Request timedout'));
        });

        request.on('response', (response) => {
            debug(`pipeFileToRequest: request completed with status code ${response.statusCode}`);
            response.resume(); // drain the response
            if (response.statusCode !== 200) return reject(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code or HTTP error when piping ${filename} to ${url}: status ${response.statusCode} complete ${response.complete}`));
            resolve();
        });

        debug(`pipeFileToRequest: piping ${filename} to ${url}`);
        pipeline(readStream, request, function (error) {
            if (error) return reject(new BoxError(BoxError.ADDONS_ERROR, `Error piping file ${filename} to request ${url}: ${error.message}`));
            debug(`pipeFileToRequest: piped ${filename} to ${url}`); // now we have to wait for 'response' above
        });
    });
}
2021-08-25 19:41:46 -07:00
// Dumps the app's mysql database (or prefix) into the local dump file via the
// mysql container's REST API.
async function backupMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up mysql');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const backupUrl = `http://${result.ip}:3000/${resource}/${database}/backup?access_token=${result.token}`;

    await pipeRequestToFile(backupUrl, dumpPath('mysql', app.id));
}
2021-08-25 19:41:46 -07:00
// Restores the app's mysql database (or prefix) from the local dump file via the
// mysql container's REST API.
async function restoreMySql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('restoreMySql');

    const database = mysqlDatabaseName(app.id);
    const result = await getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN');
    const resource = options.multipleDatabases ? 'prefixes' : 'databases';
    const restoreUrl = `http://${result.ip}:3000/${resource}/${database}/restore?access_token=${result.token}`;

    await pipeFileToRequest(dumpPath('mysql', app.id), restoreUrl);
}
// Derives the per-app postgresql database and user names from the app id.
// Dashes are stripped from the id. Adds the input type assert for consistency
// with mysqlDatabaseName() and avoids mutating the parameter.
function postgreSqlNames(appId) {
    assert.strictEqual(typeof appId, 'string');

    const id = appId.replace(/-/g, '');

    return { database: `db${id}`, username: `user${id}` };
}
2021-08-25 19:41:46 -07:00
// Recreates and starts the platform postgresql container.
// If the image tag change requires a major upgrade (requiresUpgrade), all databases are
// exported to dump files first and re-imported after the new container is healthy.
async function startPostgresql(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');
    const tag = infra.images.postgresql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh credentials on every (re)start
    const cloudronToken = hat(8 * 128); // access token for the container's management API on port 3000
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.postgresql.tag, tag);

    if (upgrading) {
        debug('startPostgresql: postgresql will be upgraded');
        await exportDatabase('postgresql'); // dump all app databases before the old container goes away
    }

    const serviceConfig = await getServiceConfig('postgresql');
    // recovery mode: keep the fs writable and just sleep so an admin can exec into the container
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // memory options are applied dynamically. import requires all the memory we can get
    const runCmd = `docker run --restart=always -d --name="postgresql" \
        --hostname postgresql \
        --net cloudron \
        --net-alias postgresql \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=postgresql \
        --dns 172.18.0.1 \
        --dns-search=. \
        --shm-size=128M \
        -e CLOUDRON_POSTGRESQL_ROOT_PASSWORD="${rootPassword}" \
        -e CLOUDRON_POSTGRESQL_TOKEN="${cloudronToken}" \
        -v "${dataDir}/postgresql:/var/lib/postgresql" \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;

    await shell.promises.exec('stopPostgresql', 'docker stop postgresql || true');
    await shell.promises.exec('removePostgresql', 'docker rm -f postgresql || true');
    await shell.promises.exec('startPostgresql', runCmd);

    if (!serviceConfig.recoveryMode) {
        await waitForContainer('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'); // wait until the container publishes its token
        if (upgrading) await importDatabase('postgresql');
    }
}
// Provisions a postgresql database and user for an app via the postgresql
// container's REST API and stores the CLOUDRON_POSTGRESQL_* env.
// An existing password is preserved across re-setups.
async function setupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Setting up postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const existingPassword = await addonConfigs.getByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD');

    const data = {
        database,
        username,
        password: existingPassword || hat(4 * 128),
        locale: options.locale || 'C'
    };

    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
    const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases?access_token=${result.token}`)
        .send(data)
        .ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up postgresql: ${networkError.message}`);
    if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up postgresql. Status code: ${response.status} message: ${response.body.message}`);

    const env = [
        { name: 'CLOUDRON_POSTGRESQL_URL', value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
        { name: 'CLOUDRON_POSTGRESQL_USERNAME', value: data.username },
        { name: 'CLOUDRON_POSTGRESQL_PASSWORD', value: data.password },
        { name: 'CLOUDRON_POSTGRESQL_HOST', value: 'postgresql' },
        { name: 'CLOUDRON_POSTGRESQL_PORT', value: '5432' },
        { name: 'CLOUDRON_POSTGRESQL_DATABASE', value: data.database }
    ];

    debug('Setting postgresql addon config to %j', env);

    await addonConfigs.set(app.id, 'postgresql', env);
}
2021-08-25 19:41:46 -07:00
// Clears (empties) the app's postgresql database via the postgresql container's
// REST API, recreating it with the requested locale.
async function clearPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Clearing postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const locale = options.locale || 'C';
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
    const clearUrl = `http://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}&locale=${locale}`;

    const [networkError, response] = await safe(superagent.post(clearUrl).ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.status} message: ${response.body.message}`);
}
2021-08-25 19:41:46 -07:00
// Drops the app's postgresql database and user via the postgresql container's
// REST API, then removes the CLOUDRON_POSTGRESQL_* environment entries.
async function teardownPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    const { database, username } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
    const deleteUrl = `http://${result.ip}:3000/databases/${database}?access_token=${result.token}&username=${username}`;

    const [networkError, response] = await safe(superagent.del(deleteUrl).ok(() => true));
    if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down postgresql: ${networkError.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down postgresql. Status code: ${response.status} message: ${response.body.message}`);

    await addonConfigs.unset(app.id, 'postgresql');
}
2021-08-25 19:41:46 -07:00
// Dumps the app's postgresql database into the local dump file via the
// postgresql container's REST API.
async function backupPostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Backing up postgresql');

    const { database } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
    const backupUrl = `http://${result.ip}:3000/databases/${database}/backup?access_token=${result.token}`;

    await pipeRequestToFile(backupUrl, dumpPath('postgresql', app.id));
}
2021-08-25 19:41:46 -07:00
// Restores the app's postgresql database from the local dump file via the
// postgresql container's REST API.
async function restorePostgreSql(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    debug('Restore postgresql');

    const { database, username } = postgreSqlNames(app.id);
    const result = await getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN');
    const restoreUrl = `http://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}&username=${username}`;

    await pipeFileToRequest(dumpPath('postgresql', app.id), restoreUrl);
}
2021-08-25 19:41:46 -07:00
async function startMongodb(existingInfra) {
2020-03-27 21:37:06 +01:00
assert.strictEqual(typeof existingInfra, 'object');
const tag = infra.images.mongodb.tag;
const dataDir = paths.PLATFORM_DATA_DIR;
const rootPassword = hat(8 * 128);
const cloudronToken = hat(8 * 128);
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mongodb.tag, tag);
2021-08-25 19:41:46 -07:00
if (upgrading) {
debug('startMongodb: mongodb will be upgraded');
await exportDatabase('mongodb');
}
2021-10-01 12:09:13 -07:00
const serviceConfig = await getServiceConfig('mongodb');
const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
2021-08-25 19:41:46 -07:00
// memory options are applied dynamically. import requires all the memory we can get
2021-10-01 12:09:13 -07:00
const runCmd = `docker run --restart=always -d --name="mongodb" \
2021-08-25 19:41:46 -07:00
--hostname mongodb \
--net cloudron \
--net-alias mongodb \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=mongodb \
--dns 172.18.0.1 \
--dns-search=. \
-e CLOUDRON_MONGODB_ROOT_PASSWORD="${rootPassword}" \
-e CLOUDRON_MONGODB_TOKEN="${cloudronToken}" \
-v "${dataDir}/mongodb:/var/lib/mongodb" \
--label isCloudronManaged=true \
2021-10-01 12:09:13 -07:00
${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;
2021-08-25 19:41:46 -07:00
await shell.promises.exec('stopMongodb', 'docker stop mongodb || true');
await shell.promises.exec('removeMongodb', 'docker rm -f mongodb || true');
2021-10-01 12:09:13 -07:00
await shell.promises.exec('startMongodb', runCmd);
2021-08-25 19:41:46 -07:00
if (!serviceConfig.recoveryMode) {
await waitForContainer('mongodb', 'CLOUDRON_MONGODB_TOKEN');
if (upgrading) await importDatabase('mongodb');
}
2021-08-25 19:41:46 -07:00
}
async function setupMongoDb(app, options) {
assert.strictEqual(typeof app, 'object');
2015-10-07 16:10:08 -07:00
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Setting up mongodb');
2021-08-25 19:41:46 -07:00
const existingPassword = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_PASSWORD');
let database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
database = database || hat(8 * 8); // 16 bytes. keep this short, so as to not overflow the 127 byte index length in MongoDB < 4.4
2021-08-25 19:41:46 -07:00
const data = {
database,
username: app.id,
password: existingPassword || hat(4 * 128),
oplog: !!options.oplog
};
2020-08-17 10:02:46 -07:00
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');
const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases?access_token=${result.token}`)
2021-08-25 19:41:46 -07:00
.send(data)
.ok(() => true));
2021-08-25 19:41:46 -07:00
if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mongodb: ${networkError.message}`);
if (response.status !== 201) throw new BoxError(BoxError.ADDONS_ERROR, `Error setting up mongodb. Status code: ${response.status} message: ${response.body.message}`);
2018-09-11 19:27:06 +02:00
2021-08-25 19:41:46 -07:00
const env = [
{ name: 'CLOUDRON_MONGODB_URL', value : `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
{ name: 'CLOUDRON_MONGODB_USERNAME', value : data.username },
{ name: 'CLOUDRON_MONGODB_PASSWORD', value: data.password },
{ name: 'CLOUDRON_MONGODB_HOST', value : 'mongodb' },
{ name: 'CLOUDRON_MONGODB_PORT', value : '27017' },
{ name: 'CLOUDRON_MONGODB_DATABASE', value : data.database }
2021-08-25 19:41:46 -07:00
];
2018-09-11 19:27:06 +02:00
2021-08-25 19:41:46 -07:00
if (options.oplog) {
env.push({ name: 'CLOUDRON_MONGODB_OPLOG_URL', value : `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });
2021-08-25 19:41:46 -07:00
}
2021-10-01 09:17:44 -07:00
debug('Setting mongodb addon config to %j', env);
2021-08-25 19:41:46 -07:00
await addonConfigs.set(app.id, 'mongodb', env);
}
2021-08-25 19:41:46 -07:00
async function clearMongodb(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');
2021-08-25 19:41:46 -07:00
const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error clearing mongodb. No database');
const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}`)
2021-08-25 19:41:46 -07:00
.ok(() => true));
2020-08-17 10:02:46 -07:00
2021-08-25 19:41:46 -07:00
if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mongodb: ${networkError.message}`);
if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing mongodb. Status code: ${response.status} message: ${response.body.message}`);
}
2021-08-25 19:41:46 -07:00
async function teardownMongoDb(app, options) {
2015-10-07 16:10:08 -07:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');
2021-08-25 19:41:46 -07:00
const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
if (!database) return;
2018-09-11 19:27:06 +02:00
const [networkError, response] = await safe(superagent.del(`http://${result.ip}:3000/databases/${database}?access_token=${result.token}`)
2021-08-25 19:41:46 -07:00
.ok(() => true));
2020-08-17 10:02:46 -07:00
2021-08-25 19:41:46 -07:00
if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb: ${networkError.message}`);
if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb. Status code: ${response.status} message: ${response.body.message}`);
addonConfigs.unset(app.id, 'mongodb');
}
2021-08-25 19:41:46 -07:00
async function backupMongoDb(app, options) {
2018-02-08 15:07:49 +01:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Backing up mongodb');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');
2021-08-25 19:41:46 -07:00
const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error backing up mongodb. No database');
2020-08-17 10:02:46 -07:00
await pipeRequestToFile(`http://${result.ip}:3000/databases/${database}/backup?access_token=${result.token}`, dumpPath('mongodb', app.id));
}
2021-08-25 19:41:46 -07:00
async function restoreMongoDb(app, options) {
2018-02-08 15:07:49 +01:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('restoreMongoDb');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN');
2021-08-25 19:41:46 -07:00
const database = await addonConfigs.getByName(app.id, 'mongodb', '%MONGODB_DATABASE');
if (!database) throw new BoxError(BoxError.NOT_FOUND, 'Error restoring mongodb. No database');
await pipeFileToRequest(dumpPath('mongodb', app.id), `http://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}`);
}
2021-09-26 22:48:14 -07:00
async function startGraphite(existingInfra) {
assert.strictEqual(typeof existingInfra, 'object');
2021-09-26 22:48:14 -07:00
const serviceConfig = await getServiceConfig('graphite');
const tag = infra.images.graphite.tag;
const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024;
2022-11-04 15:09:37 +01:00
const memory = await system.getMemoryAllocation(memoryLimit);
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);
if (upgrading) debug('startGraphite: graphite will be upgraded');
2021-10-01 12:09:13 -07:00
const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
// port 2003 is used by collectd
2021-10-01 12:09:13 -07:00
const runCmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
2021-10-01 12:09:13 -07:00
${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;
2021-08-25 19:41:46 -07:00
await shell.promises.exec('stopGraphite', 'docker stop graphite || true');
await shell.promises.exec('removeGraphite', 'docker rm -f graphite || true');
if (upgrading) await shell.promises.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {});
2021-10-01 12:09:13 -07:00
await shell.promises.exec('startGraphite', runCmd);
2021-08-25 19:41:46 -07:00
// restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
2021-08-30 09:24:55 -07:00
setTimeout(async () => await safe(shell.promises.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {})), 60000);
}
2021-08-25 19:41:46 -07:00
async function setupProxyAuth(app, options) {
2020-11-26 15:04:25 -08:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Setting up proxyAuth');
2020-11-26 15:04:25 -08:00
const enabled = app.sso && app.manifest.addons && app.manifest.addons.proxyAuth;
2021-08-25 19:41:46 -07:00
if (!enabled) return;
2020-11-26 15:04:25 -08:00
const env = [ { name: 'CLOUDRON_PROXY_AUTH', value: '1' } ];
2021-08-25 19:41:46 -07:00
await addonConfigs.set(app.id, 'proxyauth', env);
2020-11-26 15:04:25 -08:00
}
2021-08-25 19:41:46 -07:00
async function teardownProxyAuth(app, options) {
2020-11-26 15:04:25 -08:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-08-25 19:41:46 -07:00
await addonConfigs.unset(app.id, 'proxyauth');
2020-11-26 15:04:25 -08:00
}
2021-08-25 19:41:46 -07:00
async function startRedis(existingInfra) {
assert.strictEqual(typeof existingInfra, 'object');
const tag = infra.images.redis.tag;
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis.tag, tag);
2021-08-25 19:41:46 -07:00
const allApps = await apps.list();
2021-08-25 19:41:46 -07:00
for (const app of allApps) {
if (!('redis' in app.manifest.addons)) continue; // app doesn't use the addon
2022-06-23 15:52:59 -07:00
const redisName = `redis-${app.id}`;
2021-08-25 19:41:46 -07:00
if (upgrading) await backupRedis(app, {});
2021-08-25 19:41:46 -07:00
await shell.promises.exec('stopRedis', `docker stop ${redisName} || true`); // redis will backup as part of signal handling
await shell.promises.exec('removeRedis', `docker rm -f ${redisName} || true`);
await setupRedis(app, app.manifest.addons.redis); // starts the container
}
2021-08-25 19:41:46 -07:00
if (upgrading) await importDatabase('redis');
}
// Ensures that app's addon redis container is running. Can be called when named container already exists/running
2021-08-25 19:41:46 -07:00
async function setupRedis(app, options) {
2015-10-07 16:10:08 -07:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2018-09-18 14:15:23 -07:00
const redisName = 'redis-' + app.id;
2021-08-25 19:41:46 -07:00
const existingPassword = await addonConfigs.getByName(app.id, 'redis', '%REDIS_PASSWORD');
const redisPassword = options.noPassword ? '' : (existingPassword || hat(4 * 48)); // see box#362 for password length
const redisServiceToken = hat(4 * 48);
// Compute redis memory limit based on app's memory limit (this is arbitrary)
2021-10-01 12:09:13 -07:00
const memoryLimit = app.servicesConfig['redis']?.memoryLimit || APP_SERVICES['redis'].defaultMemoryLimit;
2022-11-04 15:09:37 +01:00
const memory = await system.getMemoryAllocation(memoryLimit);
2021-08-25 19:41:46 -07:00
2021-10-01 12:09:13 -07:00
const recoveryMode = app.servicesConfig['redis']?.recoveryMode || false;
const readOnly = !recoveryMode ? '--read-only' : '';
const cmd = recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
2021-08-25 19:41:46 -07:00
const tag = infra.images.redis.tag;
const label = app.fqdn;
// note that we do not add appId label because this interferes with the stop/start app logic
2021-10-01 12:09:13 -07:00
const runCmd = `docker run --restart=always -d --name=${redisName} \
2021-08-25 19:41:46 -07:00
--hostname ${redisName} \
--label=location=${label} \
--net cloudron \
--net-alias ${redisName} \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag="${redisName}" \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
-e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
-v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
--label isCloudronManaged=true \
2021-10-01 12:09:13 -07:00
${readOnly} -v /tmp -v /run ${tag} ${cmd}`;
2021-08-25 19:41:46 -07:00
const env = [
{ name: 'CLOUDRON_REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
{ name: 'CLOUDRON_REDIS_PASSWORD', value: redisPassword },
{ name: 'CLOUDRON_REDIS_HOST', value: redisName },
{ name: 'CLOUDRON_REDIS_PORT', value: '6379' }
2021-08-25 19:41:46 -07:00
];
2021-08-30 18:52:02 -07:00
const [inspectError, result] = await safe(docker.inspect(redisName));
2021-08-25 19:41:46 -07:00
if (inspectError) {
2021-10-01 12:09:13 -07:00
await shell.promises.exec('startRedis', runCmd);
2021-08-25 19:41:46 -07:00
} else { // fast path
debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
}
if (!recoveryMode) {
await addonConfigs.set(app.id, 'redis', env);
await waitForContainer('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
}
}
2021-08-25 19:41:46 -07:00
async function clearRedis(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Clearing redis');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
const [networkError, response] = await safe(superagent.post(`http://${result.ip}:3000/clear?access_token=${result.token}`)
2021-08-25 19:41:46 -07:00
.ok(() => true));
2021-08-25 19:41:46 -07:00
if (networkError) throw new BoxError(BoxError.ADDONS_ERROR, `Network error clearing redis: ${networkError.message}`);
if (response.status !== 200) throw new BoxError(BoxError.ADDONS_ERROR, `Error clearing redis. Status code: ${response.status} message: ${response.body.message}`);
}
2021-08-25 19:41:46 -07:00
async function teardownRedis(app, options) {
2015-10-07 16:10:08 -07:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-08-30 11:42:46 -07:00
await docker.deleteContainer(`redis-${app.id}`);
2021-08-25 19:41:46 -07:00
const [error] = await safe(shell.promises.sudo('removeVolume', [ RMADDONDIR_CMD, 'redis', app.id ], {}));
if (error) throw new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${error.message}`);
2021-08-25 19:41:46 -07:00
safe.fs.rmSync(path.join(paths.LOG_DIR, `redis-${app.id}`), { recursive: true, force: true });
2021-10-01 09:17:44 -07:00
if (safe.error) debug('cannot cleanup logs:', safe.error);
2018-09-18 14:15:23 -07:00
2021-08-25 19:41:46 -07:00
await addonConfigs.unset(app.id, 'redis');
}
2015-10-12 13:29:27 -07:00
2021-08-25 19:41:46 -07:00
async function backupRedis(app, options) {
2018-02-08 15:07:49 +01:00
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Backing up redis');
2015-10-12 13:29:27 -07:00
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
await pipeRequestToFile(`http://${result.ip}:3000/backup?access_token=${result.token}`, dumpPath('redis', app.id));
}
2021-08-25 19:41:46 -07:00
async function restoreRedis(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('Restoring redis');
2021-08-25 19:41:46 -07:00
const result = await getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN');
await pipeFileToRequest(dumpPath('redis', app.id), `http://${result.ip}:3000/restore?access_token=${result.token}`);
2015-10-12 13:29:27 -07:00
}
2018-11-15 19:59:08 +01:00
async function setupTls(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
if (!safe.fs.mkdirSync(`${paths.PLATFORM_DATA_DIR}/tls/${app.id}`, { recursive: true })) {
debug('Error creating tls directory');
throw new BoxError(BoxError.FS_ERROR, safe.error.message);
}
}
async function teardownTls(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
safe.fs.rmSync(`${paths.PLATFORM_DATA_DIR}/tls/${app.id}`, { recursive: true, force: true });
}
2021-08-25 19:41:46 -07:00
async function statusTurn() {
const [error, container] = await safe(docker.inspect('turn'));
if (error && error.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
if (error) throw error;
2020-03-27 21:37:06 +01:00
2021-08-25 19:41:46 -07:00
const result = await docker.memoryUsage(container.Id);
2020-03-27 21:37:06 +01:00
const status = container.State.Running
? (container.HostConfig.ReadonlyRootfs ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STARTING)
: exports.SERVICE_STATUS_STOPPED;
2022-11-24 00:40:40 +01:00
const stats = result.memory_stats || { usage: 0, limit: 1 };
2021-08-25 19:41:46 -07:00
return {
status,
2022-11-24 00:40:40 +01:00
memoryUsed: stats.usage,
memoryPercent: parseInt(100 * stats.usage / stats.limit)
2021-08-25 19:41:46 -07:00
};
2020-03-27 21:37:06 +01:00
}
2021-08-25 19:41:46 -07:00
async function statusDocker() {
const [error] = await safe(docker.ping());
return { status: error ? exports.SERVICE_STATUS_STOPPED: exports.SERVICE_STATUS_ACTIVE };
2018-11-15 19:59:08 +01:00
}
2018-11-23 15:49:47 +01:00
2021-08-25 19:41:46 -07:00
async function restartDocker() {
const [error] = await safe(shell.promises.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}));
if (error) debug(`restartDocker: error restarting docker. ${error.message}`);
2018-11-25 14:43:29 -08:00
}
2018-12-02 19:38:21 -08:00
2021-08-25 19:41:46 -07:00
async function statusUnbound() {
2021-08-26 21:14:49 -07:00
const [error] = await safe(shell.promises.exec('statusUnbound', 'systemctl is-active unbound'));
2021-08-25 19:41:46 -07:00
return { status: error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE };
2018-12-02 19:38:21 -08:00
}
2021-08-25 19:41:46 -07:00
async function restartUnbound() {
const [error] = await safe(shell.promises.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}));
if (error) debug(`restartDocker: error restarting unbound. ${error.message}`);
2018-12-02 19:38:21 -08:00
}
2019-03-18 19:02:32 -07:00
2021-08-25 19:41:46 -07:00
async function statusNginx() {
const [error] = await safe(shell.promises.exec('statusNginx', 'systemctl is-active nginx'));
return { status: error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE };
}
2021-08-25 19:41:46 -07:00
async function restartNginx() {
const [error] = await safe(shell.promises.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}));
if (error) debug(`restartNginx: error restarting unbound. ${error.message}`);
}
2021-08-25 19:41:46 -07:00
async function statusGraphite() {
const [error, container] = await safe(docker.inspect('graphite'));
if (error && error.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
if (error) throw error;
2019-03-19 15:56:29 -07:00
const ip = safe.query(container, 'NetworkSettings.Networks.cloudron.IPAddress', null);
if (!ip) throw new BoxError(BoxError.INACTIVE, 'Error getting IP of graphite service');
const [networkError, response] = await safe(superagent.get(`http://${ip}:8000/graphite-web/dashboard`)
2021-08-25 19:41:46 -07:00
.timeout(20000)
.ok(() => true));
2019-03-19 15:56:29 -07:00
2021-08-25 19:41:46 -07:00
if (networkError) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${networkError.message}` };
if (response.status !== 200) return { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.status} message: ${response.body.message}` };
2019-03-19 15:56:29 -07:00
2021-08-25 19:41:46 -07:00
const result = await docker.memoryUsage('graphite');
2022-11-24 00:40:40 +01:00
const stats = result.memory_stats || { usage: 0, limit: 1 };
2019-03-19 15:56:29 -07:00
2021-08-25 19:41:46 -07:00
return {
status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
2022-11-24 00:40:40 +01:00
memoryUsed: stats.usage,
memoryPercent: parseInt(100 * stats.usage / stats.limit)
2021-08-25 19:41:46 -07:00
};
2019-03-19 15:56:29 -07:00
}
2021-08-25 19:41:46 -07:00
async function restartGraphite() {
await docker.restartContainer('graphite');
2021-03-23 11:01:14 -07:00
2021-08-25 19:41:46 -07:00
setTimeout(async () => {
const [error] = await safe(shell.promises.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}));
if (error) debug(`restartGraphite: error restarting collected. ${error.message}`);
}, 60000);
2021-03-23 11:01:14 -07:00
}
2021-08-25 19:41:46 -07:00
async function teardownOauth(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
2021-10-01 09:17:44 -07:00
debug('teardownOauth');
2021-08-25 19:41:46 -07:00
await addonConfigs.unset(app.id, 'oauth');
}