// services.js - manages platform service containers and per-app addon lifecycle
'use strict';
|
|
|
|
// Public interface of the services/addons subsystem. All async functions
// follow the node error-first callback(error, ...) convention.
exports = module.exports = {
    // service introspection
    getServiceIds,

    getServiceStatus,

    getServiceConfig,

    getServiceLogs,

    // service lifecycle
    configureService,

    restartService,

    rebuildService,

    // per-app service containers (currently redis)
    startAppServices,

    stopAppServices,

    // platform services, (re)started on infra version changes
    startServices,

    // addon lifecycle as requested by an app's manifest
    setupAddons,

    teardownAddons,

    backupAddons,

    restoreAddons,

    clearAddons,

    getEnvironment,

    getContainerNamesSync,

    getContainerDetails,

    // service status values returned by getServiceStatus()
    SERVICE_STATUS_STARTING: 'starting', // container up, waiting for healthcheck

    SERVICE_STATUS_ACTIVE: 'active',

    SERVICE_STATUS_STOPPED: 'stopped'

};
|
|
|
|
var appdb = require('./appdb.js'),
|
|
apps = require('./apps.js'),
|
|
assert = require('assert'),
|
|
async = require('async'),
|
|
BoxError = require('./boxerror.js'),
|
|
constants = require('./constants.js'),
|
|
crypto = require('crypto'),
|
|
debug = require('debug')('box:services'),
|
|
docker = require('./docker.js'),
|
|
fs = require('fs'),
|
|
hat = require('./hat.js'),
|
|
infra = require('./infra_version.js'),
|
|
mail = require('./mail.js'),
|
|
once = require('once'),
|
|
os = require('os'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
rimraf = require('rimraf'),
|
|
safe = require('safetydance'),
|
|
semver = require('semver'),
|
|
settings = require('./settings.js'),
|
|
sftp = require('./sftp.js'),
|
|
shell = require('./shell.js'),
|
|
spawn = require('child_process').spawn,
|
|
split = require('split'),
|
|
request = require('request'),
|
|
system = require('./system.js'),
|
|
util = require('util');
|
|
|
|
// No-op addon lifecycle handler; matches the (app, options, callback) signature.
const NOOP = function (app, options, callback) { return callback(); };

// Fire-and-forget callback: log the error (if any) and move on.
const NOOP_CALLBACK = function (error) { if (error) debug(error); };

// Helper scripts run with elevated privileges via shell.sudo/shell.exec.
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');

const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
|
|
|
|
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
// Each entry implements the full addon lifecycle - setup/teardown/backup/restore/clear -
// all with signature (app, options, callback). NOOP is used where a phase does not
// apply (e.g. stateless addons have nothing to back up).
var ADDONS = {

    turn: {

        setup: setupTurn,

        teardown: teardownTurn,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP

    },

    email: {

        setup: setupEmail,

        teardown: teardownEmail,

        backup: NOOP,

        restore: setupEmail, // restore just re-runs setup (credentials are regenerated)

        clear: NOOP,

    },

    ldap: {

        setup: setupLdap,

        teardown: teardownLdap,

        backup: NOOP,

        restore: setupLdap,

        clear: NOOP,

    },

    localstorage: {

        setup: setupLocalStorage,

        teardown: teardownLocalStorage,

        backup: NOOP, // no backup because it's already inside app data

        restore: NOOP,

        clear: clearLocalStorage,

    },

    mongodb: {

        setup: setupMongoDb,

        teardown: teardownMongoDb,

        backup: backupMongoDb,

        restore: restoreMongoDb,

        clear: clearMongodb,

    },

    mysql: {

        setup: setupMySql,

        teardown: teardownMySql,

        backup: backupMySql,

        restore: restoreMySql,

        clear: clearMySql,

    },

    postgresql: {

        setup: setupPostgreSql,

        teardown: teardownPostgreSql,

        backup: backupPostgreSql,

        restore: restorePostgreSql,

        clear: clearPostgreSql,

    },

    proxyAuth: {

        setup: setupProxyAuth,

        teardown: teardownProxyAuth,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP

    },

    recvmail: {

        setup: setupRecvMail,

        teardown: teardownRecvMail,

        backup: NOOP,

        restore: setupRecvMail,

        clear: NOOP,

    },

    redis: {

        setup: setupRedis,

        teardown: teardownRedis,

        backup: backupRedis,

        restore: restoreRedis,

        clear: clearRedis,

    },

    sendmail: {

        setup: setupSendMail,

        teardown: teardownSendMail,

        backup: NOOP,

        restore: setupSendMail,

        clear: NOOP,

    },

    scheduler: {

        setup: NOOP,

        teardown: NOOP,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP,

    },

    docker: {

        setup: NOOP,

        teardown: NOOP,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP,

    },

    tls: {

        setup: NOOP,

        teardown: NOOP,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP,

    },

    oauth: { // kept for backward compatibility. keep teardown for uninstall to work

        setup: NOOP,

        teardown: teardownOauth,

        backup: NOOP,

        restore: NOOP,

        clear: NOOP,

    }

};
|
|
|
|
// services are actual containers that are running. addons are the concepts requested by app
// Each entry exposes status(callback) and restart(callback). defaultMemoryLimit is in
// bytes and is used when no explicit memoryLimit is configured (0 = no limit applied).
const SERVICES = {

    turn: {

        status: statusTurn,

        restart: docker.restartContainer.bind(null, 'turn'),

        defaultMemoryLimit: 256 * 1024 * 1024

    },

    mail: {

        status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),

        restart: mail.restartMail,

        defaultMemoryLimit: mail.DEFAULT_MEMORY_LIMIT

    },

    mongodb: {

        status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'),

        restart: docker.restartContainer.bind(null, 'mongodb'),

        // scale database default with total RAM: 256MB base + 256MB per 4GB of RAM
        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024

    },

    mysql: {

        status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'),

        restart: docker.restartContainer.bind(null, 'mysql'),

        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024

    },

    postgresql: {

        status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'),

        restart: docker.restartContainer.bind(null, 'postgresql'),

        defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024

    },

    // docker and unbound are host systemd units, not containers (see getServiceLogs)
    docker: {

        status: statusDocker,

        restart: restartDocker,

        defaultMemoryLimit: 0

    },

    unbound: {

        status: statusUnbound,

        restart: restartUnbound,

        defaultMemoryLimit: 0

    },

    sftp: {

        status: statusSftp,

        restart: docker.restartContainer.bind(null, 'sftp'),

        defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT

    },

    graphite: {

        status: statusGraphite,

        restart: restartGraphite,

        defaultMemoryLimit: 256 * 1024 * 1024

    },

    nginx: {

        status: statusNginx,

        restart: restartNginx,

        defaultMemoryLimit: 0

    }

};
|
|
|
|
// Per-app service containers; one container instance per app, named `<service>-<appId>`.
// `instance` is the app id. Entries expose status/start/stop/restart plus defaultMemoryLimit.
const APP_SERVICES = {

    redis: {

        status: (instance, done) => containerStatus(`redis-${instance}`, 'CLOUDRON_REDIS_TOKEN', done),

        start: (instance, done) => docker.startContainer(`redis-${instance}`, done),

        stop: (instance, done) => docker.stopContainer(`redis-${instance}`, done),

        restart: (instance, done) => docker.restartContainer(`redis-${instance}`, done),

        defaultMemoryLimit: 150 * 1024 * 1024

    }

};
|
|
|
|
// Log a debug message prefixed with the app's fqdn (or location as a fallback).
// Remaining arguments are formatted printf-style, like debug()/util.format.
function debugApp(app /*, args */) {
    assert(typeof app === 'object');

    const prefix = app.fqdn || app.location;
    const rest = Array.prototype.slice.call(arguments, 1);

    debug(prefix + ' ' + util.format.apply(util, rest));
}
|
|
|
|
// Parse a docker image tag of the form <repository>:<version>@<algo>:<digest>
// into its parts. version is returned as a parsed semver object (null if invalid).
function parseImageTag(tag) {
    const repository = tag.split(':', 1)[0];

    const afterRepository = tag.substr(repository.length + 1); // skip the ':'
    const version = afterRepository.split('@', 1)[0];

    const afterVersion = tag.substr(repository.length + 1 + version.length + 1); // skip the '@'
    const digest = afterVersion.split(':', 2)[1]; // part after the digest algorithm

    return { repository, version: semver.parse(version), digest };
}
|
|
|
|
// Returns true when the image's semver major version changed between the two
// tags, in which case a data migration is required instead of a plain restart.
// Note: assumes both tags carry a parseable semver version (version is non-null).
function requiresUpgrade(existingTag, currentTag) {
    let etag = parseImageTag(existingTag), ctag = parseImageTag(currentTag);

    return etag.version.major !== ctag.version.major;
}
|
|
|
|
// paths for dumps
// Returns the path of the database dump inside the app's data directory,
// or undefined for addons that have no dump file.
function dumpPath(addon, appId) {
    const DUMP_NAMES = {
        postgresql: 'postgresqldump',
        mysql: 'mysqldump',
        mongodb: 'mongodbdump',
        redis: 'dump.rdb'
    };

    const dumpName = DUMP_NAMES[addon];
    if (!dumpName) return undefined; // addon keeps no dump

    return path.join(paths.APPS_DATA_DIR, appId, dumpName);
}
|
|
|
|
// Inspect a service container and extract its IP on the 'cloudron' docker network,
// its auth token (read from the container's environment) and its docker State.
// Errors with INACTIVE when the container has no IP (not attached/not running) and
// DOCKER_ERROR when env/token cannot be extracted.
function getContainerDetails(containerName, tokenEnvName, callback) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');
    assert.strictEqual(typeof callback, 'function');

    docker.inspect(containerName, function (error, result) {
        if (error) return callback(error);

        const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
        if (!ip) return callback(new BoxError(BoxError.INACTIVE, `Error getting IP of ${containerName} service`));

        // extract the cloudron token for auth
        const env = safe.query(result, 'Config.Env', null);
        if (!env) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error inspecting environment of ${containerName} service`));
        // env entries look like 'NAME=value'; prefix match finds the token variable
        const tmp = env.find(function (e) { return e.indexOf(tokenEnvName) === 0; });
        if (!tmp) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`));
        const token = tmp.slice(tokenEnvName.length + 1); // +1 for the = sign
        if (!token) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`));

        callback(null, { ip: ip, token: token, state: result.State });
    });
}
|
|
|
|
// Determine the status of a service container by hitting its internal healthcheck
// endpoint (https://<ip>:3000/healthcheck). Yields { status, memoryUsed?,
// memoryPercent?, healthcheck?, error? } where status is one of the
// SERVICE_STATUS_* values. A missing/inactive container maps to STOPPED; an
// unreachable or unhealthy endpoint maps to STARTING (never an error).
function containerStatus(containerName, tokenEnvName, callback) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');
    assert.strictEqual(typeof callback, 'function');

    getContainerDetails(containerName, tokenEnvName, function (error, addonDetails) {
        if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        // rejectUnauthorized:false because services use self-signed certs on the internal network
        request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 20000 }, function (error, response) { // note: shadows outer 'error'
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${error.message}` });
            if (response.statusCode !== 200 || !response.body.status) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}` });

            docker.memoryUsage(containerName, function (error, result) {
                if (error) return callback(error);

                var tmp = {
                    status: addonDetails.state.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                    memoryUsed: result.memory_stats.usage,
                    memoryPercent: parseInt(100 * result.memory_stats.usage / result.memory_stats.limit),
                    healthcheck: response.body
                };

                callback(null, tmp);
            });
        });
    });
}
|
|
|
|
// List all known service ids: every platform service plus one 'redis:<appId>'
// entry for each installed app that requests the redis addon.
function getServiceIds(callback) {
    assert.strictEqual(typeof callback, 'function');

    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        const serviceIds = Object.keys(SERVICES);

        allApps.forEach(function (app) {
            if (app.manifest.addons && app.manifest.addons['redis']) serviceIds.push(`redis:${app.id}`);
        });

        callback(null, serviceIds);
    });
}
|
|
|
|
// Fetch the saved config object of a service. ids are either a plain service name
// ('mysql') configured globally in settings, or '<name>:<appId>' for per-app
// services configured on the app record. Yields {} when nothing is configured.
function getServiceConfig(id, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    const [name, instance] = id.split(':');

    if (!instance) {
        // global service: config lives in settings
        settings.getServicesConfig(function (error, servicesConfig) {
            if (error) return callback(error);

            callback(null, servicesConfig[name] || {});
        });

        return;
    }

    // per-app service: config lives on the app record
    appdb.get(instance, function (error, app) {
        if (error) return callback(error);

        callback(null, app.servicesConfig[name] || {});
    });
}
|
|
|
|
// Resolve a service id to its live status. Yields { name, status, memoryUsed,
// memoryPercent, error, healthcheck, config }; config is the saved service config
// with defaultMemoryLimit filled in when no explicit memoryLimit is set.
// Errors with NOT_FOUND for unknown service names.
function getServiceStatus(id, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    const [name, instance ] = id.split(':');
    let containerStatusFunc, service;

    if (instance) { // per-app service ('<name>:<appId>')
        service = APP_SERVICES[name];
        if (!service) return callback(new BoxError(BoxError.NOT_FOUND));
        containerStatusFunc = service.status.bind(null, instance);
    } else if (SERVICES[name]) { // global platform service
        service = SERVICES[name];
        containerStatusFunc = service.status;
    } else {
        return callback(new BoxError(BoxError.NOT_FOUND));
    }

    // result skeleton; fields below are overwritten from the status function
    var tmp = {
        name: name,
        status: null,
        memoryUsed: 0,
        memoryPercent: 0,
        error: null,
        healthcheck: null,
        config: {}
    };

    containerStatusFunc(function (error, result) {
        if (error) return callback(error);

        tmp.status = result.status;
        tmp.memoryUsed = result.memoryUsed;
        tmp.memoryPercent = result.memoryPercent;
        tmp.error = result.error || null;
        tmp.healthcheck = result.healthcheck || null;

        getServiceConfig(id, function (error, serviceConfig) {
            if (error) return callback(error);

            tmp.config = serviceConfig;

            // expose the effective memory limit even when it was never explicitly set
            if (!tmp.config.memoryLimit && service.defaultMemoryLimit) {
                tmp.config.memoryLimit = service.defaultMemoryLimit;
            }

            callback(null, tmp);
        });
    });
}
|
|
|
|
// Persist a service's config (app record for per-app services, settings for
// global services) and then apply it to the running container via
// applyServiceConfig. Errors with NOT_FOUND for unknown service names.
function configureService(id, data, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof callback, 'function');

    const [name, instance ] = id.split(':');

    if (instance) { // per-app service: save on the app record
        if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));

        apps.get(instance, function (error, app) {
            if (error) return callback(error);

            const servicesConfig = app.servicesConfig;
            servicesConfig[name] = data; // replaces any previous config wholesale

            appdb.update(instance, { servicesConfig }, function (error) {
                if (error) return callback(error);

                applyServiceConfig(id, data, callback);
            });
        });
    } else if (SERVICES[name]) { // global service: save in settings
        settings.getServicesConfig(function (error, servicesConfig) {
            if (error) return callback(error);

            servicesConfig[name] = data;

            settings.setServicesConfig(servicesConfig, function (error) {
                if (error) return callback(error);

                applyServiceConfig(id, data, callback);
            });
        });
    } else {
        return callback(new BoxError(BoxError.NOT_FOUND));
    }
}
|
|
|
|
// Stream logs of a service. options: { lines (number, -1 = all), format
// ('json' emits one {realtimeTimestamp, message, source} object per line,
// anything else emits raw lines), follow (boolean) }. Yields a readable
// stream; calling stream.close() kills the underlying child process.
// Errors with NOT_FOUND for unknown service names.
function getServiceLogs(id, options, callback) {
    assert.strictEqual(typeof id, 'string');
    assert(options && typeof options === 'object');
    assert.strictEqual(typeof callback, 'function');

    assert.strictEqual(typeof options.lines, 'number');
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    const [name, instance ] = id.split(':');

    if (instance) {
        if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));
    } else if (!SERVICES[name]) {
        return callback(new BoxError(BoxError.NOT_FOUND));
    }

    debug(`Getting logs for ${name}`);

    var lines = options.lines,
        format = options.format || 'json',
        follow = options.follow;

    let cmd, args = [];

    // docker and unbound use journald
    if (name === 'docker' || name === 'unbound') {
        cmd = 'journalctl';

        args.push('--lines=' + (lines === -1 ? 'all' : lines));
        args.push(`--unit=${name}`);
        args.push('--no-pager');
        args.push('--output=short-iso'); // ISO timestamps so the mapper below can parse them

        if (follow) args.push('--follow');
    } else if (name === 'nginx') { // nginx logs straight to files on the host
        cmd = '/usr/bin/tail';

        args.push('--lines=' + (lines === -1 ? '+1' : lines));
        if (follow) args.push('--follow', '--retry', '--quiet'); // same as -F. to make it work if file doesn't exist, --quiet to not output file headers, which are no logs
        args.push('/var/log/nginx/access.log');
        args.push('/var/log/nginx/error.log');
    } else { // containerized services log to per-container app.log files
        cmd = '/usr/bin/tail';

        args.push('--lines=' + (lines === -1 ? '+1' : lines));
        if (follow) args.push('--follow', '--retry', '--quiet'); // same as -F. to make it work if file doesn't exist, --quiet to not output file headers, which are no logs

        const containerName = APP_SERVICES[name] ? `${name}-${instance}` : name;
        args.push(path.join(paths.LOG_DIR, containerName, 'app.log'));
    }

    var cp = spawn(cmd, args);

    var transformStream = split(function mapper(line) {
        if (format !== 'json') return line + '\n';

        var data = line.split(' '); // logs are <ISOtimestamp> <msg>
        var timestamp = (new Date(data[0])).getTime();
        if (isNaN(timestamp)) timestamp = 0;
        var message = line.slice(data[0].length+1);

        // ignore faulty empty logs
        if (!timestamp && !message) return;

        return JSON.stringify({
            realtimeTimestamp: timestamp * 1000, // ms -> microseconds, matching journald's field unit
            message: message,
            source: name
        }) + '\n';
    });

    transformStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process

    cp.stdout.pipe(transformStream);

    callback(null, transformStream);
}
|
|
|
|
// Recreate a service's docker container when it went missing without an infra
// version change. Silently succeeds (callback()) for services that currently
// have no rebuild path.
function rebuildService(id, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
    // passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad
    getServiceConfig(id, function (error, serviceConfig) {
        if (error) return callback(error);

        if (id === 'turn') return startTurn({ version: 'none' }, serviceConfig, callback);
        if (id === 'mongodb') return startMongodb({ version: 'none' }, callback);
        if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
        if (id === 'mysql') return startMysql({ version: 'none' }, callback);
        if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
        if (id === 'graphite') return startGraphite({ version: 'none' }, serviceConfig, callback);

        // nothing to rebuild for now.
        // TODO: mongo/postgresql/mysql need to be scaled down.
        // TODO: missing redis container is not created
        callback();
    });
}
|
|
|
|
// Restart a service by id ('<name>' for platform services, '<name>:<appId>'
// for per-app services). Errors with NOT_FOUND for unknown names.
function restartService(id, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    const [name, instance] = id.split(':');

    if (instance) {
        const appService = APP_SERVICES[name];
        if (!appService) return callback(new BoxError(BoxError.NOT_FOUND));

        return appService.restart(instance, callback);
    }

    const service = SERVICES[name];
    if (!service) return callback(new BoxError(BoxError.NOT_FOUND));

    return service.restart(callback);
}
|
|
|
|
// in the future, we can refcount and lazy start global services
// Start every per-app service container the app's manifest requests.
// Individual start failures are logged but never propagated.
function startAppServices(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    const instance = app.id;
    async.eachSeries(Object.keys(app.manifest.addons || {}), function (addon, iteratorDone) {
        if (!(addon in APP_SERVICES)) return iteratorDone(); // addon has no per-app container

        APP_SERVICES[addon].start(instance, function (error) { // assume addons name is service name
            // error ignored because we don't want "start app" to error. user can fix it from Services
            if (error) debug(`startAppServices: ${addon}:${instance}`, error);

            iteratorDone();
        });
    }, callback);
}
|
|
|
|
// in the future, we can refcount and stop global services as well
// Stop every per-app service container the app's manifest requests.
// Individual stop failures are logged but never propagated.
function stopAppServices(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    const instance = app.id;
    async.eachSeries(Object.keys(app.manifest.addons || {}), function (addon, iteratorDone) {
        if (!(addon in APP_SERVICES)) return iteratorDone(); // addon has no per-app container

        APP_SERVICES[addon].stop(instance, function (error) { // assume addons name is service name
            // error ignored because we don't want "stop app" to error. user can fix it from Services
            if (error) debug(`stopAppServices: ${addon}:${instance}`, error);

            iteratorDone();
        });
    }, callback);
}
|
|
|
|
// Block until a service container answers its healthcheck endpoint. Polls up to
// 10 times at 15 second intervals (~2.5 minutes) before failing with ADDONS_ERROR.
function waitForContainer(containerName, tokenEnvName, callback) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`Waiting for ${containerName}`);

    getContainerDetails(containerName, tokenEnvName, function (error, result) {
        if (error) return callback(error);

        async.retry({ times: 10, interval: 15000 }, function (retryCallback) {
            // rejectUnauthorized:false because services use self-signed certs internally
            request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 5000 }, function (error, response) {
                if (error) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${error.message}`));
                if (response.statusCode !== 200 || !response.body.status) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}`));

                retryCallback(null);
            });
        }, callback);
    });
}
|
|
|
|
// Run the setup phase of every addon the app requests, one at a time.
// addons maps addon name -> options object from the app manifest; a null/undefined
// addons is a no-op. Fails fast with NOT_FOUND on the first unknown addon.
function setupAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!addons) return callback(null);

    debugApp(app, 'setupAddons: Setting up %j', Object.keys(addons));

    async.eachSeries(Object.keys(addons), function iterator(addon, iteratorCallback) {
        const handlers = ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        debugApp(app, 'Setting up addon %s with options %j', addon, addons[addon]);

        handlers.setup(app, addons[addon], iteratorCallback);
    }, callback);
}
|
|
|
|
// Run the destructive teardown phase of every addon the app requests, one at a
// time (see ADDONS: data stored with the addon is lost). null addons is a no-op.
function teardownAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!addons) return callback(null);

    debugApp(app, 'teardownAddons: Tearing down %j', Object.keys(addons));

    async.eachSeries(Object.keys(addons), function iterator(addon, iteratorCallback) {
        if (!(addon in ADDONS)) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        debugApp(app, 'Tearing down addon %s with options %j', addon, addons[addon]);

        ADDONS[addon].teardown(app, addons[addon], iteratorCallback);
    }, callback);
}
|
|
|
|
// Run the backup phase of every addon the app requests, one at a time
// (e.g. database dumps into the app's data directory). null addons is a no-op.
function backupAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'backupAddons');

    if (!addons) return callback(null);

    debugApp(app, 'backupAddons: Backing up %j', Object.keys(addons));

    async.eachSeries(Object.keys(addons), function iterator (addon, iteratorCallback) {
        if (!(addon in ADDONS)) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        ADDONS[addon].backup(app, addons[addon], iteratorCallback);
    }, callback);
}
|
|
|
|
// Run the clear phase of every addon the app requests, one at a time
// (empties addon data, e.g. before a restore). null addons is a no-op.
function clearAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'clearAddons');

    if (!addons) return callback(null);

    debugApp(app, 'clearAddons: clearing %j', Object.keys(addons));

    async.eachSeries(Object.keys(addons), function iterator (addon, iteratorCallback) {
        if (!(addon in ADDONS)) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        ADDONS[addon].clear(app, addons[addon], iteratorCallback);
    }, callback);
}
|
|
|
|
// Run the restore phase of every addon the app requests, one at a time
// (loads previously backed-up addon data). null addons is a no-op.
function restoreAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'restoreAddons');

    if (!addons) return callback(null);

    debugApp(app, 'restoreAddons: restoring %j', Object.keys(addons));

    async.eachSeries(Object.keys(addons), function iterator (addon, iteratorCallback) {
        if (!(addon in ADDONS)) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        ADDONS[addon].restore(app, addons[addon], iteratorCallback);
    }, callback);
}
|
|
|
|
// Re-import a single app's database addon from its dump: setup (re-provision
// credentials), clear (wipe any partial state from a crashed restore), then restore.
function importAppDatabase(app, addon, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof callback, 'function');

    if (!(addon in ADDONS)) return callback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

    async.series([
        ADDONS[addon].setup.bind(null, app, app.manifest.addons[addon]),
        ADDONS[addon].clear.bind(null, app, app.manifest.addons[addon]), // clear in case we crashed in a restore
        ADDONS[addon].restore.bind(null, app, app.manifest.addons[addon])
    ], callback);
}
|
|
|
|
// Import a database addon's data for every app that uses it (the second half of
// an infra migration, after exportDatabase). Apps whose import fails are marked
// ISTATE_ERROR instead of aborting the whole run. Always removes the
// 'exported-<addon>' marker file afterwards.
function importDatabase(addon, callback) {
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`importDatabase: Importing ${addon}`);

    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        async.eachSeries(allApps, function iterator (app, iteratorCallback) {
            // guard manifest.addons - it is optional (see exportDatabase which has the same check)
            if (!app.manifest.addons || !(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon

            debug(`importDatabase: Importing addon ${addon} of app ${app.id}`);

            importAppDatabase(app, addon, function (error) {
                if (!error) return iteratorCallback();

                debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored`, error);
                // FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
                // not clear, if repair workflow should be part of addon or per-app
                appdb.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }, iteratorCallback);
            });
        }, function (error) {
            safe.fs.unlinkSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`)); // clean up for future migrations

            callback(error);
        });
    });
}
|
|
|
|
// Export a database addon's data for every app that uses it, then remove the
// addon container and its data directory so it can be recreated fresh. Writes an
// 'exported-<addon>' marker file for crash safety; a re-run after the marker
// exists is a no-op (importDatabase removes the marker when done).
function exportDatabase(addon, callback) {
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`exportDatabase: Exporting ${addon}`);

    if (fs.existsSync(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`))) {
        debug(`exportDatabase: Already exported addon ${addon} in previous run`);
        return callback(null);
    }

    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        async.eachSeries(allApps, function iterator (app, iteratorCallback) {
            if (!app.manifest.addons || !(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon
            if (app.installationState === apps.ISTATE_ERROR) return iteratorCallback(); // missing db causes crash in old app addon containers

            debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);

            ADDONS[addon].backup(app, app.manifest.addons[addon], function (error) {
                if (error) {
                    debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
                    // for errored apps, we can ignore if export had an error
                    return iteratorCallback(app.installationState === apps.ISTATE_ERROR ? null : error);
                }

                iteratorCallback();
            });
        }, function (error) {
            if (error) return callback(error);

            async.series([
                (done) => fs.writeFile(path.join(paths.ADDON_CONFIG_DIR, `exported-${addon}`), '', 'utf8', done),
                // note: after this point, we are restart safe. it's ok if the box code crashes at this point
                (done) => shell.exec(`exportDatabase - remove${addon}`, `docker rm -f ${addon}`, done), // what if db writes something when quitting ...
                (done) => shell.sudo(`exportDatabase - removeAddonDir${addon}`, [ RMADDONDIR_CMD, addon ], {}, done) // ready to start afresh
            ], callback);
        });
    });
}
|
|
|
|
// Apply a service config to its running container. Currently this only enforces
// the memory limit (explicit serviceConfig.memoryLimit, falling back to the
// service's defaultMemoryLimit) via docker update. Errors with NOT_FOUND for
// unknown service names.
function applyServiceConfig(id, serviceConfig, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof serviceConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    const [name, instance] = id.split(':');
    let containerName, memoryLimit;

    if (instance) { // per-app service container
        if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));

        containerName = `${name}-${instance}`;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES[name].defaultMemoryLimit;
    } else if (SERVICES[name]) { // global platform service container
        containerName = name;
        memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES[name].defaultMemoryLimit;
    } else {
        return callback(new BoxError(BoxError.NOT_FOUND));
    }

    debug(`updateServiceConfig: ${containerName} ${JSON.stringify(serviceConfig)}`);

    const memory = system.getMemoryAllocation(memoryLimit);
    docker.update(containerName, memory, memoryLimit, callback);
}
|
|
|
|
// (Re)start platform service containers after an infra version change. On any
// version change all services are recreated; when the version is unchanged, only
// services whose image tag differs are recreated. Afterwards the configured
// memory limits are applied best-effort (failures are only logged).
function startServices(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    settings.getServicesConfig(function (error, servicesConfig) {
        if (error) return callback(error);

        let startFuncs = [ ];

        // always start addons on any infra change, regardless of minor or major update
        if (existingInfra.version !== infra.version) {
            debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
            startFuncs.push(
                mail.startMail, // start this first to reduce email downtime
                startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}),
                startMysql.bind(null, existingInfra),
                startPostgresql.bind(null, existingInfra),
                startMongodb.bind(null, existingInfra),
                startRedis.bind(null, existingInfra),
                startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}),
                sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
            );
        } else {
            assert.strictEqual(typeof existingInfra.images, 'object');

            // same infra version: only recreate services whose image tag changed
            if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); // start this first to reduce email downtime
            if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}));
            if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
            if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
            if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
            if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
            if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}));
            if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));

            debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
        }

        async.series(startFuncs, function (error) {
            if (error) return callback(error);

            // we always start db containers with unlimited memory. we then scale them down per configuration
            let updateFuncs = [
                applyServiceConfig.bind(null, 'mysql', servicesConfig['mysql'] || {}),
                applyServiceConfig.bind(null, 'postgresql', servicesConfig['postgresql'] || {}),
                applyServiceConfig.bind(null, 'mongodb', servicesConfig['mongodb'] || {}),
            ];

            async.series(updateFuncs, NOOP_CALLBACK); // it's ok if applying service configs fails

            callback();
        });
    });
}
|
|
|
|
// Builds the environment variable list ("NAME=value" strings) for an app's
// container from the addon configs stored in the database. Apps that use the
// docker addon additionally receive CLOUDRON_DOCKER_HOST pointing at the
// docker API proxy.
function getEnvironment(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    appdb.getAddonConfigByAppId(app.id, function (error, addonConfig) {
        if (error) return callback(error);

        if (app.manifest.addons['docker']) {
            addonConfig.push({ name: 'CLOUDRON_DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` });
        }

        const env = addonConfig.map((entry) => `${entry.name}=${entry.value}`);

        callback(null, env);
    });
}
|
|
|
|
// Returns the names of the auxiliary containers created for an app's addons.
// Currently only the scheduler addon spawns extra containers (one per task).
// Returns an empty array when addons is not provided.
function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return [];

    const containerNames = [];

    for (const addonName of Object.keys(addons)) {
        if (addonName !== 'scheduler') continue;

        // names here depend on how scheduler.js creates containers
        for (const taskName of Object.keys(addons.scheduler)) {
            containerNames.push(app.id + '-' + taskName);
        }
    }

    return containerNames;
}
|
|
|
|
// Creates the docker volume backing the app's local storage. The volume is
// bound to the app's data directory on the host.
function setupLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'setupLocalStorage');

    const volumeName = `${app.id}-localstorage`;
    const hostDataDir = apps.getDataDir(app, app.dataDir);

    // remove any existing volume in case it's bound with an old dataDir
    async.series([
        docker.removeVolume.bind(null, volumeName),
        docker.createVolume.bind(null, volumeName, hostDataDir, { fqdn: app.fqdn, appId: app.id })
    ], callback);
}
|
|
|
|
// Empties the app's localstorage volume contents. The volume and its backing
// directory are kept (removeDirectory: false); see teardownLocalStorage for
// full removal.
function clearLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'clearLocalStorage');

    docker.clearVolume(`${app.id}-localstorage`, { removeDirectory: false }, callback);
}
|
|
|
|
// Wipes the app's localstorage volume (including the backing directory) and
// then removes the volume itself.
function teardownLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'teardownLocalStorage');

    const volumeName = `${app.id}-localstorage`;

    async.series([
        docker.clearVolume.bind(null, volumeName, { removeDirectory: true }),
        docker.removeVolume.bind(null, volumeName)
    ], callback);
}
|
|
|
|
// Publishes the STUN/TURN connection env vars for an app. The shared turn
// secret is created by startTurn() and persisted in ADDON_TURN_SECRET_FILE;
// if it is missing here, the secret env var is stored empty (and logged).
function setupTurn(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    var turnSecret = safe.fs.readFileSync(paths.ADDON_TURN_SECRET_FILE, 'utf8');
    if (!turnSecret) debug('setupTurn: no turn secret set. Will leave empty, but this is a problem!'); // fixed typo ('emtpy')

    // STUN/TURN run on the host network (see startTurn), so apps reach them
    // via the admin fqdn on the standard ports
    const env = [
        { name: 'CLOUDRON_STUN_SERVER', value: settings.adminFqdn() },
        { name: 'CLOUDRON_STUN_PORT', value: '3478' },
        { name: 'CLOUDRON_STUN_TLS_PORT', value: '5349' },
        { name: 'CLOUDRON_TURN_SERVER', value: settings.adminFqdn() },
        { name: 'CLOUDRON_TURN_PORT', value: '3478' },
        { name: 'CLOUDRON_TURN_TLS_PORT', value: '5349' },
        { name: 'CLOUDRON_TURN_SECRET', value: turnSecret }
    ];

    debugApp(app, 'Setting up TURN');

    appdb.setAddonConfig(app.id, 'turn', env, callback);
}
|
|
|
|
// Removes the app's TURN addon env vars from the addon config table.
function teardownTurn(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down TURN');

    appdb.unsetAddonConfig(app.id, 'turn', callback);
}
|
|
|
|
// Publishes the mail addon env vars (SMTP/IMAP/sieve endpoints and domain
// lists) for an app. setup can be called multiple times; setAddonConfig
// overwrites previous values.
function setupEmail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    mail.getDomains(function (error, mailDomains) {
        if (error) return callback(error);

        // comma separated list of all domains that have email enabled
        const mailInDomains = mailDomains.filter(function (d) { return d.enabled; }).map(function (d) { return d.domain; }).join(',');

        // v1 manifests get unprefixed variable names for backward compatibility
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

        // note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
        var env = [
            { name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
            { name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
            { name: `${envPrefix}MAIL_SIEVE_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SIEVE_PORT`, value: '4190' },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.domain },
            { name: `${envPrefix}MAIL_DOMAINS`, value: mailInDomains },
            { name: 'CLOUDRON_MAIL_SERVER_HOST', value: settings.mailFqdn() }, // this is also a hint to reconfigure on mail server name change
            { name: `${envPrefix}LDAP_MAILBOXES_BASE_DN`, value: 'ou=mailboxes,dc=cloudron' }
        ];

        debugApp(app, 'Setting up Email');

        appdb.setAddonConfig(app.id, 'email', env, callback);
    });
}
|
|
|
|
// Removes the app's email addon env vars from the addon config table.
function teardownEmail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down Email');

    appdb.unsetAddonConfig(app.id, 'email', callback);
}
|
|
|
|
// Publishes the LDAP connection env vars for an app. Does nothing when the
// app has single sign-on disabled (app.sso falsy).
function setupLdap(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!app.sso) return callback(null);

    // v1 manifests get unprefixed variable names for backward compatibility
    const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

    // 172.18.0.1 is presumably the host's address on the cloudron docker
    // network (it matches the --dns/root-host addresses used elsewhere in
    // this file) — the box LDAP server listens there on LDAP_PORT
    var env = [
        { name: `${envPrefix}LDAP_SERVER`, value: '172.18.0.1' },
        { name: 'CLOUDRON_LDAP_HOST', value: '172.18.0.1' }, // to keep things in sync with the database _HOST vars
        { name: `${envPrefix}LDAP_PORT`, value: '' + constants.LDAP_PORT },
        { name: `${envPrefix}LDAP_URL`, value: 'ldap://172.18.0.1:' + constants.LDAP_PORT },
        { name: `${envPrefix}LDAP_USERS_BASE_DN`, value: 'ou=users,dc=cloudron' },
        { name: `${envPrefix}LDAP_GROUPS_BASE_DN`, value: 'ou=groups,dc=cloudron' },
        { name: `${envPrefix}LDAP_BIND_DN`, value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
        { name: `${envPrefix}LDAP_BIND_PASSWORD`, value: hat(4 * 128) } // this is ignored
    ];

    debugApp(app, 'Setting up LDAP');

    appdb.setAddonConfig(app.id, 'ldap', env, callback);
}
|
|
|
|
// Removes the app's LDAP addon env vars from the addon config table.
function teardownLdap(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down LDAP');

    appdb.unsetAddonConfig(app.id, 'ldap', callback);
}
|
|
|
|
// Publishes the sendmail (outbound SMTP) addon env vars for an app. When the
// addon is optional and the app's mailbox is disabled, an empty config is
// stored so stale values are cleared. An existing SMTP password is reused so
// re-running setup (e.g. on configure/restart) does not rotate credentials.
function setupSendMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up SendMail');

    const disabled = app.manifest.addons.sendmail.optional && !app.enableMailbox;
    if (disabled) return appdb.setAddonConfig(app.id, 'sendmail', [], callback);

    appdb.getAddonConfigByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        var password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length

        // v1 manifests get unprefixed variable names for backward compatibility
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

        var env = [
            { name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
            { name: `${envPrefix}MAIL_SMTPS_PORT`, value: '2465' },
            { name: `${envPrefix}MAIL_STARTTLS_PORT`, value: '2587' },
            { name: `${envPrefix}MAIL_SMTP_USERNAME`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_SMTP_PASSWORD`, value: password },
            { name: `${envPrefix}MAIL_FROM`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.mailboxDomain }
        ];

        debugApp(app, 'Setting sendmail addon config to %j', env);
        appdb.setAddonConfig(app.id, 'sendmail', env, callback);
    });
}
|
|
|
|
// Removes the app's sendmail addon env vars from the addon config table.
function teardownSendMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down sendmail');

    appdb.unsetAddonConfig(app.id, 'sendmail', callback);
}
|
|
|
|
// Publishes the recvmail (IMAP) addon env vars for an app. An existing IMAP
// password is reused so re-running setup does not rotate credentials.
function setupRecvMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up recvmail');

    appdb.getAddonConfigByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        var password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length

        // v1 manifests get unprefixed variable names for backward compatibility
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

        var env = [
            { name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
            { name: `${envPrefix}MAIL_IMAP_USERNAME`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_IMAP_PASSWORD`, value: password },
            { name: `${envPrefix}MAIL_TO`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.mailboxDomain }
        ];

        // fixed copy-paste bug: this message previously said "sendmail"
        debugApp(app, 'Setting recvmail addon config to %j', env);
        appdb.setAddonConfig(app.id, 'recvmail', env, callback);
    });
}
|
|
|
|
// Removes the app's recvmail addon env vars from the addon config table.
function teardownRecvMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down recvmail');

    appdb.unsetAddonConfig(app.id, 'recvmail', callback);
}
|
|
|
|
// Derives a deterministic mysql database/user name from an app id. Hashing
// with md5 removes the "-" chars of the uuid; the result is truncated because
// mysql usernames are limited to 16 chars.
function mysqlDatabaseName(appId) {
    assert.strictEqual(typeof appId, 'string');

    const digest = crypto.createHash('md5').update(appId).digest('hex');

    return digest.substring(0, 16); // max length of mysql usernames is 16
}
|
|
|
|
// (Re)creates the platform mysql service container. When the image tag change
// requires an upgrade, all databases are exported before the old container is
// destroyed and re-imported into the new one once it is healthy.
function startMysql(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.mysql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh root password on every (re)create
    const cloudronToken = hat(8 * 128); // access token for the container's management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mysql.tag, tag);

    if (upgrading) debug('startMysql: mysql will be upgraded');
    const upgradeFunc = upgrading ? exportDatabase.bind(null, 'mysql') : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        // memory options are applied dynamically. import requires all the memory we can get
        const cmd = `docker run --restart=always -d --name="mysql" \
            --hostname mysql \
            --net cloudron \
            --net-alias mysql \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=mysql \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_MYSQL_TOKEN=${cloudronToken} \
            -e CLOUDRON_MYSQL_ROOT_HOST=172.18.0.1 \
            -e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
            -v "${dataDir}/mysql:/var/lib/mysql" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        // stop+rm tolerate a missing container (|| true) so first-time setup works
        async.series([
            shell.exec.bind(null, 'stopMysql', 'docker stop mysql || true'),
            shell.exec.bind(null, 'removeMysql', 'docker rm -f mysql || true'),
            shell.exec.bind(null, 'startMysql', cmd)
        ], function (error) {
            if (error) return callback(error);

            // wait for the healthcheck before importing or reporting success
            waitForContainer('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error) {
                if (error) return callback(error);
                if (!upgrading) return callback(null);

                importDatabase('mysql', callback);
            });
        });
    });
}
|
|
|
|
// Provisions a mysql database (or database prefix, for apps that use multiple
// databases) for an app via the mysql container's management API, then stores
// the resulting connection env vars. A previously provisioned password is
// reused so repeated setup calls do not rotate credentials.
function setupMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up mysql');

    appdb.getAddonConfigByName(app.id, 'mysql', '%MYSQL_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        // database, username and prefix all share the same derived name
        const tmp = mysqlDatabaseName(app.id);

        const data = {
            database: tmp,
            prefix: tmp,
            username: tmp,
            password: error ? hat(4 * 48) : existingPassword // see box#362 for password length
        };

        getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
            if (error) return callback(error);

            request.post(`https://${result.ip}:3000/` + (options.multipleDatabases ? 'prefixes' : 'databases') + `?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mysql: ${error.message}`));
                if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

                // v1 manifests get unprefixed variable names for backward compatibility
                const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

                var env = [
                    { name: `${envPrefix}MYSQL_USERNAME`, value: data.username },
                    { name: `${envPrefix}MYSQL_PASSWORD`, value: data.password },
                    { name: `${envPrefix}MYSQL_HOST`, value: 'mysql' },
                    { name: `${envPrefix}MYSQL_PORT`, value: '3306' }
                ];

                // multi-database apps get a prefix; single-database apps get a URL and db name
                if (options.multipleDatabases) {
                    env = env.concat({ name: `${envPrefix}MYSQL_DATABASE_PREFIX`, value: `${data.prefix}_` });
                } else {
                    env = env.concat(
                        { name: `${envPrefix}MYSQL_URL`, value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
                        { name: `${envPrefix}MYSQL_DATABASE`, value: data.database }
                    );
                }

                debugApp(app, 'Setting mysql addon config to %j', env);
                appdb.setAddonConfig(app.id, 'mysql', env, callback);
            });
        });
    });
}
|
|
|
|
// Clears the contents of the app's mysql database (or prefix) via the mysql
// container's management API. The database itself is kept.
function clearMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);

    getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const resource = options.multipleDatabases ? 'prefixes' : 'databases';
        const clearUrl = `https://${result.ip}:3000/${resource}/${database}/clear?access_token=${result.token}`;

        request.post(clearUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback();
        });
    });
}
|
|
|
|
// Drops the app's mysql database (or prefix) and user via the mysql
// container's management API, then removes the stored addon env vars.
function teardownMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);
    const username = database; // db and user share the same derived name

    getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        request.delete(`https://${result.ip}:3000/` + (options.multipleDatabases ? 'prefixes' : 'databases') + `/${database}?access_token=${result.token}&username=${username}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            appdb.unsetAddonConfig(app.id, 'mysql', callback);
        });
    });
}
|
|
|
|
// Issues a POST to url and streams the response body into filename. Used to
// download database dumps from the service containers' management APIs.
// The callback fires exactly once with an error on write/network/HTTP failure,
// or with no error once all data has been flushed to disk.
function pipeRequestToFile(url, filename, callback) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    const writeStream = fs.createWriteStream(filename);

    const done = once(function (error) { // the writeStream and the request can both error
        if (error) writeStream.close();
        callback(error);
    });

    // fixed broken error messages: '$(unknown)' was emitted literally instead
    // of interpolating the target filename
    writeStream.on('error', (error) => done(new BoxError(BoxError.FS_ERROR, `Error writing to ${filename}: ${error.message}`)));

    writeStream.on('open', function () {
        // note: do not attach to post callback handler because this will buffer the entire reponse!
        // see https://github.com/request/request/issues/2270
        const req = request.post(url, { rejectUnauthorized: false });
        req.on('error', (error) => done(new BoxError(BoxError.NETWORK_ERROR, `Request error writing to ${filename}: ${error.message}`))); // network error, dns error, request errored in middle etc
        req.on('response', function (response) {
            if (response.statusCode !== 200) return done(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code when piping ${url}: ${response.statusCode} message: ${response.statusMessage} filename: ${filename}`));

            response.pipe(writeStream).on('finish', done); // this is hit after data written to disk
        });
    });
}
|
|
|
|
// Streams a dump of the app's mysql database (or prefix) from the mysql
// container's management API into the local dump file.
function backupMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up mysql');

    const database = mysqlDatabaseName(app.id);

    getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const resource = options.multipleDatabases ? 'prefixes' : 'databases';
        const backupUrl = `https://${result.ip}:3000/${resource}/${database}/backup?access_token=${result.token}`;

        pipeRequestToFile(backupUrl, dumpPath('mysql', app.id), callback);
    });
}
|
|
|
|
// Restores the app's mysql database (or prefix) by streaming the local dump
// file into the mysql container's management API.
function restoreMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);

    debugApp(app, 'restoreMySql');

    callback = once(callback); // protect from multiple returns with streams

    getContainerDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        var input = fs.createReadStream(dumpPath('mysql', app.id));
        input.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring mysql: ${error.message}`)));

        const restoreReq = request.post(`https://${result.ip}:3000/` + (options.multipleDatabases ? 'prefixes' : 'databases') + `/${database}/restore?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        // stream the dump file as the request body
        input.pipe(restoreReq);
    });
}
|
|
|
|
// Derives deterministic postgresql database/user names from an app id.
// Dashes are stripped from the uuid since they are not valid in unquoted
// postgres identifiers.
function postgreSqlNames(appId) {
    assert.strictEqual(typeof appId, 'string'); // consistent with mysqlDatabaseName

    const compactId = appId.replace(/-/g, ''); // avoid mutating the parameter
    return { database: `db${compactId}`, username: `user${compactId}` };
}
|
|
|
|
// (Re)creates the platform postgresql service container. When the image tag
// change requires an upgrade, all databases are exported beforehand and
// re-imported once the new container is healthy.
function startPostgresql(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.postgresql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh root password on every (re)create
    const cloudronToken = hat(8 * 128); // access token for the container's management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.postgresql.tag, tag);

    if (upgrading) debug('startPostgresql: postgresql will be upgraded');
    const upgradeFunc = upgrading ? exportDatabase.bind(null, 'postgresql') : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        // memory options are applied dynamically. import requires all the memory we can get
        const cmd = `docker run --restart=always -d --name="postgresql" \
            --hostname postgresql \
            --net cloudron \
            --net-alias postgresql \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=postgresql \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_POSTGRESQL_ROOT_PASSWORD="${rootPassword}" \
            -e CLOUDRON_POSTGRESQL_TOKEN="${cloudronToken}" \
            -v "${dataDir}/postgresql:/var/lib/postgresql" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        // stop+rm tolerate a missing container (|| true) so first-time setup works
        async.series([
            shell.exec.bind(null, 'stopPostgresql', 'docker stop postgresql || true'),
            shell.exec.bind(null, 'removePostgresql', 'docker rm -f postgresql || true'),
            shell.exec.bind(null, 'startPostgresql', cmd)
        ], function (error) {
            if (error) return callback(error);

            // wait for the healthcheck before importing or reporting success
            waitForContainer('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error) {
                if (error) return callback(error);
                if (!upgrading) return callback(null);

                importDatabase('postgresql', callback);
            });
        });
    });
}
|
|
|
|
// Provisions a postgresql database for an app via the postgresql container's
// management API, then stores the resulting connection env vars. A previously
// provisioned password is reused so repeated setup calls do not rotate
// credentials.
function setupPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up postgresql');

    const { database, username } = postgreSqlNames(app.id);

    appdb.getAddonConfigByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        const data = {
            database: database,
            username: username,
            password: error ? hat(4 * 128) : existingPassword,
            locale: options.locale || 'C' // database locale from the manifest addon options
        };

        getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
            if (error) return callback(error);

            request.post(`https://${result.ip}:3000/databases?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up postgresql: ${error.message}`));
                if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

                // v1 manifests get unprefixed variable names for backward compatibility
                const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

                var env = [
                    { name: `${envPrefix}POSTGRESQL_URL`, value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
                    { name: `${envPrefix}POSTGRESQL_USERNAME`, value: data.username },
                    { name: `${envPrefix}POSTGRESQL_PASSWORD`, value: data.password },
                    { name: `${envPrefix}POSTGRESQL_HOST`, value: 'postgresql' },
                    { name: `${envPrefix}POSTGRESQL_PORT`, value: '5432' },
                    { name: `${envPrefix}POSTGRESQL_DATABASE`, value: data.database }
                ];

                debugApp(app, 'Setting postgresql addon config to %j', env);
                appdb.setAddonConfig(app.id, 'postgresql', env, callback);
            });
        });
    });
}
|
|
|
|
// Clears the contents of the app's postgresql database via the postgresql
// container's management API. The database itself is kept.
function clearPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const { database, username } = postgreSqlNames(app.id);
    const locale = options.locale || 'C';

    debugApp(app, 'Clearing postgresql');

    getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const clearUrl = `https://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}&locale=${locale}`;

        request.post(clearUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });
    });
}
|
|
|
|
// Drops the app's postgresql database and user via the postgresql container's
// management API, then removes the stored addon env vars.
function teardownPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const { database, username } = postgreSqlNames(app.id);

    getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const deleteUrl = `https://${result.ip}:3000/databases/${database}?access_token=${result.token}&username=${username}`;

        request.delete(deleteUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            appdb.unsetAddonConfig(app.id, 'postgresql', callback);
        });
    });
}
|
|
|
|
// Streams a dump of the app's postgresql database from the postgresql
// container's management API into the local dump file.
function backupPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up postgresql');

    const { database } = postgreSqlNames(app.id);

    getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, details) {
        if (error) return callback(error);

        const backupUrl = `https://${details.ip}:3000/databases/${database}/backup?access_token=${details.token}`;

        pipeRequestToFile(backupUrl, dumpPath('postgresql', app.id), callback);
    });
}
|
|
|
|
// Restores the app's postgresql database by streaming the local dump file
// into the postgresql container's management API.
function restorePostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Restore postgresql');

    const { database, username } = postgreSqlNames(app.id);

    callback = once(callback); // protect from multiple returns with streams

    getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        var input = fs.createReadStream(dumpPath('postgresql', app.id));
        input.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring postgresql: ${error.message}`)));

        const restoreReq = request.post(`https://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}&username=${username}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        // stream the dump file as the request body
        input.pipe(restoreReq);
    });
}
|
|
|
|
// (Re)creates the platform STUN/TURN service container. The shared turn
// secret is created once and persisted so app configs (setupTurn) stay valid
// across restarts. Runs on the host network to expose the UDP port range.
function startTurn(existingInfra, serviceConfig, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof serviceConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    // get and ensure we have a turn secret
    var turnSecret = safe.fs.readFileSync(paths.ADDON_TURN_SECRET_FILE, 'utf8');
    if (!turnSecret) {
        turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
        safe.fs.writeFileSync(paths.ADDON_TURN_SECRET_FILE, turnSecret, 'utf8');
    }

    const tag = infra.images.turn.tag;
    const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit;
    const memory = system.getMemoryAllocation(memoryLimit);
    const realm = settings.adminFqdn();

    // this exports 3478/tcp, 5349/tls and 50000-51000/udp. note that this runs on the host network!
    const cmd = `docker run --restart=always -d --name="turn" \
        --hostname turn \
        --net host \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=turn \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -e CLOUDRON_TURN_SECRET="${turnSecret}" \
        -e CLOUDRON_REALM="${realm}" \
        --label isCloudronManaged=true \
        --read-only -v /tmp -v /run "${tag}"`;

    // stop+rm tolerate a missing container (|| true) so first-time setup works
    async.series([
        shell.exec.bind(null, 'stopTurn', 'docker stop turn || true'),
        shell.exec.bind(null, 'removeTurn', 'docker rm -f turn || true'),
        shell.exec.bind(null, 'startTurn', cmd)
    ], callback);
}
|
|
|
|
// (Re)creates the platform mongodb service container. When the image tag
// change requires an upgrade, all databases are exported beforehand and
// re-imported once the new container is healthy.
function startMongodb(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.mongodb.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh root password on every (re)create
    const cloudronToken = hat(8 * 128); // access token for the container's management API

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mongodb.tag, tag);

    if (upgrading) debug('startMongodb: mongodb will be upgraded');
    const upgradeFunc = upgrading ? exportDatabase.bind(null, 'mongodb') : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        // memory options are applied dynamically. import requires all the memory we can get
        const cmd = `docker run --restart=always -d --name="mongodb" \
            --hostname mongodb \
            --net cloudron \
            --net-alias mongodb \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=mongodb \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_MONGODB_ROOT_PASSWORD="${rootPassword}" \
            -e CLOUDRON_MONGODB_TOKEN="${cloudronToken}" \
            -v "${dataDir}/mongodb:/var/lib/mongodb" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        // stop+rm tolerate a missing container (|| true) so first-time setup works
        async.series([
            shell.exec.bind(null, 'stopMongodb', 'docker stop mongodb || true'),
            shell.exec.bind(null, 'removeMongodb', 'docker rm -f mongodb || true'),
            shell.exec.bind(null, 'startMongodb', cmd)
        ], function (error) {
            if (error) return callback(error);

            // wait for the healthcheck before importing or reporting success
            waitForContainer('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error) {
                if (error) return callback(error);
                if (!upgrading) return callback(null);

                importDatabase('mongodb', callback);
            });
        });
    });
}
|
|
|
|
// Provisions (or re-uses) this app's database in the shared mongodb container via
// the addon's REST API and records the resulting credentials as addon config.
// Setup may run multiple times for the same app; existing credentials are preserved.
function setupMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up mongodb');

    appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_PASSWORD', function (passwordError, existingPassword) {
        if (passwordError && passwordError.reason !== BoxError.NOT_FOUND) return callback(passwordError);

        appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_DATABASE', function (error, database) {
            // BUGFIX: this error used to shadow the password-lookup error. A real (non-NOT_FOUND)
            // lookup failure was silently ignored AND caused the password to be regenerated,
            // clobbering existing credentials. Propagate unexpected errors instead.
            if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

            database = database || hat(8 * 8); // 16 bytes. keep this short, so as to not overflow the 127 byte index length in MongoDB < 4.4

            const data = {
                database: database,
                username: app.id,
                password: passwordError ? hat(4 * 128) : existingPassword, // generate only on first-time setup
                oplog: !!options.oplog
            };

            getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
                if (error) return callback(error);

                request.post(`https://${result.ip}:3000/databases?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                    if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mongodb: ${error.message}`));
                    if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

                    const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests use unprefixed variable names

                    var env = [
                        { name: `${envPrefix}MONGODB_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
                        { name: `${envPrefix}MONGODB_USERNAME`, value : data.username },
                        { name: `${envPrefix}MONGODB_PASSWORD`, value: data.password },
                        { name: `${envPrefix}MONGODB_HOST`, value : 'mongodb' },
                        { name: `${envPrefix}MONGODB_PORT`, value : '27017' },
                        { name: `${envPrefix}MONGODB_DATABASE`, value : data.database }
                    ];

                    if (options.oplog) {
                        env.push({ name: `${envPrefix}MONGODB_OPLOG_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });
                    }

                    debugApp(app, 'Setting mongodb addon config to %j', env);
                    appdb.setAddonConfig(app.id, 'mongodb', env, callback);
                });
            });
        });
    });
}
|
|
|
|
// Empties the app's mongodb database (keeps the database itself and its credentials).
function clearMongodb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, details) {
        if (error) return callback(error);

        appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_DATABASE', function (error, database) {
            if (error) return callback(error);

            const clearUrl = `https://${details.ip}:3000/databases/${database}/clear?access_token=${details.token}`;
            request.post(clearUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mongodb: ${error.message}`));
                if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

                callback();
            });
        });
    });
}
|
|
|
|
// Drops the app's mongodb database and removes the stored addon config.
// A missing database config means there is nothing to tear down.
function teardownMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, details) {
        if (error) return callback(error);

        appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_DATABASE', function (error, database) {
            if (error) {
                if (error.reason === BoxError.NOT_FOUND) return callback(null); // never set up
                return callback(error);
            }

            const deleteUrl = `https://${details.ip}:3000/databases/${database}?access_token=${details.token}`;
            request.delete(deleteUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb: ${error.message}`));
                if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

                appdb.unsetAddonConfig(app.id, 'mongodb', callback);
            });
        });
    });
}
|
|
|
|
// Streams a dump of the app's mongodb database into the app's dump file.
function backupMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up mongodb');

    getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, details) {
        if (error) return callback(error);

        appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_DATABASE', function (error, database) {
            if (error) return callback(error);

            const backupUrl = `https://${details.ip}:3000/databases/${database}/backup?access_token=${details.token}`;
            pipeRequestToFile(backupUrl, dumpPath('mongodb', app.id), callback);
        });
    });
}
|
|
|
|
// Restores the app's mongodb database by streaming the dump file to the addon's restore endpoint.
function restoreMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    callback = once(callback); // protect from multiple returns with streams

    debugApp(app, 'restoreMongoDb');

    getContainerDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, details) {
        if (error) return callback(error);

        appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_DATABASE', function (error, database) {
            if (error) return callback(error);

            const dumpStream = fs.createReadStream(dumpPath('mongodb', app.id));
            dumpStream.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring mongodb: ${error.message}`)));

            const restoreUrl = `https://${details.ip}:3000/databases/${database}/restore?access_token=${details.token}`;
            dumpStream.pipe(request.post(restoreUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mongodb: ${error.message}`));
                if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

                callback(null);
            }));
        });
    });
}
|
|
|
|
// (Re)creates the graphite metrics container. On an image upgrade the data dir
// is wiped via RMADDONDIR_CMD — presumably because the on-disk format is not
// compatible across versions (TODO confirm).
function startGraphite(existingInfra, serviceConfig, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof serviceConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.graphite.tag;
    const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024; // bytes; defaults to 256MB
    const memory = system.getMemoryAllocation(memoryLimit);

    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);

    if (upgrading) debug('startGraphite: graphite will be upgraded');

    const cmd = `docker run --restart=always -d --name="graphite" \
        --hostname graphite \
        --net cloudron \
        --net-alias graphite \
        --log-driver syslog \
        --log-opt syslog-address=udp://127.0.0.1:2514 \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=graphite \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -p 127.0.0.1:2003:2003 \
        -p 127.0.0.1:2004:2004 \
        -p 127.0.0.1:8417:8000 \
        -v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
        --label isCloudronManaged=true \
        --read-only -v /tmp -v /run "${tag}"`;

    async.series([
        shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'), // '|| true' makes stop/rm idempotent
        shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
        (done) => {
            if (!upgrading) return done(); // keep existing metrics data when not upgrading
            shell.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}, done);
        },
        shell.exec.bind(null, 'startGraphite', cmd)
    ], function (error) {
        // restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
        if (!error) setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);

        callback(error);
    });
}
|
|
|
|
// Marks the app as proxy-auth protected when the app has sso enabled and its
// manifest requests the proxyAuth addon; otherwise this is a no-op.
function setupProxyAuth(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up proxyAuth');

    const manifestAddons = app.manifest.addons;
    if (!app.sso || !manifestAddons || !manifestAddons.proxyAuth) return callback(); // addon not active for this app

    appdb.setAddonConfig(app.id, 'proxyauth', [ { name: 'CLOUDRON_PROXY_AUTH', value: '1' } ], callback);
}
|
|
|
|
// Removes the proxyauth addon config for the app.
function teardownProxyAuth(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    return appdb.unsetAddonConfig(app.id, 'proxyauth', callback);
}
|
|
|
|
// Recreates the per-app redis containers. On an image upgrade each app's data
// is dumped first and all dumps are re-imported once every container is back up.
function startRedis(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.redis.tag;
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis.tag, tag);

    apps.getAll(function (error, installedApps) {
        if (error) return callback(error);

        function recreateAppRedis(app, done) {
            if (!('redis' in app.manifest.addons)) return done(); // app doesn't use the addon

            const redisName = 'redis-' + app.id;

            async.series([
                (next) => upgrading ? backupRedis(app, {}, next) : next(), // dump before the old container goes away
                shell.exec.bind(null, 'stopRedis', `docker stop ${redisName} || true`), // redis will backup as part of signal handling
                shell.exec.bind(null, 'removeRedis', `docker rm -f ${redisName} || true`),
                setupRedis.bind(null, app, app.manifest.addons.redis) // starts the container
            ], done);
        }

        async.eachSeries(installedApps, recreateAppRedis, function (error) {
            if (error) return callback(error);
            if (!upgrading) return callback();

            importDatabase('redis', callback); // re-import the dumps taken above
        });
    });
}
|
|
|
|
// Ensures that app's addon redis container is running. Can be called when named container already exists/running
|
|
function setupRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object'); // redis addon options from the manifest; may contain noPassword
    assert.strictEqual(typeof callback, 'function');

    const redisName = 'redis-' + app.id;

    appdb.getAddonConfigByName(app.id, 'redis', '%REDIS_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error); // NOT_FOUND means first-time setup

        const redisPassword = options.noPassword ? '' : (error ? hat(4 * 48) : existingPassword); // see box#362 for password length
        const redisServiceToken = hat(4 * 48); // token for the container's management REST API (port 3000)

        // Compute redis memory limit based on app's memory limit (this is arbitrary)
        const memoryLimit = app.servicesConfig['redis'] ? app.servicesConfig['redis'].memoryLimit : APP_SERVICES['redis'].defaultMemoryLimit;
        const memory = system.getMemoryAllocation(memoryLimit);

        const tag = infra.images.redis.tag;
        const label = app.fqdn;
        // note that we do not add appId label because this interferes with the stop/start app logic
        const cmd = `docker run --restart=always -d --name=${redisName} \
            --hostname ${redisName} \
            --label=location=${label} \
            --net cloudron \
            --net-alias ${redisName} \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag="${redisName}" \
            -m ${memory} \
            --memory-swap ${memoryLimit} \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
            -e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
            -v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run ${tag}`;

        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests use unprefixed variable names

        var env = [
            { name: `${envPrefix}REDIS_URL`, value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
            { name: `${envPrefix}REDIS_PASSWORD`, value: redisPassword },
            { name: `${envPrefix}REDIS_HOST`, value: redisName },
            { name: `${envPrefix}REDIS_PORT`, value: '6379' }
        ];

        async.series([
            (next) => {
                docker.inspect(redisName, function (inspectError, result) { // fast-path
                    if (!inspectError) {
                        // container already exists (configure/crash restart); do not recreate it
                        debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
                        return next();
                    }
                    shell.exec('startRedis', cmd, next);
                });
            },
            appdb.setAddonConfig.bind(null, app.id, 'redis', env),
            waitForContainer.bind(null, 'redis-' + app.id, 'CLOUDRON_REDIS_TOKEN') // wait until the service answers
        ], function (error) {
            if (error) debug('Error setting up redis: ', error);
            callback(error);
        });
    });
}
|
|
|
|
// Flushes all data from the app's redis container (the container keeps running).
function clearRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Clearing redis');

    getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, details) {
        if (error) return callback(error);

        const clearUrl = `https://${details.ip}:3000/clear?access_token=${details.token}`;
        request.post(clearUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing redis: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing redis. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });
    });
}
|
|
|
|
// Removes the app's redis container, its data directory, its logs (best-effort)
// and the stored addon config.
function teardownRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const containerName = `redis-${app.id}`;

    docker.deleteContainer(containerName, function (error) {
        if (error) return callback(error);

        shell.sudo('removeVolume', [ RMADDONDIR_CMD, 'redis', app.id ], {}, function (error) {
            if (error) return callback(new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${error.message}`));

            rimraf(path.join(paths.LOG_DIR, containerName), function (error) {
                if (error) debugApp(app, 'cannot cleanup logs: %s', error); // best-effort; do not fail teardown

                appdb.unsetAddonConfig(app.id, 'redis', callback);
            });
        });
    });
}
|
|
|
|
// Streams a dump of the app's redis data into the app's dump file.
function backupRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up redis');

    getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, details) {
        if (error) return callback(error);

        const backupUrl = `https://${details.ip}:3000/backup?access_token=${details.token}`;
        pipeRequestToFile(backupUrl, dumpPath('redis', app.id), callback);
    });
}
|
|
|
|
// Restores the app's redis data by streaming the dump file to the addon's
// restore endpoint. Falls back to the legacy dump location for old backups.
function restoreRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Restoring redis');

    callback = once(callback); // protect from multiple returns with streams

    getContainerDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, details) {
        if (error) return callback(error);

        const newDumpLocation = dumpPath('redis', app.id);
        const legacyDumpLocation = path.join(paths.APPS_DATA_DIR, app.id, 'redis/dump.rdb'); // old location of dumps
        const dumpStream = fs.createReadStream(fs.existsSync(newDumpLocation) ? newDumpLocation : legacyDumpLocation);

        dumpStream.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring redis: ${error.message}`)));

        const restoreUrl = `https://${details.ip}:3000/restore?access_token=${details.token}`;
        dumpStream.pipe(request.post(restoreUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring redis: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring redis. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        }));
    });
}
|
|
|
|
// Reports the turn container's run state plus its memory usage.
function statusTurn(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.inspect('turn', function (error, container) {
        if (error) {
            if (error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
            return callback(error);
        }

        docker.memoryUsage(container.Id, function (error, stats) {
            if (error) return callback(error);

            const usage = stats.memory_stats.usage;
            callback(null, {
                status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                memoryUsed: usage,
                memoryPercent: parseInt(100 * usage / stats.memory_stats.limit)
            });
        });
    });
}
|
|
|
|
// Reports whether the docker daemon answers a ping. Never errors out.
function statusDocker(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.ping(function (error) {
        const status = error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE;
        callback(null, { status: status });
    });
}
|
|
|
|
// Kicks off a docker daemon restart. Fire-and-forget: the restart result is
// only logged (NOOP_CALLBACK) and the callback returns immediately.
function restartDocker(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}, NOOP_CALLBACK);
    return callback(null);
}
|
|
|
|
// Reports whether the unbound systemd unit is active. Never errors out.
function statusUnbound(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.exec('statusUnbound', 'systemctl is-active unbound', function (error) {
        const status = error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE;
        callback(null, { status: status });
    });
}
|
|
|
|
// Kicks off an unbound restart. Fire-and-forget: the restart result is only
// logged (NOOP_CALLBACK) and the callback returns immediately.
function restartUnbound(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}, NOOP_CALLBACK);
    return callback(null);
}
|
|
|
|
// Reports whether the nginx systemd unit is active. Never errors out.
function statusNginx(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.exec('statusNginx', 'systemctl is-active nginx', function (error) {
        const status = error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE;
        callback(null, { status: status });
    });
}
|
|
|
|
// Kicks off an nginx restart. Fire-and-forget: the restart result is only
// logged (NOOP_CALLBACK) and the callback returns immediately.
function restartNginx(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}, NOOP_CALLBACK);
    return callback(null);
}
|
|
|
|
// Reports the sftp container's run state plus its memory usage.
function statusSftp(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.inspect('sftp', function (error, container) {
        if (error) {
            if (error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
            return callback(error);
        }

        // NOTE(review): this passes the container name, while statusTurn passes container.Id —
        // confirm docker.memoryUsage accepts both
        docker.memoryUsage('sftp', function (error, stats) {
            if (error) return callback(error);

            const usage = stats.memory_stats.usage;
            callback(null, {
                status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                memoryUsed: usage,
                memoryPercent: parseInt(100 * usage / stats.memory_stats.limit)
            });
        });
    });
}
|
|
|
|
// Reports the graphite container's state. The web ui is probed to distinguish
// 'starting' from 'active'; memory usage is included once it responds.
function statusGraphite(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.inspect('graphite', function (error, container) {
        if (error) {
            if (error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
            return callback(error);
        }

        request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 20000 }, function (error, response) {
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
            if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });

            docker.memoryUsage('graphite', function (error, stats) {
                if (error) return callback(error);

                const usage = stats.memory_stats.usage;
                callback(null, {
                    status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                    memoryUsed: usage,
                    memoryPercent: parseInt(100 * usage / stats.memory_stats.limit)
                });
            });
        });
    });
}
|
|
|
|
// Restarts the graphite container; collectd is restarted a minute later so it
// re-attaches to graphite (fire-and-forget, errors only logged).
function restartGraphite(callback) {
    assert.strictEqual(typeof callback, 'function');

    setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);

    docker.restartContainer('graphite', callback);
}
|
|
|
|
// Removes the oauth addon config for the app.
function teardownOauth(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'teardownOauth');

    return appdb.unsetAddonConfig(app.id, 'oauth', callback);
}
|