Files
cloudron-box/src/addons.js

1803 lines
72 KiB
JavaScript
Raw Normal View History

'use strict';

// Public interface of the addons subsystem. Function declarations are
// hoisted, so the export table can safely precede the implementations below.
exports = module.exports = {
    getServices,
    getService,
    configureService,
    getServiceLogs,
    restartService,
    startServices,
    updateServiceConfig,

    setupAddons,
    teardownAddons,
    backupAddons,
    restoreAddons,
    clearAddons,

    getEnvironment,
    getMountsSync,
    getContainerNamesSync,

    getServiceDetails,

    // exported for testing
    _setupOauth: setupOauth,
    _teardownOauth: teardownOauth,

    SERVICE_STATUS_STARTING: 'starting', // container up, waiting for healthcheck
    SERVICE_STATUS_ACTIVE: 'active',
    SERVICE_STATUS_STOPPED: 'stopped'
};
2020-02-06 16:57:33 +01:00
var appdb = require('./appdb.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
2019-09-23 12:13:21 -07:00
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
crypto = require('crypto'),
debug = require('debug')('box:addons'),
2016-04-18 10:37:33 -07:00
docker = require('./docker.js'),
fs = require('fs'),
graphs = require('./graphs.js'),
hat = require('./hat.js'),
2016-05-24 13:06:59 -07:00
infra = require('./infra_version.js'),
2018-03-07 20:39:58 -08:00
mail = require('./mail.js'),
once = require('once'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
2018-09-18 14:15:23 -07:00
rimraf = require('rimraf'),
safe = require('safetydance'),
semver = require('semver'),
2018-11-20 16:53:27 +01:00
settings = require('./settings.js'),
sftp = require('./sftp.js'),
shell = require('./shell.js'),
2018-11-16 17:53:22 +01:00
spawn = require('child_process').spawn,
2018-11-19 13:31:03 +01:00
split = require('split'),
request = require('request'),
2016-06-03 14:56:45 +02:00
util = require('util');
// Default lifecycle hook for addons that need no work: succeeds immediately.
const NOOP = function (app, options, callback) { return callback(); };
// Fire-and-forget completion handler: errors are logged via debug, never propagated.
const NOOP_CALLBACK = function (error) { if (error) debug(error); };

// Helper script used to remove an addon's data directory.
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
// Each entry provides the five lifecycle hooks: setup, teardown, backup, restore, clear.
var KNOWN_ADDONS = {
    email: { setup: setupEmail, teardown: teardownEmail, backup: NOOP, restore: setupEmail, clear: NOOP },
    ldap: { setup: setupLdap, teardown: teardownLdap, backup: NOOP, restore: setupLdap, clear: NOOP },
    // no backup hook because localstorage data is already inside app data
    localstorage: { setup: setupLocalStorage, teardown: teardownLocalStorage, backup: NOOP, restore: NOOP, clear: clearLocalStorage },
    mongodb: { setup: setupMongoDb, teardown: teardownMongoDb, backup: backupMongoDb, restore: restoreMongoDb, clear: clearMongodb },
    mysql: { setup: setupMySql, teardown: teardownMySql, backup: backupMySql, restore: restoreMySql, clear: clearMySql },
    oauth: { setup: setupOauth, teardown: teardownOauth, backup: NOOP, restore: setupOauth, clear: NOOP },
    postgresql: { setup: setupPostgreSql, teardown: teardownPostgreSql, backup: backupPostgreSql, restore: restorePostgreSql, clear: clearPostgreSql },
    recvmail: { setup: setupRecvMail, teardown: teardownRecvMail, backup: NOOP, restore: setupRecvMail, clear: NOOP },
    redis: { setup: setupRedis, teardown: teardownRedis, backup: backupRedis, restore: restoreRedis, clear: clearRedis },
    sendmail: { setup: setupSendMail, teardown: teardownSendMail, backup: NOOP, restore: setupSendMail, clear: NOOP },
    // scheduler and docker addons have no addon-managed state; all hooks are no-ops
    scheduler: { setup: NOOP, teardown: NOOP, backup: NOOP, restore: NOOP, clear: NOOP },
    docker: { setup: NOOP, teardown: NOOP, backup: NOOP, restore: NOOP, clear: NOOP }
};
// Platform services the box manages. Each entry knows how to query status,
// how to restart the service and what memory limit to apply by default.
// Defaults scale with total RAM: one extra unit per 4GB, rounded.
const SERVICE_MEMORY_SCALE = 1 + Math.round(os.totalmem() / (1024 * 1024 * 1024) / 4);
const MEGABYTE = 1024 * 1024;

const KNOWN_SERVICES = {
    mail: {
        status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),
        restart: mail.restartMail,
        defaultMemoryLimit: Math.max(SERVICE_MEMORY_SCALE * 128, 256) * MEGABYTE // at least 256MB
    },
    mongodb: {
        status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'),
        restart: restartContainer.bind(null, 'mongodb'),
        defaultMemoryLimit: SERVICE_MEMORY_SCALE * 200 * MEGABYTE
    },
    mysql: {
        status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'),
        restart: restartContainer.bind(null, 'mysql'),
        defaultMemoryLimit: SERVICE_MEMORY_SCALE * 256 * MEGABYTE
    },
    postgresql: {
        status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'),
        restart: restartContainer.bind(null, 'postgresql'),
        defaultMemoryLimit: SERVICE_MEMORY_SCALE * 256 * MEGABYTE
    },
    // a defaultMemoryLimit of 0 means no memory config is exposed via the API (see getService)
    docker: { status: statusDocker, restart: restartDocker, defaultMemoryLimit: 0 },
    unbound: { status: statusUnbound, restart: restartUnbound, defaultMemoryLimit: 0 },
    sftp: { status: statusSftp, restart: restartContainer.bind(null, 'sftp'), defaultMemoryLimit: 256 * MEGABYTE },
    graphite: { status: statusGraphite, restart: restartContainer.bind(null, 'graphite'), defaultMemoryLimit: 75 * MEGABYTE },
    nginx: { status: statusNginx, restart: restartNginx, defaultMemoryLimit: 0 }
};
2019-03-19 15:56:29 -07:00
// Emit a debug line prefixed with the app's fqdn (falling back to location).
// Remaining arguments are formatted exactly like util.format/printf.
function debugApp(app, ...args) {
    assert(typeof app === 'object');

    const prefix = app.fqdn || app.location;
    debug(`${prefix} ${util.format(...args)}`);
}
// Split a docker image tag of the form repo:version@sha256:digest into parts.
// version is returned as a parsed semver object; digest is undefined when absent.
function parseImageTag(tag) {
    const [repository] = tag.split(':', 1); // everything before the first ':'
    const rest = tag.slice(repository.length + 1);
    const [version] = rest.split('@', 1); // version part, before any '@'
    const digestPart = rest.slice(version.length + 1); // e.g. 'sha256:abcd' (may be '')
    const digest = digestPart.split(':', 2)[1];

    return { repository, version: semver.parse(version), digest };
}
// An image change requires an "upgrade" only when the semver major differs.
function requiresUpgrade(existingTag, currentTag) {
    const existing = parseImageTag(existingTag);
    const current = parseImageTag(currentTag);

    return existing.version.major !== current.version.major;
}
2018-11-09 12:02:38 -08:00
// Location of the database dump for an addon of a given app.
// Returns undefined for addons that have no dump file.
function dumpPath(addon, appId) {
    const dumpName = {
        postgresql: 'postgresqldump',
        mysql: 'mysqldump',
        mongodb: 'mongodbdump',
        redis: 'dump.rdb'
    }[addon];

    return dumpName ? path.join(paths.APPS_DATA_DIR, appId, dumpName) : undefined;
}
2019-12-04 13:17:58 -08:00
// Recreate a service's docker container when it went missing but the platform
// infra version is unchanged. Passing an infra version of 'none' will not
// attempt to purge existing data (not sure if this is good or bad).
function rebuildService(serviceName, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert.strictEqual(typeof callback, 'function');

    switch (serviceName) {
    case 'mongodb': return startMongodb({ version: 'none' }, callback);
    case 'postgresql': return startPostgresql({ version: 'none' }, callback);
    case 'mysql': return startMysql({ version: 'none' }, callback);
    case 'sftp': return sftp.startSftp({ version: 'none' }, callback);
    case 'graphite': return graphs.startGraphite({ version: 'none' }, callback);
    default: return callback(); // nothing to rebuild for now
    }
}
// Stop and start a service container. If the container turned out to be
// missing, report success immediately and rebuild it in the background.
function restartContainer(serviceName, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert.strictEqual(typeof callback, 'function');

    docker.stopContainer(serviceName, function (stopError) {
        if (stopError) return callback(stopError);

        docker.startContainer(serviceName, function (startError) {
            if (!startError || startError.reason !== BoxError.NOT_FOUND) return callback(startError);

            callback(null); // callback early since rebuilding takes long
            rebuildService(serviceName, function (rebuildError) {
                if (rebuildError) console.error(`Unable to rebuild service ${serviceName}`, rebuildError);
            });
        });
    });
}
2019-03-19 15:56:29 -07:00
// Inspect a service container and return its IP on the cloudron network, its
// auth token (read from the container environment) and its docker State.
function getServiceDetails(containerName, tokenEnvName, callback) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');
    assert.strictEqual(typeof callback, 'function');

    docker.inspect(containerName, function (error, result) {
        if (error) return callback(error);

        const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
        if (!ip) return callback(new BoxError(BoxError.INACTIVE, `Error getting IP of ${containerName} service`));

        // extract the cloudron token for auth
        const env = safe.query(result, 'Config.Env', null);
        if (!env) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error inspecting environment of ${containerName} service`));

        // env entries look like NAME=value; locate the token variable
        const tokenEntry = env.find(function (e) { return e.startsWith(tokenEnvName); });
        if (!tokenEntry) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`));

        const token = tokenEntry.slice(tokenEnvName.length + 1); // +1 for the = sign
        if (!token) return callback(new BoxError(BoxError.DOCKER_ERROR, `Error getting token of ${containerName} service`));

        callback(null, { ip: ip, token: token, state: result.State });
    });
}
2018-12-02 18:05:19 -08:00
// Determine the status of a containerized service by hitting its healthcheck
// endpoint and reading its docker memory stats.
// Results: { status, memoryUsed, memoryPercent } or { status, error } while starting.
function containerStatus(addonName, addonTokenName, callback) {
    assert.strictEqual(typeof addonName, 'string');
    assert.strictEqual(typeof addonTokenName, 'string');
    assert.strictEqual(typeof callback, 'function');

    getServiceDetails(addonName, addonTokenName, function (error, addonDetails) {
        // a missing container simply means the service is stopped
        if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${addonName}: ${error.message}` });

            // guard: response.body can be undefined for empty or non-JSON replies
            const body = response.body || {};
            if (response.statusCode !== 200 || !body.status) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${addonName}. Status code: ${response.statusCode} message: ${body.message}` });

            docker.memoryUsage(addonName, function (error, result) {
                if (error) return callback(error);

                callback(null, {
                    status: addonDetails.state.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                    memoryUsed: result.memory_stats.usage,
                    // Math.round instead of parseInt: parseInt coerces the number
                    // through a string and misparses exponent notation (e.g. 1e-7 -> 1)
                    memoryPercent: Math.round(100 * result.memory_stats.usage / result.memory_stats.limit)
                });
            });
        });
    });
}
2018-12-02 18:05:19 -08:00
// List the names of all known platform services.
function getServices(callback) {
    assert.strictEqual(typeof callback, 'function');

    callback(null, Object.keys(KNOWN_SERVICES));
}
2018-12-02 18:05:19 -08:00
// Fetch status, memory usage and memory configuration of a single service.
function getService(serviceName, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert.strictEqual(typeof callback, 'function');

    const service = KNOWN_SERVICES[serviceName];
    if (!service) return callback(new BoxError(BoxError.NOT_FOUND));

    const result = {
        name: serviceName,
        status: null,
        memoryUsed: 0,
        memoryPercent: 0,
        error: null,
        config: {
            // If a property is not set then we cannot change it through the api, see below
            // memory: 0,
            // memorySwap: 0
        }
    };

    settings.getPlatformConfig(function (error, platformConfig) {
        if (error) return callback(error);

        // prefer admin-configured limits; fall back to RAM-scaled defaults
        const serviceConfig = platformConfig[serviceName];
        if (serviceConfig && serviceConfig.memory && serviceConfig.memorySwap) {
            result.config.memory = serviceConfig.memory;
            result.config.memorySwap = serviceConfig.memorySwap;
        } else if (service.defaultMemoryLimit) {
            result.config.memory = service.defaultMemoryLimit;
            result.config.memorySwap = result.config.memory * 2; // default swap is twice the memory
        }

        service.status(function (error, statusInfo) {
            if (error) return callback(error);

            result.status = statusInfo.status;
            result.memoryUsed = statusInfo.memoryUsed;
            result.memoryPercent = statusInfo.memoryPercent;
            result.error = statusInfo.error || null;

            callback(null, result);
        });
    });
}
2018-12-02 18:05:19 -08:00
// Persist custom memory limits for a service. Omitting memory/memorySwap in
// data clears the entry so the defaults apply again.
function configureService(serviceName, data, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert.strictEqual(typeof data, 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!KNOWN_SERVICES[serviceName]) return callback(new BoxError(BoxError.NOT_FOUND));

    settings.getPlatformConfig(function (error, platformConfig) {
        if (error) return callback(error);

        if (data.memory && data.memorySwap) {
            // merge into any existing entry to keep other keys intact
            const serviceConfig = platformConfig[serviceName] || {};
            serviceConfig.memory = data.memory;
            serviceConfig.memorySwap = data.memorySwap;
            platformConfig[serviceName] = serviceConfig;
        } else {
            delete platformConfig[serviceName]; // if not specified we clear the entry and use defaults
        }

        settings.setPlatformConfig(platformConfig, function (error) {
            if (error) return callback(error);

            callback(null);
        });
    });
}
2018-12-02 19:07:12 -08:00
// Stream logs of a service to the caller. journald-based services (docker,
// unbound) are read via journalctl; everything else tails its app.log file.
// The callback receives a transform stream emitting either raw lines or
// JSON objects ({ realtimeTimestamp, message, source }) depending on options.format.
function getServiceLogs(serviceName, options, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert(options && typeof options === 'object');
    assert.strictEqual(typeof callback, 'function');
    assert.strictEqual(typeof options.lines, 'number');
    assert.strictEqual(typeof options.format, 'string');
    assert.strictEqual(typeof options.follow, 'boolean');

    if (!KNOWN_SERVICES[serviceName]) return callback(new BoxError(BoxError.NOT_FOUND));

    debug(`Getting logs for ${serviceName}`);

    var lines = options.lines,
        format = options.format || 'json',
        follow = options.follow;

    let cmd, args = [];

    // docker and unbound use journald
    if (serviceName === 'docker' || serviceName === 'unbound') {
        cmd = 'journalctl';
        args.push('--lines=' + (lines === -1 ? 'all' : lines)); // -1 means the full log
        args.push(`--unit=${serviceName}`);
        args.push('--no-pager');
        args.push('--output=short-iso'); // so the mapper below can parse the timestamp
        if (follow) args.push('--follow');
    } else {
        cmd = '/usr/bin/tail';
        args.push('--lines=' + (lines === -1 ? '+1' : lines)); // tail spells "everything" as +1
        if (follow) args.push('--follow', '--retry', '--quiet'); // same as -F. to make it work if file doesn't exist, --quiet to not output file headers, which are no logs
        args.push(path.join(paths.LOG_DIR, serviceName, 'app.log'));
    }

    var cp = spawn(cmd, args);

    // split() gives us one callback invocation per line of child output
    var transformStream = split(function mapper(line) {
        if (format !== 'json') return line + '\n';

        var data = line.split(' '); // logs are <ISOtimestamp> <msg>
        var timestamp = (new Date(data[0])).getTime();
        if (isNaN(timestamp)) timestamp = 0;
        var message = line.slice(data[0].length+1);

        // ignore faulty empty logs
        if (!timestamp && !message) return;

        return JSON.stringify({
            realtimeTimestamp: timestamp * 1000, // ms -> µs; presumably to match journald units — TODO confirm
            message: message,
            source: serviceName
        }) + '\n';
    });

    transformStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process

    cp.stdout.pipe(transformStream);

    callback(null, transformStream);
}
2018-12-02 18:05:19 -08:00
// Restart a known service via its registered restart handler.
function restartService(serviceName, callback) {
    assert.strictEqual(typeof serviceName, 'string');
    assert.strictEqual(typeof callback, 'function');

    const service = KNOWN_SERVICES[serviceName];
    if (!service) return callback(new BoxError(BoxError.NOT_FOUND));

    service.restart(callback);
}
// Poll a service container's healthcheck endpoint until it answers OK
// (up to 10 attempts, 15 seconds apart).
function waitForService(containerName, tokenEnvName, callback) {
    assert.strictEqual(typeof containerName, 'string');
    assert.strictEqual(typeof tokenEnvName, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`Waiting for ${containerName}`);

    getServiceDetails(containerName, tokenEnvName, function (error, details) {
        if (error) return callback(error);

        const healthcheckUrl = `https://${details.ip}:3000/healthcheck?access_token=${details.token}`;

        async.retry({ times: 10, interval: 15000 }, function (retryCallback) {
            request.get(healthcheckUrl, { json: true, rejectUnauthorized: false }, function (error, response) {
                if (error) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${error.message}`));
                if (response.statusCode !== 200 || !response.body.status) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}`));

                retryCallback(null);
            });
        }, callback);
    });
}
// Run the setup hook of every addon the app declares, one at a time.
// Setup is idempotent so it may run again after a crash without losing data.
function setupAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!addons) return callback(null);

    const addonNames = Object.keys(addons);
    debugApp(app, 'setupAddons: Setting up %j', addonNames);

    async.eachSeries(addonNames, function iterator(addon, iteratorCallback) {
        const handlers = KNOWN_ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        debugApp(app, 'Setting up addon %s with options %j', addon, addons[addon]);

        handlers.setup(app, addons[addon], iteratorCallback);
    }, callback);
}
// Run the teardown hook of every addon the app declares. This is destructive:
// any app data stored with the addon is lost.
function teardownAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!addons) return callback(null);

    const addonNames = Object.keys(addons);
    debugApp(app, 'teardownAddons: Tearing down %j', addonNames);

    async.eachSeries(addonNames, function iterator(addon, iteratorCallback) {
        const handlers = KNOWN_ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        debugApp(app, 'Tearing down addon %s with options %j', addon, addons[addon]);

        handlers.teardown(app, addons[addon], iteratorCallback);
    }, callback);
}
// Run the backup hook of every addon the app declares, one at a time.
function backupAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'backupAddons');

    if (!addons) return callback(null);

    const addonNames = Object.keys(addons);
    debugApp(app, 'backupAddons: Backing up %j', addonNames);

    async.eachSeries(addonNames, function iterator(addon, iteratorCallback) {
        const handlers = KNOWN_ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        handlers.backup(app, addons[addon], iteratorCallback);
    }, callback);
}
// Run the clear hook of every addon the app declares, one at a time.
function clearAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'clearAddons');

    if (!addons) return callback(null);

    const addonNames = Object.keys(addons);
    debugApp(app, 'clearAddons: clearing %j', addonNames);

    async.eachSeries(addonNames, function iterator(addon, iteratorCallback) {
        const handlers = KNOWN_ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        handlers.clear(app, addons[addon], iteratorCallback);
    }, callback);
}
// Run the restore hook of every addon the app declares, one at a time.
function restoreAddons(app, addons, callback) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'restoreAddons');

    if (!addons) return callback(null);

    const addonNames = Object.keys(addons);
    debugApp(app, 'restoreAddons: restoring %j', addonNames);

    async.eachSeries(addonNames, function iterator(addon, iteratorCallback) {
        const handlers = KNOWN_ADDONS[addon];
        if (!handlers) return iteratorCallback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

        handlers.restore(app, addons[addon], iteratorCallback);
    }, callback);
}
2018-11-11 21:58:02 -08:00
// Recreate and re-import a single addon database for one app.
// clear() runs before restore() in case a previous restore crashed midway.
function importAppDatabase(app, addon, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof callback, 'function');

    const handlers = KNOWN_ADDONS[addon];
    if (!handlers) return callback(new BoxError(BoxError.NOT_FOUND, `No such addon: ${addon}`));

    const options = app.manifest.addons[addon];

    async.series([
        handlers.setup.bind(null, app, options),
        handlers.clear.bind(null, app, options), // clear in case we crashed in a restore
        handlers.restore.bind(null, app, options)
    ], callback);
}
// Re-import an addon's database for every installed app that uses it.
// Apps that fail to import are marked errored in the app db.
function importDatabase(addon, callback) {
    assert.strictEqual(typeof addon, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`importDatabase: Importing ${addon}`);

    // NOTE: the callback parameter was previously named 'apps', shadowing the
    // apps module and making apps.ISTATE_ERROR below resolve to undefined
    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        async.eachSeries(allApps, function iterator (app, iteratorCallback) {
            if (!(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon

            debug(`importDatabase: Importing addon ${addon} of app ${app.id}`);

            importAppDatabase(app, addon, function (error) {
                if (!error) return iteratorCallback();

                debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored`, error);

                // FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
                // not clear, if repair workflow should be part of addon or per-app
                appdb.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }, iteratorCallback);
            });
        }, callback);
    });
}
// Apply memory limits to the long-running service containers via 'docker update'.
// Services without an explicit entry in platformConfig fall back to their defaults.
function updateServiceConfig(platformConfig, callback) {
    callback = callback || NOOP_CALLBACK;

    debug('updateServiceConfig: %j', platformConfig);

    async.eachSeries([ 'mysql', 'postgresql', 'mail', 'mongodb', 'graphite' ], function iterator(serviceName, iteratorCallback) {
        const serviceConfig = platformConfig[serviceName];
        const hasCustomLimits = serviceConfig && serviceConfig.memory && serviceConfig.memorySwap;

        const memory = hasCustomLimits ? serviceConfig.memory : KNOWN_SERVICES[serviceName].defaultMemoryLimit;
        const memorySwap = hasCustomLimits ? serviceConfig.memorySwap : memory * 2;

        const args = `update --memory ${memory} --memory-swap ${memorySwap} ${serviceName}`.split(' ');
        shell.spawn(`update${serviceName}`, '/usr/bin/docker', args, { }, iteratorCallback);
    }, callback);
}
// (Re)create service containers after a platform infra change. On any infra
// version change every service is started; with an unchanged version only the
// services whose image tag differs are recreated incrementally.
function startServices(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    let startFuncs = [ ];

    // always start addons on any infra change, regardless of minor or major update
    if (existingInfra.version !== infra.version) {
        debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
        startFuncs.push(
            startMysql.bind(null, existingInfra),
            startPostgresql.bind(null, existingInfra),
            startMongodb.bind(null, existingInfra),
            startRedis.bind(null, existingInfra),
            mail.startMail);
    } else {
        assert.strictEqual(typeof existingInfra.images, 'object');
        // same infra version: recreate only the services whose image changed
        if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
        if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
        if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
        if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail);
        if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));

        debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
    }

    async.series(startFuncs, callback);
}
// Build the NAME=value environment strings an app container receives from its
// addons. Apps using the docker addon additionally get DOCKER_HOST.
function getEnvironment(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    appdb.getAddonConfigByAppId(app.id, function (error, result) {
        if (error) return callback(error);

        if (app.manifest.addons['docker']) result.push({ name: 'DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` });

        callback(null, result.map(function (entry) { return `${entry.name}=${entry.value}`; }));
    });
}
// Docker mounts required by the app's addons. Currently only the localstorage
// addon contributes a mount (the /app/data volume).
function getMountsSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons) return [ ];

    const mounts = [ ];

    if ('localstorage' in addons) {
        mounts.push({
            Target: '/app/data',
            Source: `${app.id}-localstorage`,
            Type: 'volume',
            ReadOnly: false
        });
    }

    return mounts;
}
2015-11-02 11:20:50 -08:00
// Names of auxiliary containers created for the app's addons. Currently only
// the scheduler addon creates extra containers (one per task).
function getContainerNamesSync(app, addons) {
    assert.strictEqual(typeof app, 'object');
    assert(!addons || typeof addons === 'object');

    if (!addons || !('scheduler' in addons)) return [ ];

    // names here depend on how scheduler.js creates containers
    return Object.keys(addons.scheduler).map(function (taskName) { return `${app.id}-${taskName}`; });
}
// Create the docker volume backing /app/data for this app.
function setupLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'setupLocalStorage');

    const volumeName = `${app.id}-localstorage`;
    const volumeDataDir = apps.getDataDir(app, app.dataDir);

    // remove any existing volume first, in case it's bound with an old dataDir
    async.series([
        docker.removeVolume.bind(null, app, volumeName),
        docker.createVolume.bind(null, app, volumeName, volumeDataDir)
    ], callback);
}
// Empty the app's localstorage volume but keep the directory itself.
function clearLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'clearLocalStorage');

    const volumeName = `${app.id}-localstorage`;
    docker.clearVolume(app, volumeName, { removeDirectory: false }, callback);
}
// Destroy the app's localstorage volume and its backing directory.
function teardownLocalStorage(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'teardownLocalStorage');

    const volumeName = `${app.id}-localstorage`;

    async.series([
        docker.clearVolume.bind(null, app, volumeName, { removeDirectory: true }),
        docker.removeVolume.bind(null, app, volumeName)
    ], callback);
}
2015-10-07 16:10:08 -07:00
// Register the oauth addon for the app. No env vars are injected anymore;
// only an empty addon config entry is recorded. Requires sso support.
function setupOauth(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'setupOauth');

    if (!app.sso) return callback(null);

    const env = [];
    debugApp(app, 'Setting oauth addon config to %j', env);

    appdb.setAddonConfig(app.id, 'oauth', env, callback);
}
2015-10-07 16:10:08 -07:00
// Remove the app's oauth addon config entry.
function teardownOauth(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'teardownOauth');

    appdb.unsetAddonConfig(app.id, 'oauth', callback);
}
2016-05-12 08:54:59 -07:00
// Expose mail server connection details to the app via addon env vars.
function setupEmail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    mail.getDomains(function (error, mailDomains) {
        if (error) return callback(error);

        const enabledDomains = mailDomains.filter(function (d) { return d.enabled; });
        const mailInDomains = enabledDomains.map(function (d) { return d.domain; }).join(',');

        // v1 manifests get un-prefixed variable names
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

        // note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
        const env = [
            { name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
            { name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
            { name: `${envPrefix}MAIL_SIEVE_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SIEVE_PORT`, value: '4190' },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.domain },
            { name: `${envPrefix}MAIL_DOMAINS`, value: mailInDomains },
            { name: `${envPrefix}LDAP_MAILBOXES_BASE_DN`, value: 'ou=mailboxes,dc=cloudron' }
        ];

        debugApp(app, 'Setting up Email');

        appdb.setAddonConfig(app.id, 'email', env, callback);
    });
}
// Removes the stored email addon configuration for an app.
// callback(error)
function teardownEmail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down Email');

    const addonName = 'email';

    appdb.unsetAddonConfig(app.id, addonName, callback);
}
2015-10-07 16:10:08 -07:00
// Publishes LDAP access details for an app. No-op for apps installed without SSO.
// callback(error)
function setupLdap(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    if (!app.sso) return callback(null); // app opted out of single sign-on

    // manifestVersion 1 apps get unprefixed env var names
    const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

    const ldapEnv = [
        { name: `${envPrefix}LDAP_SERVER`, value: '172.18.0.1' },
        { name: `${envPrefix}LDAP_PORT`, value: `${constants.LDAP_PORT}` },
        { name: `${envPrefix}LDAP_URL`, value: `ldap://172.18.0.1:${constants.LDAP_PORT}` },
        { name: `${envPrefix}LDAP_USERS_BASE_DN`, value: 'ou=users,dc=cloudron' },
        { name: `${envPrefix}LDAP_GROUPS_BASE_DN`, value: 'ou=groups,dc=cloudron' },
        { name: `${envPrefix}LDAP_BIND_DN`, value: `cn=${app.id},ou=apps,dc=cloudron` },
        { name: `${envPrefix}LDAP_BIND_PASSWORD`, value: hat(4 * 128) } // this is ignored
    ];

    debugApp(app, 'Setting up LDAP');

    appdb.setAddonConfig(app.id, 'ldap', ldapEnv, callback);
}
2015-10-07 16:10:08 -07:00
// Removes the stored ldap addon configuration for an app.
// callback(error)
function teardownLdap(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down LDAP');

    const addonName = 'ldap';

    appdb.unsetAddonConfig(app.id, addonName, callback);
}
2015-10-07 16:10:08 -07:00
// Publishes SMTP credentials for the app's mailbox (sendmail addon).
// An already-provisioned password is kept so re-configure does not rotate credentials.
// callback(error)
function setupSendMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up SendMail');

    appdb.getAddonConfigByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        const password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests get unprefixed names
        const mailboxAddress = `${app.mailboxName}@${app.mailboxDomain}`;

        const env = [
            { name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
            { name: `${envPrefix}MAIL_SMTPS_PORT`, value: '2465' },
            { name: `${envPrefix}MAIL_SMTP_USERNAME`, value: mailboxAddress },
            { name: `${envPrefix}MAIL_SMTP_PASSWORD`, value: password },
            { name: `${envPrefix}MAIL_FROM`, value: mailboxAddress },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.mailboxDomain }
        ];

        debugApp(app, 'Setting sendmail addon config to %j', env);

        appdb.setAddonConfig(app.id, 'sendmail', env, callback);
    });
}
2015-10-07 16:10:08 -07:00
// Removes the stored sendmail addon configuration for an app.
// callback(error)
function teardownSendMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down sendmail');

    const addonName = 'sendmail';

    appdb.unsetAddonConfig(app.id, addonName, callback);
}
2016-05-13 14:13:25 -07:00
// Publishes IMAP credentials for the app's mailbox (recvmail addon).
// An already-provisioned password is kept so re-configure does not rotate credentials.
// callback(error)
function setupRecvMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up recvmail');

    appdb.getAddonConfigByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        // NOT_FOUND means first-time setup; otherwise reuse the existing password
        const password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length
        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests get unprefixed names

        const env = [
            { name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
            { name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
            { name: `${envPrefix}MAIL_IMAP_USERNAME`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_IMAP_PASSWORD`, value: password },
            { name: `${envPrefix}MAIL_TO`, value: app.mailboxName + '@' + app.mailboxDomain },
            { name: `${envPrefix}MAIL_DOMAIN`, value: app.mailboxDomain }
        ];

        // fix: the original log message said 'sendmail' here (copy-paste from setupSendMail)
        debugApp(app, 'Setting recvmail addon config to %j', env);

        appdb.setAddonConfig(app.id, 'recvmail', env, callback);
    });
}
// Removes the stored recvmail addon configuration for an app.
// callback(error)
function teardownRecvMail(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down recvmail');

    const addonName = 'recvmail';

    appdb.unsetAddonConfig(app.id, addonName, callback);
}
// Derives a mysql-safe identifier from an app id: md5 gets rid of "-" and the
// result is truncated because the max length of mysql usernames is 16.
function mysqlDatabaseName(appId) {
    assert.strictEqual(typeof appId, 'string');

    return crypto.createHash('md5').update(appId).digest('hex').substring(0, 16);
}
// Launches the shared platform mysql container.
// existingInfra: infra version/images recorded at the previous start ('none' version means fresh install)
// callback(error)
function startMysql(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.mysql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh secrets are generated on every (re)start
    const cloudronToken = hat(8 * 128);
    const memoryLimit = 4 * 256;

    // a tag change that requiresUpgrade() implies on-disk data must be re-imported from a dump
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mysql.tag, tag);

    if (upgrading) debug('startMysql: mysql will be upgraded');

    // when upgrading, remove the old addon data dir (as root) before the new container initializes it
    const upgradeFunc = upgrading ? shell.sudo.bind(null, 'startMysql', [ RMADDONDIR_CMD, 'mysql' ], {}) : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        const cmd = `docker run --restart=always -d --name="mysql" \
            --hostname mysql \
            --net cloudron \
            --net-alias mysql \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=mysql \
            -m ${memoryLimit}m \
            --memory-swap ${memoryLimit * 2}m \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_MYSQL_TOKEN=${cloudronToken} \
            -e CLOUDRON_MYSQL_ROOT_HOST=172.18.0.1 \
            -e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
            -v "${dataDir}/mysql:/var/lib/mysql" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        shell.exec('startMysql', cmd, function (error) {
            if (error) return callback(error);

            // wait until the addon service reports itself healthy before continuing
            waitForService('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error) {
                if (error) return callback(error);

                if (!upgrading) return callback(null);

                importDatabase('mysql', callback); // re-import data after the version upgrade
            });
        });
    });
}
2015-10-07 16:10:08 -07:00
// Provisions a mysql database (or a database prefix when options.multipleDatabases is set)
// for the app via the mysql addon's management API, then stores the resulting env vars.
// An already-provisioned password is reused so re-configure does not rotate credentials.
// callback(error)
function setupMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up mysql');

    appdb.getAddonConfigByName(app.id, 'mysql', '%MYSQL_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error); // NOT_FOUND means first-time setup

        const tmp = mysqlDatabaseName(app.id); // the same identifier doubles as db name, prefix and username

        const data = {
            database: tmp,
            prefix: tmp,
            username: tmp,
            password: error ? hat(4 * 48) : existingPassword // see box#362 for password length
        };

        getServiceDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
            if (error) return callback(error);

            // self-signed cert on the addon's management endpoint, hence rejectUnauthorized: false
            request.post(`https://${result.ip}:3000/` + (options.multipleDatabases ? 'prefixes' : 'databases') + `?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mysql: ${error.message}`));
                if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

                // manifestVersion 1 apps get unprefixed env var names
                const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

                var env = [
                    { name: `${envPrefix}MYSQL_USERNAME`, value: data.username },
                    { name: `${envPrefix}MYSQL_PASSWORD`, value: data.password },
                    { name: `${envPrefix}MYSQL_HOST`, value: 'mysql' },
                    { name: `${envPrefix}MYSQL_PORT`, value: '3306' }
                ];

                if (options.multipleDatabases) {
                    env = env.concat({ name: `${envPrefix}MYSQL_DATABASE_PREFIX`, value: `${data.prefix}_` });
                } else {
                    env = env.concat(
                        { name: `${envPrefix}MYSQL_URL`, value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
                        { name: `${envPrefix}MYSQL_DATABASE`, value: data.database }
                    );
                }

                debugApp(app, 'Setting mysql addon config to %j', env);

                appdb.setAddonConfig(app.id, 'mysql', env, callback);
            });
        });
    });
}
// Clears the contents of the app's mysql database (or prefix) via the addon service.
// callback(error)
function clearMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);

    getServiceDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const kind = options.multipleDatabases ? 'prefixes' : 'databases';
        const url = `https://${result.ip}:3000/${kind}/${database}/clear?access_token=${result.token}`;

        request.post(url, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback();
        });
    });
}
2015-10-07 16:10:08 -07:00
// Deletes the app's mysql database (or prefix) and user, then drops the stored addon config.
// callback(error)
function teardownMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);
    const username = database; // the same identifier is used for db and user

    getServiceDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const kind = options.multipleDatabases ? 'prefixes' : 'databases';
        const url = `https://${result.ip}:3000/${kind}/${database}?access_token=${result.token}&username=${username}`;

        request.delete(url, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            appdb.unsetAddonConfig(app.id, 'mysql', callback);
        });
    });
}
// POSTs to url and streams the response body into filename.
// Used by the addon backup endpoints so large dumps are never buffered in memory.
// callback(error)
function pipeRequestToFile(url, filename, callback) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    const writeStream = fs.createWriteStream(filename);

    const done = once(function (error) { // the writeStream and the request can both error
        if (error) writeStream.close();
        callback(error);
    });

    // fix: the error messages contained a broken "$(unknown)" placeholder; interpolate filename
    writeStream.on('error', (error) => done(new BoxError(BoxError.FS_ERROR, `Error writing to ${filename}: ${error.message}`)));

    writeStream.on('open', function () {
        // note: do not attach to post callback handler because this will buffer the entire response!
        // see https://github.com/request/request/issues/2270
        const req = request.post(url, { rejectUnauthorized: false });

        req.on('error', (error) => done(new BoxError(BoxError.NETWORK_ERROR, `Request error writing to ${filename}: ${error.message}`))); // network error, dns error, request errored in middle etc

        req.on('response', function (response) {
            if (response.statusCode !== 200) return done(new BoxError(BoxError.ADDONS_ERROR, `Unexpected response code when piping ${url}: ${response.statusCode} message: ${response.statusMessage} filename: ${filename}`));

            response.pipe(writeStream).on('finish', done); // this is hit after data written to disk
        });
    });
}
2015-10-07 16:10:08 -07:00
// Streams a dump of the app's mysql database (or prefix) into the addon dump file.
// callback(error)
function backupMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);

    debugApp(app, 'Backing up mysql');

    getServiceDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const kind = options.multipleDatabases ? 'prefixes' : 'databases';
        const backupUrl = `https://${result.ip}:3000/${kind}/${database}/backup?access_token=${result.token}`;

        pipeRequestToFile(backupUrl, dumpPath('mysql', app.id), callback);
    });
}
2015-10-07 16:10:08 -07:00
// Restores the app's mysql database (or prefix) from the dump file written by backupMySql().
// callback(error)
function restoreMySql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const database = mysqlDatabaseName(app.id);

    debugApp(app, 'restoreMySql');

    callback = once(callback); // protect from multiple returns with streams

    getServiceDetails('mysql', 'CLOUDRON_MYSQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        var input = fs.createReadStream(dumpPath('mysql', app.id));
        input.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring mysql: ${error.message}`)));

        // stream the dump straight into the addon's restore endpoint (no buffering)
        const restoreReq = request.post(`https://${result.ip}:3000/` + (options.multipleDatabases ? 'prefixes' : 'databases') + `/${database}/restore?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mysql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mysql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        input.pipe(restoreReq);
    });
}
// Derives the postgresql database and user names for an app.
// Dashes are stripped from the app id since they are not valid in these identifiers.
// Returns { database, username }.
function postgreSqlNames(appId) {
    assert.strictEqual(typeof appId, 'string'); // consistency: every other helper in this file validates its args

    const id = appId.replace(/-/g, ''); // avoid mutating the parameter

    return { database: `db${id}`, username: `user${id}` };
}
// Launches the shared platform postgresql container.
// existingInfra: infra version/images recorded at the previous start ('none' version means fresh install)
// callback(error)
function startPostgresql(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.postgresql.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh secrets are generated on every (re)start
    const cloudronToken = hat(8 * 128);
    const memoryLimit = 4 * 256;

    // a tag change that requiresUpgrade() implies on-disk data must be re-imported from a dump
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.postgresql.tag, tag);

    if (upgrading) debug('startPostgresql: postgresql will be upgraded');

    // when upgrading, remove the old addon data dir (as root) before the new container initializes it
    const upgradeFunc = upgrading ? shell.sudo.bind(null, 'startPostgresql', [ RMADDONDIR_CMD, 'postgresql' ], {}) : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        const cmd = `docker run --restart=always -d --name="postgresql" \
            --hostname postgresql \
            --net cloudron \
            --net-alias postgresql \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=postgresql \
            -m ${memoryLimit}m \
            --memory-swap ${memoryLimit * 2}m \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_POSTGRESQL_ROOT_PASSWORD="${rootPassword}" \
            -e CLOUDRON_POSTGRESQL_TOKEN="${cloudronToken}" \
            -v "${dataDir}/postgresql:/var/lib/postgresql" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        shell.exec('startPostgresql', cmd, function (error) {
            if (error) return callback(error);

            // wait until the addon service reports itself healthy before continuing
            waitForService('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error) {
                if (error) return callback(error);

                if (!upgrading) return callback(null);

                importDatabase('postgresql', callback); // re-import data after the version upgrade
            });
        });
    });
}
2015-10-07 16:10:08 -07:00
// Provisions a postgresql database and user for the app via the postgresql addon's
// management API, then stores the resulting env vars.
// An already-provisioned password is reused so re-configure does not rotate credentials.
// callback(error)
function setupPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up postgresql');

    const { database, username } = postgreSqlNames(app.id);

    appdb.getAddonConfigByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error); // NOT_FOUND means first-time setup

        const data = {
            database: database,
            username: username,
            password: error ? hat(4 * 128) : existingPassword
        };

        getServiceDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
            if (error) return callback(error);

            // self-signed cert on the addon's management endpoint, hence rejectUnauthorized: false
            request.post(`https://${result.ip}:3000/databases?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up postgresql: ${error.message}`));
                if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

                // manifestVersion 1 apps get unprefixed env var names
                const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';

                var env = [
                    { name: `${envPrefix}POSTGRESQL_URL`, value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
                    { name: `${envPrefix}POSTGRESQL_USERNAME`, value: data.username },
                    { name: `${envPrefix}POSTGRESQL_PASSWORD`, value: data.password },
                    { name: `${envPrefix}POSTGRESQL_HOST`, value: 'postgresql' },
                    { name: `${envPrefix}POSTGRESQL_PORT`, value: '5432' },
                    { name: `${envPrefix}POSTGRESQL_DATABASE`, value: data.database }
                ];

                debugApp(app, 'Setting postgresql addon config to %j', env);

                appdb.setAddonConfig(app.id, 'postgresql', env, callback);
            });
        });
    });
}
// Clears the contents of the app's postgresql database via the addon service.
// callback(error)
function clearPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const { database, username } = postgreSqlNames(app.id);

    debugApp(app, 'Clearing postgresql');

    getServiceDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const url = `https://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}`;

        request.post(url, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });
    });
}
2015-10-07 16:10:08 -07:00
// Deletes the app's postgresql database and user, then drops the stored addon config.
// callback(error)
function teardownPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const { database, username } = postgreSqlNames(app.id);

    getServiceDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const url = `https://${result.ip}:3000/databases/${database}?access_token=${result.token}&username=${username}`;

        request.delete(url, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            appdb.unsetAddonConfig(app.id, 'postgresql', callback);
        });
    });
}
2015-10-07 16:10:08 -07:00
// Streams a dump of the app's postgresql database into the addon dump file.
// callback(error)
function backupPostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up postgresql');

    const { database } = postgreSqlNames(app.id);

    getServiceDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        const backupUrl = `https://${result.ip}:3000/databases/${database}/backup?access_token=${result.token}`;

        pipeRequestToFile(backupUrl, dumpPath('postgresql', app.id), callback);
    });
}
2015-10-07 16:10:08 -07:00
// Restores the app's postgresql database from the dump file written by backupPostgreSql().
// callback(error)
function restorePostgreSql(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Restore postgresql');

    const { database, username } = postgreSqlNames(app.id);

    callback = once(callback); // protect from multiple returns with streams

    getServiceDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
        if (error) return callback(error);

        var input = fs.createReadStream(dumpPath('postgresql', app.id));
        input.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring postgresql: ${error.message}`)));

        // stream the dump straight into the addon's restore endpoint (no buffering)
        const restoreReq = request.post(`https://${result.ip}:3000/databases/${database}/restore?access_token=${result.token}&username=${username}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring postgresql: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        input.pipe(restoreReq);
    });
}
// Launches the shared platform mongodb container.
// existingInfra: infra version/images recorded at the previous start ('none' version means fresh install)
// callback(error)
function startMongodb(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.mongodb.tag;
    const dataDir = paths.PLATFORM_DATA_DIR;
    const rootPassword = hat(8 * 128); // fresh secrets are generated on every (re)start
    const cloudronToken = hat(8 * 128);
    const memoryLimit = 4 * 256;

    // a tag change that requiresUpgrade() implies on-disk data must be re-imported from a dump
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.mongodb.tag, tag);

    if (upgrading) debug('startMongodb: mongodb will be upgraded');

    // when upgrading, remove the old addon data dir (as root) before the new container initializes it
    const upgradeFunc = upgrading ? shell.sudo.bind(null, 'startMongodb', [ RMADDONDIR_CMD, 'mongodb' ], {}) : (next) => next();

    upgradeFunc(function (error) {
        if (error) return callback(error);

        const cmd = `docker run --restart=always -d --name="mongodb" \
            --hostname mongodb \
            --net cloudron \
            --net-alias mongodb \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag=mongodb \
            -m ${memoryLimit}m \
            --memory-swap ${memoryLimit * 2}m \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_MONGODB_ROOT_PASSWORD="${rootPassword}" \
            -e CLOUDRON_MONGODB_TOKEN="${cloudronToken}" \
            -v "${dataDir}/mongodb:/var/lib/mongodb" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run "${tag}"`;

        shell.exec('startMongodb', cmd, function (error) {
            if (error) return callback(error);

            // wait until the addon service reports itself healthy before continuing
            waitForService('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error) {
                if (error) return callback(error);

                if (!upgrading) return callback(null);

                importDatabase('mongodb', callback); // re-import data after the version upgrade
            });
        });
    });
}
// Provisions a database and user for the app in the shared mongodb service
// container and stores the resulting connection env vars as the app's addon
// config. Fix: removed interleaved VCS blame-timestamp lines that broke the
// `data` object literal and the request callback.
function setupMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Setting up mongodb');

    // re-use any previously provisioned password so credentials stay stable across reconfigures
    appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        const data = {
            database: app.id,
            username: app.id,
            password: error ? hat(4 * 128) : existingPassword, // NOT_FOUND: generate a fresh password
            oplog: !!options.oplog
        };

        getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
            if (error) return callback(error);

            request.post(`https://${result.ip}:3000/databases?access_token=${result.token}`, { rejectUnauthorized: false, json: data }, function (error, response) {
                if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error setting up mongodb: ${error.message}`));
                if (response.statusCode !== 201) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error setting up mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

                const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests use unprefixed vars

                var env = [
                    { name: `${envPrefix}MONGODB_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
                    { name: `${envPrefix}MONGODB_USERNAME`, value : data.username },
                    { name: `${envPrefix}MONGODB_PASSWORD`, value: data.password },
                    { name: `${envPrefix}MONGODB_HOST`, value : 'mongodb' },
                    { name: `${envPrefix}MONGODB_PORT`, value : '27017' },
                    { name: `${envPrefix}MONGODB_DATABASE`, value : data.database }
                ];

                if (options.oplog) {
                    env.push({ name: `${envPrefix}MONGODB_OPLOG_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });
                }

                debugApp(app, 'Setting mongodb addon config to %j', env);

                appdb.setAddonConfig(app.id, 'mongodb', env, callback);
            });
        });
    });
}
// Asks the mongodb service container to drop all data of the app's database
// (the database itself is kept). Fix: removed interleaved VCS blame-timestamp
// line that broke the function body.
function clearMongodb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Clearing mongodb');

    getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
        if (error) return callback(error);

        request.post(`https://${result.ip}:3000/databases/${app.id}/clear?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing mongodb: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback();
        });
    });
}
// Deletes the app's database from the mongodb service container and removes
// the app's mongodb addon config. Fixes: removed interleaved VCS blame-timestamp
// lines; network-error message now says "Network error ..." for consistency
// with setupMongoDb/clearMongodb.
function teardownMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Tearing down mongodb');

    getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
        if (error) return callback(error);

        request.delete(`https://${result.ip}:3000/databases/${app.id}?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error tearing down mongodb: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error tearing down mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

            appdb.unsetAddonConfig(app.id, 'mongodb', callback);
        });
    });
}
// Streams a dump of the app's mongodb database from the service container into
// the addon dump file. Fix: removed interleaved VCS blame-timestamp lines.
function backupMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up mongodb');

    getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
        if (error) return callback(error);

        const url = `https://${result.ip}:3000/databases/${app.id}/backup?access_token=${result.token}`;
        pipeRequestToFile(url, dumpPath('mongodb', app.id), callback);
    });
}
// Streams the app's mongodb dump file back into the service container.
// Fix: removed interleaved VCS blame-timestamp lines that broke the body.
function restoreMongoDb(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    callback = once(callback); // protect from multiple returns with streams

    debugApp(app, 'restoreMongoDb');

    getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
        if (error) return callback(error);

        const readStream = fs.createReadStream(dumpPath('mongodb', app.id));
        readStream.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring mongodb: ${error.message}`)));

        const restoreReq = request.post(`https://${result.ip}:3000/databases/${app.id}/restore?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mongodb: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring mongodb. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        readStream.pipe(restoreReq);
    });
}
// Ensures every app that declares the redis addon has its redis container set
// up; when the redis image was upgraded across infra versions, the saved dumps
// are re-imported afterwards.
function startRedis(existingInfra, callback) {
    assert.strictEqual(typeof existingInfra, 'object');
    assert.strictEqual(typeof callback, 'function');

    const tag = infra.images.redis.tag;
    const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.redis.tag, tag);

    appdb.getAll(function (error, allApps) {
        if (error) return callback(error);

        const setupOne = function (app, done) {
            if (!('redis' in app.manifest.addons)) return done(); // app doesn't use the addon
            setupRedis(app, app.manifest.addons.redis, done);
        };

        async.eachSeries(allApps, setupOne, function (error) {
            if (error) return callback(error);
            if (!upgrading) return callback();

            importDatabase('redis', callback); // setupRedis currently starts the app container
        });
    });
}
// Ensures that app's addon redis container is running. Can be called when the
// named container already exists/is running (the docker-run is skipped then).
// Fix: removed interleaved VCS blame-timestamp lines that broke the body.
function setupRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    const redisName = 'redis-' + app.id;

    // re-use any existing password so credentials stay stable across reconfigures
    appdb.getAddonConfigByName(app.id, 'redis', '%REDIS_PASSWORD', function (error, existingPassword) {
        if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);

        const redisPassword = options.noPassword ? '' : (error ? hat(4 * 48) : existingPassword); // see box#362 for password length
        const redisServiceToken = hat(4 * 48);

        // Compute redis memory limit based on app's memory limit (this is arbitrary)
        var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;

        if (memoryLimit === -1) { // unrestricted (debug mode)
            memoryLimit = 0;
        } else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
            memoryLimit = 150 * 1024 * 1024; // 150m
        } else {
            memoryLimit = 600 * 1024 * 1024; // 600m
        }

        const tag = infra.images.redis.tag;
        const label = app.fqdn;

        // note that we do not add appId label because this interferes with the stop/start app logic
        const cmd = `docker run --restart=always -d --name=${redisName} \
            --hostname ${redisName} \
            --label=location=${label} \
            --net cloudron \
            --net-alias ${redisName} \
            --log-driver syslog \
            --log-opt syslog-address=udp://127.0.0.1:2514 \
            --log-opt syslog-format=rfc5424 \
            --log-opt tag="${redisName}" \
            -m ${memoryLimit/2} \
            --memory-swap ${memoryLimit} \
            --dns 172.18.0.1 \
            --dns-search=. \
            -e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
            -e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
            -v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
            --label isCloudronManaged=true \
            --read-only -v /tmp -v /run ${tag}`;

        const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_'; // v1 manifests use unprefixed vars

        var env = [
            { name: `${envPrefix}REDIS_URL`, value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
            { name: `${envPrefix}REDIS_PASSWORD`, value: redisPassword },
            { name: `${envPrefix}REDIS_HOST`, value: redisName },
            { name: `${envPrefix}REDIS_PORT`, value: '6379' }
        ];

        async.series([
            (next) => {
                docker.inspect(redisName, function (inspectError, result) { // fast-path
                    if (!inspectError) {
                        debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
                        return next();
                    }

                    shell.exec('startRedis', cmd, next);
                });
            },
            appdb.setAddonConfig.bind(null, app.id, 'redis', env),
            waitForService.bind(null, 'redis-' + app.id, 'CLOUDRON_REDIS_TOKEN')
        ], function (error) {
            if (error) debug('Error setting up redis: ', error);
            callback(error);
        });
    });
}
// Asks the app's redis service container to flush its data.
// Fix: removed interleaved VCS blame-timestamp line that broke the body.
function clearRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Clearing redis');

    getServiceDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, result) {
        if (error) return callback(error);

        request.post(`https://${result.ip}:3000/clear?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing redis: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing redis. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });
    });
}
// Removes the app's redis container, its data directory and log directory,
// and drops the addon config. Fix: removed interleaved VCS blame-timestamp
// lines that broke the body.
function teardownRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    docker.deleteContainer(`redis-${app.id}`, function (error) {
        if (error) return callback(error);

        shell.sudo('removeVolume', [ RMADDONDIR_CMD, 'redis', app.id ], {}, function (error) {
            if (error) return callback(new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${error.message}`));

            rimraf(path.join(paths.LOG_DIR, `redis-${app.id}`), function (error) {
                if (error) debugApp(app, 'cannot cleanup logs: %s', error); // best-effort; deliberately not fatal

                appdb.unsetAddonConfig(app.id, 'redis', callback);
            });
        });
    });
}
// Streams a dump of the app's redis data from its service container into the
// addon dump file. Fix: removed interleaved VCS blame-timestamp lines.
function backupRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Backing up redis');

    getServiceDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, result) {
        if (error) return callback(error);

        const url = `https://${result.ip}:3000/backup?access_token=${result.token}`;
        pipeRequestToFile(url, dumpPath('redis', app.id), callback);
    });
}
// Streams the redis dump back into the app's redis container. Falls back to
// the old dump location inside the app data dir for backups taken before the
// dumps were moved. Fix: removed interleaved VCS blame-timestamp lines.
function restoreRedis(app, options, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'Restoring redis');

    callback = once(callback); // protect from multiple returns with streams

    getServiceDetails('redis-' + app.id, 'CLOUDRON_REDIS_TOKEN', function (error, result) {
        if (error) return callback(error);

        let input;
        const newDumpLocation = dumpPath('redis', app.id);
        if (fs.existsSync(newDumpLocation)) {
            input = fs.createReadStream(newDumpLocation);
        } else { // old location of dumps
            input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'redis/dump.rdb'));
        }

        input.on('error', (error) => callback(new BoxError(BoxError.FS_ERROR, `Error reading input stream when restoring redis: ${error.message}`)));

        const restoreReq = request.post(`https://${result.ip}:3000/restore?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring redis: ${error.message}`));
            if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error restoring redis. Status code: ${response.statusCode} message: ${response.body.message}`));

            callback(null);
        });

        input.pipe(restoreReq);
    });
}
// Reports whether the docker daemon responds to a ping.
// Fix: removed interleaved VCS blame-timestamp lines that broke the body.
function statusDocker(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.ping(function (error) {
        callback(null, { status: error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE });
    });
}
// Triggers a restart of the docker daemon via a sudo helper script. The restart
// outcome is intentionally not awaited (NOOP_CALLBACK); the caller is answered
// immediately. Fix: removed interleaved VCS blame-timestamp lines.
function restartDocker(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.sudo('restartdocker', [ path.join(__dirname, 'scripts/restartdocker.sh') ], {}, NOOP_CALLBACK);

    callback(null);
}
// Reports whether the host unbound resolver unit is active (via systemd).
// Fix: removed interleaved VCS blame-timestamp lines that broke the body.
function statusUnbound(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.exec('statusUnbound', 'systemctl is-active unbound', function (error) {
        callback(null, { status: error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE });
    });
}
// Triggers a restart of the host unbound resolver via a sudo helper script;
// the outcome is not awaited (NOOP_CALLBACK) and success is reported right away.
function restartUnbound(callback) {
    assert.strictEqual(typeof callback, 'function');

    const script = path.join(__dirname, 'scripts/restartunbound.sh');
    shell.sudo('restartunbound', [ script ], {}, NOOP_CALLBACK);

    callback(null);
}
2019-03-18 19:02:32 -07:00
// Reports whether the host nginx unit is active (via systemd).
function statusNginx(callback) {
    assert.strictEqual(typeof callback, 'function');

    shell.exec('statusNginx', 'systemctl is-active nginx', function (error) {
        const status = error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE;
        callback(null, { status: status });
    });
}
// Triggers an nginx reload via a sudo helper script; the outcome is not
// awaited (NOOP_CALLBACK) and success is reported right away.
function restartNginx(callback) {
    assert.strictEqual(typeof callback, 'function');

    const script = path.join(__dirname, 'scripts/reloadnginx.sh');
    shell.sudo('reloadnginx', [ script ], {}, NOOP_CALLBACK);

    callback(null);
}
2019-04-04 20:46:01 -07:00
// Reports the sftp container's run state and memory usage.
// Fixes: removed interleaved VCS blame-timestamp lines; replaced parseInt()
// on a number (works only via implicit number->string coercion) with
// Math.floor(), which has the same truncation for these non-negative values.
function statusSftp(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.inspect('sftp', function (error, container) {
        // a missing container simply means the service is not running
        if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        docker.memoryUsage('sftp', function (error, result) {
            if (error) return callback(error);

            var tmp = {
                status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                memoryUsed: result.memory_stats.usage,
                memoryPercent: Math.floor(100 * result.memory_stats.usage / result.memory_stats.limit)
            };

            callback(null, tmp);
        });
    });
}
2019-03-19 15:56:29 -07:00
// Reports the graphite container's status: probes the graphite-web dashboard
// endpoint to distinguish "starting" from "active", then adds memory usage.
// Fixes: removed interleaved VCS blame-timestamp lines; replaced parseInt()
// on a number (works only via implicit number->string coercion) with
// Math.floor(), which has the same truncation for these non-negative values.
function statusGraphite(callback) {
    assert.strictEqual(typeof callback, 'function');

    docker.inspect('graphite', function (error, container) {
        // a missing container simply means the service is not running
        if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
        if (error) return callback(error);

        request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 3000 }, function (error, response) {
            if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
            if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });

            docker.memoryUsage('graphite', function (error, result) {
                if (error) return callback(error);

                var tmp = {
                    status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
                    memoryUsed: result.memory_stats.usage,
                    memoryPercent: Math.floor(100 * result.memory_stats.usage / result.memory_stats.limit)
                };

                callback(null, tmp);
            });
        });
    });
}