diff --git a/src/apptaskmanager.js b/src/apptaskmanager.js index f6be87394..a2529b987 100644 --- a/src/apptaskmanager.js +++ b/src/apptaskmanager.js @@ -13,7 +13,7 @@ let assert = require('assert'), path = require('path'), paths = require('./paths.js'), scheduler = require('./scheduler.js'), - sftp = require('./sftp.js'), + services = require('./services.js'), tasks = require('./tasks.js'); let gActiveTasks = { }; // indexed by app id @@ -80,7 +80,7 @@ function scheduleTask(appId, taskId, options, callback) { locker.unlock(locker.OP_APPTASK); // unlock event will trigger next task // post app task hooks - sftp.rebuild(error => { if (error) debug('Unable to rebuild sftp:', error); }); + services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); }); scheduler.resumeJobs(appId); }); } diff --git a/src/graphs.js b/src/graphite.js similarity index 74% rename from src/graphs.js rename to src/graphite.js index 56a1f77e9..fa57723ab 100644 --- a/src/graphs.js +++ b/src/graphite.js @@ -1,22 +1,27 @@ 'use strict'; exports = module.exports = { - startGraphite: startGraphite + start, + + DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024 }; var assert = require('assert'), async = require('async'), infra = require('./infra_version.js'), paths = require('./paths.js'), - shell = require('./shell.js'); + shell = require('./shell.js'), + system = require('./system.js'); -function startGraphite(existingInfra, callback) { +function start(existingInfra, serviceConfig, callback) { assert.strictEqual(typeof existingInfra, 'object'); + assert.strictEqual(typeof serviceConfig, 'object'); assert.strictEqual(typeof callback, 'function'); const tag = infra.images.graphite.tag; const dataDir = paths.PLATFORM_DATA_DIR; - const memoryLimit = 256; + const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT; + const memory = system.getMemoryAllocation(memoryLimit); const cmd = `docker run --restart=always -d --name="graphite" \ --hostname graphite \ @@ 
-26,8 +31,8 @@ function startGraphite(existingInfra, callback) { --log-opt syslog-address=udp://127.0.0.1:2514 \ --log-opt syslog-format=rfc5424 \ --log-opt tag=graphite \ - -m ${memoryLimit}m \ - --memory-swap ${memoryLimit * 2}m \ + -m ${memory} \ + --memory-swap ${memoryLimit} \ --dns 172.18.0.1 \ --dns-search=. \ -p 127.0.0.1:2003:2003 \ diff --git a/src/ldap.js b/src/ldap.js index 118654263..acf277d5b 100644 --- a/src/ldap.js +++ b/src/ldap.js @@ -572,10 +572,10 @@ function authenticateSftp(req, res, next) { } function loadSftpConfig(req, res, next) { - services.getServiceConfig('sftp', function (error, service, servicesConfig) { + services.getServiceConfig('sftp', function (error, serviceConfig) { if (error) return next(new ldap.OperationsError(error.toString())); - req.requireAdmin = servicesConfig['sftp'].requireAdmin; + req.requireAdmin = serviceConfig.requireAdmin; next(); }); diff --git a/src/mail.js b/src/mail.js index f298794b4..d86f7945d 100644 --- a/src/mail.js +++ b/src/mail.js @@ -55,6 +55,8 @@ exports = module.exports = { OWNERTYPE_USER: 'user', OWNERTYPE_GROUP: 'group', + DEFAULT_MEMORY_LIMIT: 512 * 1024 * 1024, + _removeMailboxes: removeMailboxes, _readDkimPublicKeySync: readDkimPublicKeySync }; @@ -86,6 +88,7 @@ const assert = require('assert'), shell = require('./shell.js'), smtpTransport = require('nodemailer-smtp-transport'), sysinfo = require('./sysinfo.js'), + system = require('./system.js'), tasks = require('./tasks.js'), users = require('./users.js'), validator = require('validator'), @@ -624,9 +627,10 @@ function createMailConfig(mailFqdn, mailDomain, callback) { }); } -function configureMail(mailFqdn, mailDomain, callback) { +function configureMail(mailFqdn, mailDomain, serviceConfig, callback) { assert.strictEqual(typeof mailFqdn, 'string'); assert.strictEqual(typeof mailDomain, 'string'); + assert.strictEqual(typeof serviceConfig, 'object'); assert.strictEqual(typeof callback, 'function'); // mail (note: 2525 is hardcoded in mail 
container and app use this port) @@ -635,7 +639,8 @@ function configureMail(mailFqdn, mailDomain, callback) { // mail container uses /app/data for backed up data and /run for restart-able data const tag = infra.images.mail.tag; - const memoryLimit = 4 * 256; + const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT; + const memory = system.getMemoryAllocation(memoryLimit); const cloudronToken = hat(8 * 128), relayToken = hat(8 * 128); reverseProxy.getCertificate(mailFqdn, mailDomain, function (error, bundle) { @@ -666,8 +671,8 @@ function configureMail(mailFqdn, mailDomain, callback) { --log-opt syslog-address=udp://127.0.0.1:2514 \ --log-opt syslog-format=rfc5424 \ --log-opt tag=mail \ - -m ${memoryLimit}m \ - --memory-swap ${memoryLimit * 2}m \ + -m ${memory} \ + --memory-swap ${memoryLimit} \ --dns 172.18.0.1 \ --dns-search=. \ -e CLOUDRON_MAIL_TOKEN="${cloudronToken}" \ @@ -714,8 +719,12 @@ function restartMail(callback) { if (process.env.BOX_ENV === 'test' && !process.env.TEST_CREATE_INFRA) return callback(); - debug(`restartMail: restarting mail container with ${settings.mailFqdn()} ${settings.adminDomain()}`); - configureMail(settings.mailFqdn(), settings.adminDomain(), callback); + services.getServiceConfig('mail', function (error, serviceConfig) { + if (error) return callback(error); + + debug(`restartMail: restarting mail container with ${settings.mailFqdn()} ${settings.adminDomain()}`); + configureMail(settings.mailFqdn(), settings.adminDomain(), serviceConfig, callback); + }); } function restartMailIfActivated(callback) { diff --git a/src/platform.js b/src/platform.js index 331aa7262..9a94801a6 100644 --- a/src/platform.js +++ b/src/platform.js @@ -19,7 +19,6 @@ const apps = require('./apps.js'), reverseProxy = require('./reverseproxy.js'), safe = require('safetydance'), services = require('./services.js'), - settings = require('./settings.js'), shell = require('./shell.js'), tasks = require('./tasks.js'), _ = 
require('underscore'); @@ -76,7 +75,7 @@ function onPlatformReady(infraChanged) { exports._isReady = true; let tasks = [ apps.schedulePendingTasks ]; - if (infraChanged) tasks.push(applyServicesConfig, pruneInfraImages); + if (infraChanged) tasks.push(pruneInfraImages); async.series(async.reflectAll(tasks), function (error, results) { results.forEach((result, idx) => { @@ -85,14 +84,6 @@ function onPlatformReady(infraChanged) { }); } -function applyServicesConfig(callback) { - settings.getServicesConfig(function (error, platformConfig) { - if (error) return callback(error); - - services.updateServiceConfig(platformConfig, callback); - }); -} - function pruneInfraImages(callback) { debug('pruneInfraImages: checking existing images'); diff --git a/src/routes/graphs.js b/src/routes/graphs.js index 187ede538..6ce26d453 100644 --- a/src/routes/graphs.js +++ b/src/routes/graphs.js @@ -1,7 +1,7 @@ 'use strict'; exports = module.exports = { - getGraphs: getGraphs + getGraphs }; var middleware = require('../middleware/index.js'), diff --git a/src/routes/services.js b/src/routes/services.js index ef087ccd4..66e598e7e 100644 --- a/src/routes/services.js +++ b/src/routes/services.js @@ -6,7 +6,8 @@ exports = module.exports = { configure, getLogs, getLogStream, - restart + restart, + rebuild }; const assert = require('assert'), @@ -126,3 +127,13 @@ function restart(req, res, next) { next(new HttpSuccess(202, {})); }); } + +function rebuild(req, res, next) { + assert.strictEqual(typeof req.params.service, 'string'); + + services.rebuildService(req.params.service, function (error) { + if (error) return next(BoxError.toHttpError(error)); + + next(new HttpSuccess(202, {})); + }); +} diff --git a/src/server.js b/src/server.js index 24d761e6c..ed92a194e 100644 --- a/src/server.js +++ b/src/server.js @@ -319,6 +319,7 @@ function initializeExpressSync() { router.get ('/api/v1/services/:service/logs', token, authorizeAdmin, routes.services.getLogs); router.get 
('/api/v1/services/:service/logstream', token, authorizeAdmin, routes.services.getLogStream); router.post('/api/v1/services/:service/restart', json, token, authorizeAdmin, routes.services.restart); + router.post('/api/v1/services/:service/rebuild', json, token, authorizeAdmin, routes.services.rebuild); // well known router.get ('/well-known-handler/*', routes.wellknown.get); diff --git a/src/services.js b/src/services.js index d1763bb40..7bf0795b2 100644 --- a/src/services.js +++ b/src/services.js @@ -4,8 +4,9 @@ exports = module.exports = { getServiceIds, getServiceStatus, getServiceConfig, - configureService, getServiceLogs, + + configureService, restartService, rebuildService, @@ -13,7 +14,6 @@ exports = module.exports = { stopAppServices, startServices, - updateServiceConfig, setupAddons, teardownAddons, @@ -42,7 +42,7 @@ var appdb = require('./appdb.js'), debug = require('debug')('box:addons'), docker = require('./docker.js'), fs = require('fs'), - graphs = require('./graphs.js'), + graphite = require('./graphite.js'), hat = require('./hat.js'), infra = require('./infra_version.js'), mail = require('./mail.js'), @@ -173,27 +173,27 @@ var ADDONS = { const SERVICES = { turn: { status: statusTurn, - restart: restartContainer.bind(null, 'turn'), + restart: docker.restartContainer.bind(null, 'turn'), defaultMemoryLimit: 256 * 1024 * 1024 }, mail: { status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'), restart: mail.restartMail, - defaultMemoryLimit: Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256) * 1024 * 1024 + defaultMemoryLimit: mail.DEFAULT_MEMORY_LIMIT }, mongodb: { status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'), - restart: restartContainer.bind(null, 'mongodb'), + restart: docker.restartContainer.bind(null, 'mongodb'), defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 }, mysql: { status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'), - restart: 
restartContainer.bind(null, 'mysql'), + restart: docker.restartContainer.bind(null, 'mysql'), defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 }, postgresql: { status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'), - restart: restartContainer.bind(null, 'postgresql'), + restart: docker.restartContainer.bind(null, 'postgresql'), defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024 }, docker: { @@ -208,13 +208,13 @@ const SERVICES = { }, sftp: { status: statusSftp, - restart: restartContainer.bind(null, 'sftp'), - defaultMemoryLimit: 256 * 1024 * 1024 + restart: docker.restartContainer.bind(null, 'sftp'), + defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT }, graphite: { status: statusGraphite, - restart: restartContainer.bind(null, 'graphite'), - defaultMemoryLimit: 75 * 1024 * 1024 + restart: docker.restartContainer.bind(null, 'graphite'), + defaultMemoryLimit: graphite.DEFAULT_MEMORY_LIMIT }, nginx: { status: statusNginx, @@ -228,7 +228,7 @@ const APP_SERVICES = { status: (instance, done) => containerStatus(`redis-${instance}`, 'CLOUDRON_REDIS_TOKEN', done), start: (instance, done) => docker.startContainer(`redis-${instance}`, done), stop: (instance, done) => docker.stopContainer(`redis-${instance}`, done), - restart: (instance, done) => restartContainer(`redis-${instance}`, done), + restart: (instance, done) => docker.restartContainer(`redis-${instance}`, done), defaultMemoryLimit: 150 * 1024 * 1024 } }; @@ -263,38 +263,6 @@ function dumpPath(addon, appId) { } } -function rebuildService(serviceName, callback) { - assert.strictEqual(typeof serviceName, 'string'); - assert.strictEqual(typeof callback, 'function'); - - // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged - // passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad - if (serviceName === 
'turn') return startTurn({ version: 'none' }, callback); - if (serviceName === 'mongodb') return startMongodb({ version: 'none' }, callback); - if (serviceName === 'postgresql') return startPostgresql({ version: 'none' }, callback); - if (serviceName === 'mysql') return startMysql({ version: 'none' }, callback); - if (serviceName === 'sftp') return sftp.startSftp({ version: 'none' }, callback); - if (serviceName === 'graphite') return graphs.startGraphite({ version: 'none' }, callback); - - // nothing to rebuild for now - callback(); -} - -function restartContainer(name, callback) { - assert.strictEqual(typeof name, 'string'); - assert.strictEqual(typeof callback, 'function'); - - docker.restartContainer(name, function (error) { - if (error && error.reason === BoxError.NOT_FOUND) { - callback(null); // callback early since rebuilding takes long - return rebuildService(name, function (error) { if (error) debug(`restartContainer: Unable to rebuild service ${name}`, error); }); - } - if (error) return callback(error); - - callback(error); - }); -} - function getContainerDetails(containerName, tokenEnvName, callback) { assert.strictEqual(typeof containerName, 'string'); assert.strictEqual(typeof tokenEnvName, 'string'); @@ -367,12 +335,12 @@ function getServiceConfig(id, callback) { assert.strictEqual(typeof id, 'string'); assert.strictEqual(typeof callback, 'function'); - const [name, instance ] = id.split(':'); + const [name, instance] = id.split(':'); if (!instance) { settings.getServicesConfig(function (error, servicesConfig) { if (error) return callback(error); - callback(null, SERVICES[name], servicesConfig); + callback(null, servicesConfig[name] || {}); }); return; @@ -381,7 +349,7 @@ function getServiceConfig(id, callback) { appdb.get(instance, function (error, app) { if (error) return callback(error); - callback(null, APP_SERVICES[name], app.servicesConfig); + callback(null, app.servicesConfig[name] || {}); }); } @@ -390,13 +358,15 @@ function 
getServiceStatus(id, callback) { assert.strictEqual(typeof callback, 'function'); const [name, instance ] = id.split(':'); - let containerStatusFunc; + let containerStatusFunc, service; if (instance) { - if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND)); - containerStatusFunc = APP_SERVICES[name].status.bind(null, instance); + service = APP_SERVICES[name]; + if (!service) return callback(new BoxError(BoxError.NOT_FOUND)); + containerStatusFunc = service.status.bind(null, instance); } else if (SERVICES[name]) { - containerStatusFunc = SERVICES[name].status; + service = SERVICES[name]; + containerStatusFunc = service.status; } else { return callback(new BoxError(BoxError.NOT_FOUND)); } @@ -420,14 +390,13 @@ function getServiceStatus(id, callback) { tmp.error = result.error || null; tmp.healthcheck = result.healthcheck || null; - getServiceConfig(id, function (error, service, servicesConfig) { + getServiceConfig(id, function (error, serviceConfig) { if (error) return callback(error); - const serviceConfig = servicesConfig[name]; - tmp.config = Object.assign({}, serviceConfig); + tmp.config = serviceConfig; if (!tmp.config.memoryLimit && service.defaultMemoryLimit) { - tmp.config.memoryLimit = service.defaultMemoryLimit * 2; + tmp.config.memoryLimit = service.defaultMemoryLimit; } callback(null, tmp); @@ -444,38 +413,34 @@ function configureService(id, data, callback) { if (instance) { if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND)); - } else if (!SERVICES[name]) { - return callback(new BoxError(BoxError.NOT_FOUND)); - } - getServiceConfig(id, function (error, service, servicesConfig) { - if (error) return callback(error); + apps.get(instance, function (error, app) { + if (error) return callback(error); - if (!servicesConfig[name]) servicesConfig[name] = {}; - - // if not specified we clear the entry and use defaults - if (!data.memoryLimit) { - delete servicesConfig[name].memoryLimit; - } else { + const 
servicesConfig = app.servicesConfig; servicesConfig[name] = data; - } - if (instance) { appdb.update(instance, { servicesConfig }, function (error) { if (error) return callback(error); - updateAppServiceConfig(name, instance, servicesConfig, callback); + applyServiceConfig(id, data, callback); }); - } else { + }); + } else if (SERVICES[name]) { + settings.getServicesConfig(function (error, servicesConfig) { + if (error) return callback(error); + + servicesConfig[name] = data; + settings.setServicesConfig(servicesConfig, function (error) { if (error) return callback(error); - updateServiceConfig(servicesConfig, NOOP_CALLBACK); // this can take a while - - callback(null); + applyServiceConfig(id, data, callback); }); - } - }); + }); + } else { + return callback(new BoxError(BoxError.NOT_FOUND)); + } } function getServiceLogs(id, options, callback) { @@ -556,6 +521,29 @@ function getServiceLogs(id, options, callback) { callback(null, transformStream); } +function rebuildService(id, callback) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof callback, 'function'); + + // this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged + // passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad + getServiceConfig(id, function (error, serviceConfig) { + if (error) return callback(error); + + if (id === 'turn') return startTurn({ version: 'none' }, serviceConfig, callback); + if (id === 'mongodb') return startMongodb({ version: 'none' }, callback); + if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback); + if (id === 'mysql') return startMysql({ version: 'none' }, callback); + if (id === 'sftp') return sftp.start({ version: 'none' }, serviceConfig, callback); + if (id === 'graphite') return graphite.start({ version: 'none' }, serviceConfig, callback); + + // nothing to rebuild for now. 
+ // TODO: mongo/postgresql/mysql need to be scaled down. + // TODO: missing redis container is not created + callback(); + }); +} + function restartService(id, callback) { assert.strictEqual(typeof id, 'string'); assert.strictEqual(typeof callback, 'function'); @@ -565,9 +553,9 @@ function restartService(id, callback) { if (instance) { if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND)); - APP_SERVICES[name].restart(instance, callback); + return APP_SERVICES[name].restart(instance, callback); } else if (SERVICES[name]) { - SERVICES[name].restart(callback); + return SERVICES[name].restart(callback); } else { return callback(new BoxError(BoxError.NOT_FOUND)); } @@ -804,80 +792,77 @@ function exportDatabase(addon, callback) { }); } -function updateServiceConfig(platformConfig, callback) { - assert.strictEqual(typeof platformConfig, 'object'); +function applyServiceConfig(id, serviceConfig, callback) { + assert.strictEqual(typeof id, 'string'); + assert.strictEqual(typeof serviceConfig, 'object'); assert.strictEqual(typeof callback, 'function'); - async.eachSeries([ 'mysql', 'postgresql', 'mail', 'mongodb', 'graphite' ], function iterator(serviceName, iteratorCallback) { - const containerConfig = platformConfig[serviceName]; - let memory, memorySwap; - if (containerConfig && containerConfig.memoryLimit) { - memory = system.getMemoryAllocation(containerConfig.memoryLimit); - memorySwap = containerConfig.memoryLimit; - } else { - memory = SERVICES[serviceName].defaultMemoryLimit; - memorySwap = memory * 2; - } + const [name, instance] = id.split(':'); + let containerName, memoryLimit; - docker.update(serviceName, memory, memorySwap, iteratorCallback); - }, callback); -} + if (instance) { + if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND)); -function updateAppServiceConfig(name, instance, servicesConfig, callback) { - assert.strictEqual(typeof name, 'string'); - assert.strictEqual(typeof instance, 'string'); - 
assert.strictEqual(typeof servicesConfig, 'object'); - assert.strictEqual(typeof callback, 'function'); - - debug(`updateAppServiceConfig: ${name}-${instance} ${JSON.stringify(servicesConfig)}`); - - const serviceConfig = servicesConfig[name]; - let memory, memorySwap; - if (serviceConfig && serviceConfig.memoryLimit) { - memory = system.getMemoryAllocation(serviceConfig.memoryLimit); - memorySwap = serviceConfig.memoryLimit; + containerName = `${name}-${instance}`; + memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES[name].defaultMemoryLimit; + } else if (SERVICES[name]) { + containerName = name; + memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES[name].defaultMemoryLimit; } else { - memory = APP_SERVICES[name].defaultMemoryLimit; - memorySwap = memory * 2; + return callback(new BoxError(BoxError.NOT_FOUND)); } - docker.update(`${name}-${instance}`, memory, memorySwap, callback); + debug(`updateServiceConfig: ${containerName} ${JSON.stringify(serviceConfig)}`); + + const memory = system.getMemoryAllocation(memoryLimit); + docker.update(containerName, memory, memoryLimit, callback); } function startServices(existingInfra, callback) { assert.strictEqual(typeof existingInfra, 'object'); assert.strictEqual(typeof callback, 'function'); - let startFuncs = [ ]; + settings.getServicesConfig(function (error, servicesConfig) { + if (error) return callback(error); - // always start addons on any infra change, regardless of minor or major update - if (existingInfra.version !== infra.version) { - debug(`startServices: ${existingInfra.version} -> ${infra.version}. 
starting all services`); - startFuncs.push( - startTurn.bind(null, existingInfra), - startMysql.bind(null, existingInfra), - startPostgresql.bind(null, existingInfra), - startMongodb.bind(null, existingInfra), - startRedis.bind(null, existingInfra), - graphs.startGraphite.bind(null, existingInfra), - sftp.startSftp.bind(null, existingInfra), - mail.startMail); - } else { - assert.strictEqual(typeof existingInfra.images, 'object'); + let startFuncs = [ ]; - if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra)); - if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra)); - if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra)); - if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra)); - if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); - if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra)); - if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphs.startGraphite.bind(null, existingInfra)); - if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.startSftp.bind(null, existingInfra)); + // always start addons on any infra change, regardless of minor or major update + if (existingInfra.version !== infra.version) { + debug(`startServices: ${existingInfra.version} -> ${infra.version}. 
starting all services`); + startFuncs.push( + startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}), + startMysql.bind(null, existingInfra), + startPostgresql.bind(null, existingInfra), + startMongodb.bind(null, existingInfra), + startRedis.bind(null, existingInfra), + graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}), + sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}), + mail.startMail); + } else { + assert.strictEqual(typeof existingInfra.images, 'object'); - debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; })); - } + if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra, servicesConfig['turn'] || {})); + if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra)); + if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra)); + if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra)); + if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); + if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra)); + if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {})); + if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {})); - debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; })); + debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; })); + } + + // we always start db containers with unlimited memory. 
we then scale them down per configuration + let updateFuncs = [ + applyServiceConfig.bind(null, 'mysql', servicesConfig['mysql'] || {}), + applyServiceConfig.bind(null, 'postgresql', servicesConfig['postgresql'] || {}), + applyServiceConfig.bind(null, 'mongodb', servicesConfig['mongodb'] || {}), + ]; + + async.series(startFuncs.concat(updateFuncs), callback); + }); } function getEnvironment(app, callback) { @@ -1572,8 +1557,9 @@ function restorePostgreSql(app, options, callback) { }); } -function startTurn(existingInfra, callback) { +function startTurn(existingInfra, serviceConfig, callback) { assert.strictEqual(typeof existingInfra, 'object'); + assert.strictEqual(typeof serviceConfig, 'object'); assert.strictEqual(typeof callback, 'function'); // get and ensure we have a turn secret @@ -1584,7 +1570,8 @@ function startTurn(existingInfra, callback) { } const tag = infra.images.turn.tag; - const memoryLimit = 256; + const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit; + const memory = system.getMemoryAllocation(memoryLimit); const realm = settings.adminFqdn(); // this exports 3478/tcp, 5349/tls and 50000-51000/udp. note that this runs on the host network! @@ -1595,8 +1582,8 @@ function startTurn(existingInfra, callback) { --log-opt syslog-address=udp://127.0.0.1:2514 \ --log-opt syslog-format=rfc5424 \ --log-opt tag=turn \ - -m ${memoryLimit}m \ - --memory-swap ${memoryLimit * 2}m \ + -m ${memory} \ + --memory-swap ${memoryLimit} \ --dns 172.18.0.1 \ --dns-search=. \ -e CLOUDRON_TURN_SECRET="${turnSecret}" \ @@ -1872,7 +1859,8 @@ function setupRedis(app, options, callback) { const redisServiceToken = hat(4 * 48); // Compute redis memory limit based on app's memory limit (this is arbitrary) - const memoryLimit = app.servicesConfig['redis'] ? app.servicesConfig['redis'].memory : APP_SERVICES['redis'].defaultMemoryLimit; + const memoryLimit = app.servicesConfig['redis'] ? 
app.servicesConfig['redis'].memoryLimit : APP_SERVICES['redis'].defaultMemoryLimit; + const memory = system.getMemoryAllocation(memoryLimit); const tag = infra.images.redis.tag; const label = app.fqdn; @@ -1886,7 +1874,7 @@ function setupRedis(app, options, callback) { --log-opt syslog-address=udp://127.0.0.1:2514 \ --log-opt syslog-format=rfc5424 \ --log-opt tag="${redisName}" \ - -m ${memoryLimit/2} \ + -m ${memory} \ --memory-swap ${memoryLimit} \ --dns 172.18.0.1 \ --dns-search=. \ diff --git a/src/sftp.js b/src/sftp.js index 4183a00e3..9c2b7305f 100644 --- a/src/sftp.js +++ b/src/sftp.js @@ -1,8 +1,9 @@ 'use strict'; exports = module.exports = { - startSftp, - rebuild + start, + + DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024 }; var apps = require('./apps.js'), @@ -15,36 +16,32 @@ var apps = require('./apps.js'), paths = require('./paths.js'), safe = require('safetydance'), shell = require('./shell.js'), + system = require('./system.js'), volumes = require('./volumes.js'), _ = require('underscore'); -function startSftp(existingInfra, callback) { - assert.strictEqual(typeof existingInfra, 'object'); +var gRebuildInProgress = false; +function rebuild(serviceConfig, callback) { + assert.strictEqual(typeof serviceConfig, 'object'); assert.strictEqual(typeof callback, 'function'); - rebuild(callback); -} - -var rebuildInProgress = false; -function rebuild(callback) { - assert.strictEqual(typeof callback, 'function'); - - if (rebuildInProgress) { + if (gRebuildInProgress) { debug('waiting for other rebuild to finish'); - return setTimeout(function () { rebuild(callback); }, 5000); + return setTimeout(function () { rebuild(serviceConfig, callback); }, 5000); } - rebuildInProgress = true; + gRebuildInProgress = true; function done(error) { - rebuildInProgress = false; + gRebuildInProgress = false; callback(error); } debug('rebuilding container'); const tag = infra.images.sftp.tag; - const memoryLimit = 256; + const memoryLimit = serviceConfig.memoryLimit || 
exports.DEFAULT_MEMORY_LIMIT; + const memory = system.getMemoryAllocation(memoryLimit); const cloudronToken = hat(8 * 128); apps.getAll(function (error, result) { @@ -103,8 +100,8 @@ function rebuild(callback) { --log-opt syslog-address=udp://127.0.0.1:2514 \ --log-opt syslog-format=rfc5424 \ --log-opt tag=sftp \ - -m ${memoryLimit}m \ - --memory-swap ${memoryLimit * 2}m \ + -m ${memory} \ + --memory-swap ${memoryLimit} \ --dns 172.18.0.1 \ --dns-search=. \ -p 222:22 \ @@ -124,3 +121,11 @@ function rebuild(callback) { }); }); } + +function start(existingInfra, serviceConfig, callback) { + assert.strictEqual(typeof existingInfra, 'object'); + assert.strictEqual(typeof serviceConfig, 'object'); + assert.strictEqual(typeof callback, 'function'); + + rebuild(serviceConfig, callback); +} diff --git a/src/volumes.js b/src/volumes.js index cb20e6808..b5e4597ae 100644 --- a/src/volumes.js +++ b/src/volumes.js @@ -16,7 +16,7 @@ const assert = require('assert'), fs = require('fs'), path = require('path'), safe = require('safetydance'), - sftp = require('./sftp.js'), + services = require('./services.js'), uuid = require('uuid'), volumedb = require('./volumedb.js'); @@ -69,7 +69,7 @@ function add(name, hostPath, auditSource, callback) { if (error) return callback(error); eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath }); - sftp.rebuild(NOOP_CALLBACK); + services.rebuildService('sftp', NOOP_CALLBACK); const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { volumeId: id, hostPath }); collectd.addProfile(id, collectdConf, NOOP_CALLBACK); @@ -108,7 +108,7 @@ function del(volume, auditSource, callback) { if (error) return callback(error); eventlog.add(eventlog.ACTION_VOLUME_REMOVE, auditSource, { volume }); - sftp.rebuild(NOOP_CALLBACK); + services.rebuildService('sftp', NOOP_CALLBACK); collectd.removeProfile(volume.id, NOOP_CALLBACK); return callback(null);