docker.js and services.js: async'ify

This commit is contained in:
Girish Ramakrishnan
2021-08-25 19:41:46 -07:00
parent 1cc11fece8
commit 42774eac8c
24 changed files with 1618 additions and 2130 deletions
+78 -105
View File
@@ -2,7 +2,6 @@
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
auditSource = require('./auditsource.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
@@ -10,24 +9,21 @@ const apps = require('./apps.js'),
docker = require('./docker.js'),
eventlog = require('./eventlog.js'),
safe = require('safetydance'),
superagent = require('superagent'),
util = require('util');
superagent = require('superagent');
exports = module.exports = {
run
};
const HEALTHCHECK_INTERVAL = 10 * 1000; // every 10 seconds. this needs to be small since the UI makes only healthy apps clickable
const UNHEALTHY_THRESHOLD = 20 * 60 * 1000; // 20 minutes
const OOM_EVENT_LIMIT = 60 * 60 * 1000; // will only raise 1 oom event every hour
let gStartTime = null; // time when apphealthmonitor was started
let gLastOomMailTime = Date.now() - (5 * 60 * 1000); // pretend we sent email 5 minutes ago
function setHealth(app, health, callback) {
async function setHealth(app, health) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof health, 'string');
assert.strictEqual(typeof callback, 'function');
// app starts out with null health
// if it became healthy, we update immediately. this is required for ui to say "running" etc
@@ -53,68 +49,60 @@ function setHealth(app, health, callback) {
}
} else {
debug(`setHealth: ${app.id} (${app.fqdn}) waiting for ${(UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000} to update health`);
return callback(null);
return;
}
util.callbackify(apps.setHealth)(app.id, health, healthTime, function (error) {
if (error && error.reason === BoxError.NOT_FOUND) return callback(null); // app uninstalled?
if (error) return callback(error);
const [error] = await safe(apps.setHealth(app.id, health, healthTime));
if (error && error.reason === BoxError.NOT_FOUND) return; // app uninstalled?
if (error) throw error;
app.health = health;
app.healthTime = healthTime;
callback(null);
});
app.health = health;
app.healthTime = healthTime;
}
// Probes a single app's health and persists the result via setHealth().
// Throws only on fatal errors; a failing health check is recorded, not thrown.
async function checkAppHealth(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

    // only probe apps that are fully installed and meant to be running
    if (app.installationState !== apps.ISTATE_INSTALLED || app.runState !== apps.RSTATE_RUNNING) return;

    const manifest = app.manifest;

    const [error, data] = await safe(docker.inspect(app.containerId));
    if (error || !data || !data.State) return await setHealth(app, apps.HEALTH_ERROR);
    if (data.State.Running !== true) return await setHealth(app, apps.HEALTH_DEAD);

    // non-appstore apps may not have healthCheckPath
    if (!manifest.healthCheckPath) return await setHealth(app, apps.HEALTH_HEALTHY);

    const healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
    const [healthCheckError, response] = await safe(superagent
        .get(healthCheckUrl)
        .set('Host', app.fqdn) // required for some apache configs with rewrite rules
        .set('User-Agent', 'Mozilla (CloudronHealth)') // required for some apps (e.g. minio)
        .redirects(0)
        .ok(() => true) // never reject on status; the status code is inspected below
        .timeout(options.timeout * 1000)); // options.timeout is in seconds

    let health;
    if (healthCheckError) { // request failed at the network level
        health = apps.HEALTH_UNHEALTHY;
    } else if (response.status > 403) { // 2xx and 3xx are ok. even 401 and 403 are ok for now (for WP sites)
        health = apps.HEALTH_UNHEALTHY;
    } else {
        health = apps.HEALTH_HEALTHY;
    }

    await setHealth(app, health);
}
// Resolves a container id to the thing it belongs to.
// Returns { app, addon } where exactly one of the two is non-null:
//   - app: the app object (looked up by the container's appId label)
//   - addon: { name } for addon containers (no appId label)
// Callers rely on this shape (they read info.app.fqdn / info.addon.name),
// so do not return the app object or the addon name bare.
async function getContainerInfo(containerId) {
    const result = await docker.inspect(containerId);

    const appId = safe.query(result, 'Config.Labels.appId', null);

    // addon containers carry no appId label. Name has a '/' in the beginning for some reason
    if (!appId) return { app: null, addon: { name: result.Name.slice(1) } };

    const app = await apps.get(appId); // don't get by container id as this can be an exec container
    return { app, addon: null };
}
/*
@@ -122,83 +110,68 @@ function getContainerInfo(containerId, callback) {
docker run -ti -m 100M cloudron/base:3.0.0 /bin/bash
stress --vm 1 --vm-bytes 200M --vm-hang 0
*/
// Scans docker for 'oom' events in the window [now - intervalSecs, now - 1]
// and records an eventlog entry for the affected app/addon, rate limited to
// one entry per OOM_EVENT_LIMIT. Dev (debugMode) apps never notify.
async function processDockerEvents(options) {
    assert.strictEqual(typeof options, 'object');

    const since = ((new Date().getTime() / 1000) - options.intervalSecs).toFixed(0);
    const until = ((new Date().getTime() / 1000) - 1).toFixed(0);

    const stream = await docker.getEvents({ since: since, until: until, filters: JSON.stringify({ event: [ 'oom' ] }) });

    stream.setEncoding('utf8');
    stream.on('data', async function (data) {
        let event;
        try {
            event = JSON.parse(data); // guard: a throw inside an async 'data' handler becomes an unhandled rejection
        } catch (parseError) {
            debug('Error parsing docker event', parseError);
            return;
        }
        const containerId = String(event.id);

        const [error, info] = await safe(getContainerInfo(containerId));
        // when the lookup fails, info is undefined; never dereference it in that case
        const app = error ? null : info.app;
        const addon = error ? null : info.addon;
        const program = error ? containerId : (app ? app.fqdn : addon.name); // fall back to raw id if container cannot be resolved
        const now = Date.now();
        const notifyUser = !(app && app.debugMode) && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);

        debug(`OOM ${program} notifyUser: ${notifyUser}. lastOomTime: ${gLastOomMailTime} (now: ${now})`);

        // do not send mails for dev apps
        if (notifyUser) {
            // app can be null for addon containers
            const [logError] = await safe(eventlog.add(eventlog.ACTION_APP_OOM, auditSource.HEALTH_MONITOR, { event, containerId, addon: addon || null, app: app || null }));
            if (logError) debug('Error adding oom event', logError);
            else gLastOomMailTime = now; // only start the rate-limit window when the event was actually recorded
        }
    });
    stream.on('error', function (error) {
        debug('Error reading docker events', error);
    });
    stream.on('end', function () {
        // debug('Event stream ended');
    });

    // safety hatch if 'until' doesn't work (there are cases where docker is working with a different time)
    setTimeout(stream.destroy.bind(stream), options.timeout); // https://github.com/apocas/dockerode/issues/179
}
// Runs a health check on every app in parallel and logs an aggregate summary.
async function processApp(options) {
    assert.strictEqual(typeof options, 'object');

    const allApps = await apps.list();

    // kick off all healthchecks concurrently and wait for every one to settle
    await Promise.allSettled(allApps.map((app) => checkAppHealth(app, options)));

    const aliveCount = allApps.filter((a) =>
        a.installationState === apps.ISTATE_INSTALLED &&
        a.runState === apps.RSTATE_RUNNING &&
        a.health === apps.HEALTH_HEALTHY).length;

    debug(`app health: ${aliveCount} alive / ${allApps.length - aliveCount} dead.`);
}
// Entry point of the app health monitor; invoked by cron every intervalSecs
// seconds. Errors propagate to the caller (the cron job wraps this in safe()).
async function run(intervalSecs) {
    assert.strictEqual(typeof intervalSecs, 'number');

    if (constants.TEST) return;

    if (!gStartTime) gStartTime = new Date();

    // processApp is first because docker.getEvents seems to get 'stuck' sometimes.
    // checkAppHealth() multiplies options.timeout by 1000 (it expects seconds), so
    // pass seconds here; the previous `(intervalSecs - 3) * 1000` double-converted
    // and produced a multi-thousand-second health check timeout.
    await processApp({ timeout: intervalSecs - 3 });
    await processDockerEvents({ intervalSecs, timeout: 3000 });
}
+9 -16
View File
@@ -2019,19 +2019,18 @@ function checkManifestConstraints(manifest) {
return null;
}
function exec(app, options, callback) {
async function exec(app, options) {
assert.strictEqual(typeof app, 'object');
assert(options && typeof options === 'object');
assert.strictEqual(typeof callback, 'function');
let cmd = options.cmd || [ '/bin/bash' ];
const cmd = options.cmd || [ '/bin/bash' ];
assert(Array.isArray(cmd) && cmd.length > 0);
if (app.installationState !== exports.ISTATE_INSTALLED || app.runState !== exports.RSTATE_RUNNING) {
return callback(new BoxError(BoxError.BAD_STATE, 'App not installed or running'));
throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
}
var execOptions = {
const execOptions = {
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
@@ -2043,7 +2042,7 @@ function exec(app, options, callback) {
Cmd: cmd
};
var startOptions = {
const startOptions = {
Detach: false,
Tty: options.tty,
// hijacking upgrades the docker connection from http to tcp. because of this upgrade,
@@ -2058,12 +2057,8 @@ function exec(app, options, callback) {
stderr: true
};
docker.execContainer(app.containerId, { execOptions, startOptions, rows: options.rows, columns: options.columns }, function (error, stream) {
if (error && error.statusCode === 409) return callback(new BoxError(BoxError.BAD_STATE, error.message)); // container restarting/not running
if (error) return callback(error);
callback(null, stream);
});
const stream = await docker.execContainer(app.containerId, { execOptions, startOptions, rows: options.rows, columns: options.columns });
return stream;
}
function canAutoupdateApp(app, updateInfo) {
@@ -2205,8 +2200,6 @@ async function configureInstalledApps() {
async function restartAppsUsingAddons(changedAddons) {
assert(Array.isArray(changedAddons));
const stopContainers = util.promisify(docker.stopContainers);
let apps = await list();
apps = apps.filter(app => app.manifest.addons && _.intersection(Object.keys(app.manifest.addons), changedAddons).length !== 0);
apps = apps.filter(app => app.installationState !== exports.ISTATE_ERROR); // remove errored apps. let them be 'repaired' by hand
@@ -2222,7 +2215,7 @@ async function restartAppsUsingAddons(changedAddons) {
};
// stop apps before updating the databases because postgres will "lock" them preventing import
const [stopError] = await safe(stopContainers(app.id));
const [stopError] = await safe(docker.stopContainers(app.id));
if (stopError) debug(`restartAppsUsingAddons: error stopping ${app.fqdn}`, stopError);
const [addTaskError, taskId] = await safe(addTask(app.id, exports.ISTATE_PENDING_RESTART, task));
@@ -2235,7 +2228,7 @@ async function restartAppsUsingAddons(changedAddons) {
async function schedulePendingTasks() {
debug('schedulePendingTasks: scheduling app tasks');
const result = list();
const result = await list();
result.forEach(function (app) {
if (!app.taskId) return; // if not in any pending state, do nothing
+93 -127
View File
@@ -31,8 +31,8 @@ const apps = require('./apps.js'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
promiseRetry = require('./promise-retry.js'),
reverseProxy = require('./reverseproxy.js'),
rimraf = require('rimraf'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
@@ -64,54 +64,41 @@ function makeTaskError(error, app) {
}
// Persists the given values to the database and mirrors them into the
// in-memory app object (only after the database update succeeded).
async function updateApp(app, values) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof values, 'object');

    await apps.update(app.id, values);

    for (const [key, value] of Object.entries(values)) {
        app[key] = value;
    }
}
// Assigns a random container ip in the range 172.18.16.1 - 172.18.20.255 to the
// app and persists it, retrying up to 10 times (e.g. on a duplicate-ip conflict).
async function allocateContainerIp(app) {
    assert.strictEqual(typeof app, 'object');

    await promiseRetry({ times: 10, interval: 0 }, async function () {
        const base = iputils.intFromIp('172.18.16.1');
        const iprange = iputils.intFromIp('172.18.20.255') - base;
        const rnd = Math.floor(Math.random() * iprange);
        const containerIp = iputils.ipFromInt(base + rnd);
        // must await: without it the promise floats and a failed update would
        // never trigger a retry (the original code was missing this await)
        await updateApp(app, { containerIp });
    });
}
// Creates the docker container for the app, records its id and re-generates
// the per-container configs that depend on the container id.
async function createContainer(app) {
    assert.strictEqual(typeof app, 'object');
    assert(!app.containerId); // otherwise, it will trigger volumeFrom

    debugApp(app, 'creating container');

    const container = await docker.createContainer(app);

    await updateApp(app, { containerId: container.id });

    // re-generate configs that rely on container id
    await addLogrotateConfig(app);
    await addCollectdProfile(app);
}
function deleteContainers(app, options, callback) {
@@ -197,40 +184,30 @@ async function removeCollectdProfile(app) {
await collectd.removeProfile(app.id);
}
// Renders a logrotate config for the app's /run volume to a temp file and
// installs it via the sudo helper. Throws BoxError on docker/fs/sudo failure.
async function addLogrotateConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const result = await docker.inspect(app.containerId);

    const runVolume = result.Mounts.find((mount) => mount.Destination === '/run');
    if (!runVolume) throw new BoxError(BoxError.DOCKER_ERROR, 'App does not have /run mounted');

    // logrotate configs can have arbitrary commands, so the config files must be owned by root
    const logrotateConf = ejs.render(LOGROTATE_CONFIG_EJS, { volumePath: runVolume.Source, appId: app.id });
    const tmpFilePath = path.join(os.tmpdir(), app.id + '.logrotate');

    safe.fs.writeFileSync(tmpFilePath, logrotateConf);
    if (safe.error) throw new BoxError(BoxError.FS_ERROR, `Error writing logrotate config: ${safe.error.message}`);

    const [error] = await safe(shell.promises.sudo('addLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'add', app.id, tmpFilePath ], {}));
    if (error) throw new BoxError(BoxError.LOGROTATE_ERROR, `Error adding logrotate config: ${error.message}`);
}
// Removes the app's logrotate config via the sudo helper.
// Throws BoxError.LOGROTATE_ERROR when the helper fails.
async function removeLogrotateConfig(app) {
    assert.strictEqual(typeof app, 'object');

    const [error] = await safe(shell.promises.sudo('removeLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'remove', app.id ], {}));

    if (error) throw new BoxError(BoxError.LOGROTATE_ERROR, `Error removing logrotate config: ${error.message}`);
}
function cleanupLogs(app, callback) {
@@ -238,7 +215,7 @@ function cleanupLogs(app, callback) {
assert.strictEqual(typeof callback, 'function');
// note that redis container logs are cleaned up by the addon
rimraf(path.join(paths.LOG_DIR, app.id), function (error) {
fs.rm(path.join(paths.LOG_DIR, app.id), { force: true, recursive: true }, function (error) {
if (error) debugApp(app, 'cannot cleanup logs:', error);
callback(null);
@@ -258,29 +235,27 @@ function verifyManifest(manifest, callback) {
callback(null);
}
// Downloads the appstore icon of the app and stores it via updateApp().
// No-op for apps without an appStoreId. A non-200 response is deliberately
// ignored; only network failures are retried (up to 10 times, 5s apart).
async function downloadIcon(app) {
    assert.strictEqual(typeof app, 'object');

    // nothing to download if we dont have an appStoreId
    if (!app.appStoreId) return;

    debugApp(app, `Downloading icon of ${app.appStoreId}@${app.manifest.version}`);
    const iconUrl = `${settings.apiServerOrigin()}/api/v1/apps/${app.appStoreId}/versions/${app.manifest.version}/icon`;

    await promiseRetry({ times: 10, interval: 5000 }, async function () {
        const [networkError, response] = await safe(superagent.get(iconUrl)
            .buffer(true)
            .timeout(30 * 1000)
            .ok(() => true)); // status is checked below; only network errors reject

        if (networkError) throw new BoxError(BoxError.NETWORK_ERROR, `Network error downloading icon : ${networkError.message}`);
        if (response.status !== 200) return; // ignore error. this can also happen for apps installed with cloudron-cli

        await updateApp(app, { appStoreIcon: response.body });
    });
}
function waitForDnsPropagation(app, callback) {
@@ -329,33 +304,24 @@ function moveDataDir(app, targetDir, callback) {
});
}
// Pulls the docker image named in the manifest, first verifying that the
// docker root filesystem has at least 1 GiB of free space.
async function downloadImage(manifest) {
    assert.strictEqual(typeof manifest, 'object');

    const info = await docker.info();

    const [dfError, diskUsage] = await safe(df.file(info.DockerRootDir));
    if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting file system info: ${dfError.message}`);

    const oneGigabyte = 1024 * 1024 * 1024;
    if (diskUsage.available < oneGigabyte) throw new BoxError(BoxError.DOCKER_ERROR, 'Not enough disk space to pull docker image', { diskUsage: diskUsage, dockerRootDir: info.DockerRootDir });

    await docker.downloadImage(manifest);
}
// Starts the app's container. No-op when the app's run state is 'stopped'.
async function startApp(app) {
    assert.strictEqual(typeof app, 'object'); // added for consistency: every sibling function validates its arguments

    debugApp(app, 'startApp: starting container');

    if (app.runState === apps.RSTATE_STOPPED) return; // app is meant to stay stopped; nothing to do

    await docker.startContainer(app.id);
}
function install(app, args, progressCallback, callback) {
@@ -378,7 +344,7 @@ function install(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 10, message: 'Cleaning up old install' }),
reverseProxy.unconfigureApp.bind(null, app),
deleteContainers.bind(null, app, { managedOnly: true }),
function teardownAddons(next) {
async function teardownAddons() {
// when restoring, app does not require these addons anymore. remove carefully to preserve the db passwords
let addonsToRemove;
if (oldManifest) {
@@ -387,7 +353,7 @@ function install(app, args, progressCallback, callback) {
addonsToRemove = app.manifest.addons;
}
services.teardownAddons(app, addonsToRemove, next);
await services.teardownAddons(app, addonsToRemove);
},
function deleteAppDirIfNeeded(done) {
@@ -396,10 +362,10 @@ function install(app, args, progressCallback, callback) {
deleteAppDir(app, { removeDirectory: false }, done); // do not remove any symlinked appdata dir
},
function deleteImageIfChanged(done) {
if (!oldManifest || oldManifest.dockerImage === app.manifest.dockerImage) return done();
async function deleteImageIfChanged() {
if (!oldManifest || oldManifest.dockerImage === app.manifest.dockerImage) return;
docker.deleteImage(oldManifest, done);
await docker.deleteImage(oldManifest);
},
// allocating container ip here, lets the users "repair" an app if allocation fails at apps.add time
@@ -472,12 +438,12 @@ function install(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error installing app:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -495,13 +461,13 @@ function backup(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error backing up app:', error);
// return to installed state intentionally. the error is stashed only in the task and not the app (the UI shows error state otherwise)
return updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null }));
}
callback(null);
callback(error);
});
}
@@ -526,12 +492,12 @@ function create(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error creating :', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -600,12 +566,12 @@ function changeLocation(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error changing location:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -639,13 +605,13 @@ function migrateDataDir(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, dataDir: newDataDir })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error migrating data dir:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback();
callback(error);
});
}
@@ -684,13 +650,13 @@ function configure(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error reconfiguring:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback();
callback(error);
});
}
@@ -740,10 +706,10 @@ function update(app, args, progressCallback, callback) {
// we cannot easily 'recover' from backup failures because we have to revert manfest and portBindings
progressCallback.bind(null, { percent: 35, message: 'Cleaning up old install' }),
deleteContainers.bind(null, app, { managedOnly: true }),
function deleteImageIfChanged(done) {
if (app.manifest.dockerImage === updateConfig.manifest.dockerImage) return done();
async function deleteImageIfChanged() {
if (app.manifest.dockerImage === updateConfig.manifest.dockerImage) return;
docker.deleteImage(app.manifest, done);
await docker.deleteImage(app.manifest);
},
// only delete unused addons after backup
@@ -792,16 +758,16 @@ function update(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, updateTime: new Date() })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error && error.backupError) {
debugApp(app, 'update aborted because backup failed', error);
updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }));
} else if (error) {
debugApp(app, 'Error updating app:', error);
updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
} else {
callback(null);
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(error);
});
}
@@ -827,12 +793,12 @@ function start(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -854,12 +820,12 @@ function stop(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -875,12 +841,12 @@ function restart(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
@@ -914,12 +880,12 @@ function uninstall(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 95, message: 'Remove app from database' }),
apps.del.bind(null, app.id)
], function seriesDone(error) {
], async function seriesDone(error) {
if (error) {
debugApp(app, 'error uninstalling app:', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
await safe(updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }));
}
callback(null);
callback(error);
});
}
+5 -6
View File
@@ -914,16 +914,15 @@ function snapshotApp(app, progressCallback, callback) {
const appsBackupConfig = util.callbackify(apps.backupConfig);
appsBackupConfig(app, function (error) {
appsBackupConfig(app, async function (error) {
if (error) return callback(error);
services.backupAddons(app, app.manifest.addons, function (error) {
if (error) return callback(error);
[error] = await safe(services.backupAddons(app, app.manifest.addons));
if (error) return callback(error);
debugApp(app, `snapshotApp: took ${(new Date() - startTime)/1000} seconds`);
debugApp(app, `snapshotApp: took ${(new Date() - startTime)/1000} seconds`);
return callback(null);
});
return callback(null);
});
}
+3 -3
View File
@@ -75,7 +75,7 @@ async function startJobs() {
gJobs.diskSpaceChecker = new CronJob({
cronTime: '00 30 * * * *', // every 30 minutes. if you change this interval, change the notification messages with correct duration
onTick: () => system.checkDiskSpace(NOOP_CALLBACK),
onTick: async () => await safe(system.checkDiskSpace()),
start: true
});
@@ -112,7 +112,7 @@ async function startJobs() {
gJobs.schedulerSync = new CronJob({
cronTime: constants.TEST ? '*/10 * * * * *' : '00 */1 * * * *', // every minute
onTick: scheduler.sync,
onTick: async () => await safe(scheduler.sync()),
start: true
});
@@ -124,7 +124,7 @@ async function startJobs() {
gJobs.appHealthMonitor = new CronJob({
cronTime: '*/10 * * * * *', // every 10 seconds
onTick: appHealthMonitor.run.bind(null, 10, NOOP_CALLBACK),
onTick: async () => await safe(appHealthMonitor.run(10)), // 10 is the max run time
start: true
});
+266 -361
View File
@@ -18,10 +18,8 @@ exports = module.exports = {
deleteImage,
deleteContainers,
createSubcontainer,
getContainerIdByIp,
inspect,
getContainerIp,
inspectByName: inspect,
execContainer,
getEvents,
memoryUsage,
@@ -32,11 +30,11 @@ exports = module.exports = {
};
const apps = require('./apps.js'),
async = require('async'),
assert = require('assert'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
debug = require('debug')('box:docker'),
delay = require('delay'),
Docker = require('dockerode'),
path = require('path'),
reverseProxy = require('./reverseproxy.js'),
@@ -45,7 +43,6 @@ const apps = require('./apps.js'),
shell = require('./shell.js'),
safe = require('safetydance'),
system = require('./system.js'),
util = require('util'),
volumes = require('./volumes.js'),
_ = require('underscore');
@@ -55,17 +52,13 @@ const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh'),
const DOCKER_SOCKET_PATH = '/var/run/docker.sock';
const gConnection = new Docker({ socketPath: DOCKER_SOCKET_PATH });
function testRegistryConfig(config, callback) {
async function testRegistryConfig(config) {
assert.strictEqual(typeof config, 'object');
assert.strictEqual(typeof callback, 'function');
if (config.provider === 'noop') return callback();
if (config.provider === 'noop') return;
gConnection.checkAuth(config, function (error /*, data */) { // this returns a 500 even for auth errors
if (error) return callback(new BoxError(BoxError.BAD_FIELD, error, { field: 'serverAddress' }));
callback();
});
const [error] = await safe(gConnection.checkAuth(config)); // this returns a 500 even for auth errors
if (error) throw new BoxError(BoxError.BAD_FIELD, error, { field: 'serverAddress' });
}
function injectPrivateFields(newConfig, currentConfig) {
@@ -95,83 +88,77 @@ function ping(callback) {
});
}
function getRegistryConfig(image, callback) {
async function getRegistryConfig(image) {
// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go#L62
const parts = image.split('/');
if (parts.length === 1 || (parts[0].match(/[.:]/) === null)) return callback(null, null); // public docker registry
if (parts.length === 1 || (parts[0].match(/[.:]/) === null)) return null; // public docker registry
util.callbackify(settings.getRegistryConfig)(function (error, registryConfig) {
if (error) return callback(error);
const registryConfig = await settings.getRegistryConfig();
// https://github.com/apocas/dockerode#pull-from-private-repos
const auth = {
username: registryConfig.username,
password: registryConfig.password,
auth: registryConfig.auth || '', // the auth token at login time
email: registryConfig.email || '',
serveraddress: registryConfig.serverAddress
};
// https://github.com/apocas/dockerode#pull-from-private-repos
const auth = {
username: registryConfig.username,
password: registryConfig.password,
auth: registryConfig.auth || '', // the auth token at login time
email: registryConfig.email || '',
serveraddress: registryConfig.serverAddress
};
callback(null, auth);
});
return auth;
}
function pullImage(manifest, callback) {
getRegistryConfig(manifest.dockerImage, function (error, config) {
if (error) return callback(error);
async function pullImage(manifest) {
const config = await getRegistryConfig(manifest.dockerImage);
debug(`pullImage: will pull ${manifest.dockerImage}. auth: ${config ? 'yes' : 'no'}`);
debug(`pullImage: will pull ${manifest.dockerImage}. auth: ${config ? 'yes' : 'no'}`);
gConnection.pull(manifest.dockerImage, { authconfig: config }, function (error, stream) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND, `Unable to pull image ${manifest.dockerImage}. message: ${error.message} statusCode: ${error.statusCode}`));
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${manifest.dockerImage}. Please check the network or if the image needs authentication. statusCode: ${error.statusCode}`));
const [error, stream] = await safe(gConnection.pull(manifest.dockerImage, { authconfig: config }));
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to pull image ${manifest.dockerImage}. message: ${error.message} statusCode: ${error.statusCode}`);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${manifest.dockerImage}. Please check the network or if the image needs authentication. statusCode: ${error.statusCode}`);
// https://github.com/dotcloud/docker/issues/1074 says each status message
// is emitted as a chunk
stream.on('data', function (chunk) {
var data = safe.JSON.parse(chunk) || { };
debug('pullImage: %j', data);
return new Promise((resolve, reject) => {
// https://github.com/dotcloud/docker/issues/1074 says each status message
// is emitted as a chunk
stream.on('data', function (chunk) {
var data = safe.JSON.parse(chunk) || { };
debug('pullImage: %j', data);
// The data.status here is useless because this is per layer as opposed to per image
if (!data.status && data.error) {
debug('pullImage error %s: %s', manifest.dockerImage, data.errorDetail.message);
}
});
// The data.status here is useless because this is per layer as opposed to per image
if (!data.status && data.error) {
debug('pullImage error %s: %s', manifest.dockerImage, data.errorDetail.message);
}
});
stream.on('end', function () {
debug('downloaded image %s', manifest.dockerImage);
stream.on('end', function () {
debug('downloaded image %s', manifest.dockerImage);
resolve();
});
callback(null);
});
stream.on('error', function (error) {
debug('error pulling image %s: %j', manifest.dockerImage, error);
callback(new BoxError(BoxError.DOCKER_ERROR, error.message));
});
stream.on('error', function (error) {
debug('error pulling image %s: %j', manifest.dockerImage, error);
reject(new BoxError(BoxError.DOCKER_ERROR, error.message));
});
});
}
function downloadImage(manifest, callback) {
async function downloadImage(manifest) {
assert.strictEqual(typeof manifest, 'object');
assert.strictEqual(typeof callback, 'function');
debug('downloadImage %s', manifest.dockerImage);
debug(`downloadImage ${manifest.dockerImage}`);
const image = gConnection.getImage(manifest.dockerImage);
image.inspect(function (error, result) {
if (!error && result) return callback(null); // image is already present locally
const [error, result] = await safe(image.inspect());
if (!error && result) return; // image is already present locally
let attempt = 1;
for (let times = 0; times < 10; times++) {
debug(`downloadImage: pulling image. attempt ${times+1}`);
const [pullError] = await safe(pullImage(manifest));
if (pullError && pullError.reason === BoxError.NOT_FOUND) throw pullError;
if (!pullError) break;
async.retry({ times: 10, interval: 5000, errorFilter: e => e.reason !== BoxError.NOT_FOUND }, function (retryCallback) {
debug('Downloading image %s. attempt: %s', manifest.dockerImage, attempt++);
pullImage(manifest, retryCallback);
}, callback);
});
await delay(5000);
}
}
async function getVolumeMounts(app) {
@@ -200,16 +187,15 @@ async function getVolumeMounts(app) {
return mounts;
}
function getAddonMounts(app, callback) {
async function getAddonMounts(app) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
let mounts = [];
const addons = app.manifest.addons;
if (!addons) return callback(null, mounts);
if (!addons) return mounts;
async.eachSeries(Object.keys(addons), async function (addon) {
for (const addon of Object.keys(addons)) {
switch (addon) {
case 'localstorage':
mounts.push({
@@ -219,7 +205,7 @@ function getAddonMounts(app, callback) {
ReadOnly: false
});
return;
break;
case 'tls': {
const bundle = await reverseProxy.getCertificatePath(app.fqdn, app.domain);
@@ -237,28 +223,22 @@ function getAddonMounts(app, callback) {
ReadOnly: true
});
return;
break;
}
default:
return;
break;
}
}, function (error) {
callback(error, mounts);
});
}
return mounts;
}
async function getMounts(app, callback) {
async function getMounts(app) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
const [error, volumeMounts] = await safe(getVolumeMounts(app));
if (error) return callback(error);
getAddonMounts(app, function (error, addonMounts) {
if (error) return callback(error);
callback(null, volumeMounts.concat(addonMounts));
});
const volumeMounts = await getVolumeMounts(app);
const addonMounts = await getAddonMounts(app);
return volumeMounts.concat(addonMounts);
}
function getAddresses() {
@@ -278,22 +258,20 @@ function getAddresses() {
return addresses;
}
function createSubcontainer(app, name, cmd, options, callback) {
async function createSubcontainer(app, name, cmd, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof name, 'string');
assert(!cmd || Array.isArray(cmd));
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
let isAppContainer = !cmd; // non app-containers are like scheduler
var manifest = app.manifest;
var exposedPorts = {}, dockerPortBindings = { };
var domain = app.fqdn;
const manifest = app.manifest;
const exposedPorts = {}, dockerPortBindings = { };
const domain = app.fqdn;
const envPrefix = manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
let stdEnv = [
const stdEnv = [
'CLOUDRON=1',
'CLOUDRON_PROXY_IP=172.18.0.1',
`CLOUDRON_APP_HOSTNAME=${app.id}`,
@@ -303,13 +281,13 @@ function createSubcontainer(app, name, cmd, options, callback) {
`${envPrefix}APP_DOMAIN=${domain}`
];
var portEnv = [];
for (let portName in app.portBindings) {
const portEnv = [];
for (const portName in app.portBindings) {
const hostPort = app.portBindings[portName];
const portType = (manifest.tcpPorts && portName in manifest.tcpPorts) ? 'tcp' : 'udp';
const ports = portType == 'tcp' ? manifest.tcpPorts : manifest.udpPorts;
var containerPort = ports[portName].containerPort || hostPort;
const containerPort = ports[portName].containerPort || hostPort;
// docker portBindings requires ports to be exposed
exposedPorts[`${containerPort}/${portType}`] = {};
@@ -319,7 +297,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
dockerPortBindings[`${containerPort}/${portType}`] = hostIps.map(hip => { return { HostIp: hip, HostPort: hostPort + '' }; });
}
let appEnv = [];
const appEnv = [];
Object.keys(app.env).forEach(function (name) { appEnv.push(`${name}=${app.env[name]}`); });
let memoryLimit = apps.getMemoryLimit(app);
@@ -328,219 +306,194 @@ function createSubcontainer(app, name, cmd, options, callback) {
// if required, we can make this a manifest and runtime argument later
if (!isAppContainer) memoryLimit *= 2;
getMounts(app, async function (error, mounts) {
if (error) return callback(error);
const mounts = await getMounts(app);
const [getEnvError, addonEnv] = await safe(services.getEnvironment(app));
if (getEnvError) return callback(getEnvError);
const addonEnv = await services.getEnvironment(app);
let containerOptions = {
name: name, // for referencing containers
Tty: isAppContainer,
Image: app.manifest.dockerImage,
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv),
ExposedPorts: isAppContainer ? exposedPorts : { },
Volumes: { // see also ReadonlyRootfs
'/tmp': {},
'/run': {}
let containerOptions = {
name: name, // for referencing containers
Tty: isAppContainer,
Image: app.manifest.dockerImage,
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv),
ExposedPorts: isAppContainer ? exposedPorts : { },
Volumes: { // see also ReadonlyRootfs
'/tmp': {},
'/run': {}
},
Labels: {
'fqdn': app.fqdn,
'appId': app.id,
'isSubcontainer': String(!isAppContainer),
'isCloudronManaged': String(true)
},
HostConfig: {
Mounts: mounts,
LogConfig: {
Type: 'syslog',
Config: {
'tag': app.id,
'syslog-address': 'udp://127.0.0.1:2514', // see apps.js:validatePortBindings()
'syslog-format': 'rfc5424'
}
},
Labels: {
'fqdn': app.fqdn,
'appId': app.id,
'isSubcontainer': String(!isAppContainer),
'isCloudronManaged': String(true)
Memory: system.getMemoryAllocation(memoryLimit),
MemorySwap: memoryLimit, // Memory + Swap
PortBindings: isAppContainer ? dockerPortBindings : { },
PublishAllPorts: false,
ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
RestartPolicy: {
'Name': isAppContainer ? 'unless-stopped' : 'no',
'MaximumRetryCount': 0
},
HostConfig: {
Mounts: mounts,
LogConfig: {
Type: 'syslog',
Config: {
'tag': app.id,
'syslog-address': 'udp://127.0.0.1:2514', // see apps.js:validatePortBindings()
'syslog-format': 'rfc5424'
}
},
Memory: system.getMemoryAllocation(memoryLimit),
MemorySwap: memoryLimit, // Memory + Swap
PortBindings: isAppContainer ? dockerPortBindings : { },
PublishAllPorts: false,
ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
RestartPolicy: {
'Name': isAppContainer ? 'unless-stopped' : 'no',
'MaximumRetryCount': 0
},
CpuShares: app.cpuShares,
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
CapAdd: [],
CapDrop: []
CpuShares: app.cpuShares,
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
CapAdd: [],
CapDrop: []
}
};
// do no set hostname of containers to location as it might conflict with addons names. for example, an app installed in mail
// location may not reach mail container anymore by DNS. We cannot set hostname to fqdn either as that sets up the dns
// name to look up the internal docker ip. this makes curl from within container fail
// Note that Hostname has no effect on DNS. We have to use the --net-alias for dns.
// Hostname cannot be set with container NetworkMode. Subcontainers run is the network space of the app container
// This is done to prevent lots of up/down events and iptables locking
if (isAppContainer) {
containerOptions.Hostname = app.id;
containerOptions.HostConfig.NetworkMode = 'cloudron'; // user defined bridge network
containerOptions.HostConfig.Dns = ['172.18.0.1']; // use internal dns
containerOptions.HostConfig.DnsSearch = ['.']; // use internal dns
containerOptions.NetworkingConfig = {
EndpointsConfig: {
cloudron: {
IPAMConfig: {
IPv4Address: app.containerIp
},
Aliases: [ name ] // adds hostname entry with container name
}
}
};
} else {
containerOptions.HostConfig.NetworkMode = `container:${app.containerId}`; // scheduler containers must have same IP as app for various addon auth
}
// do no set hostname of containers to location as it might conflict with addons names. for example, an app installed in mail
// location may not reach mail container anymore by DNS. We cannot set hostname to fqdn either as that sets up the dns
// name to look up the internal docker ip. this makes curl from within container fail
// Note that Hostname has no effect on DNS. We have to use the --net-alias for dns.
// Hostname cannot be set with container NetworkMode. Subcontainers run is the network space of the app container
// This is done to prevent lots of up/down events and iptables locking
if (isAppContainer) {
containerOptions.Hostname = app.id;
containerOptions.HostConfig.NetworkMode = 'cloudron'; // user defined bridge network
containerOptions.HostConfig.Dns = ['172.18.0.1']; // use internal dns
containerOptions.HostConfig.DnsSearch = ['.']; // use internal dns
const capabilities = manifest.capabilities || [];
containerOptions.NetworkingConfig = {
EndpointsConfig: {
cloudron: {
IPAMConfig: {
IPv4Address: app.containerIp
},
Aliases: [ name ] // adds hostname entry with container name
}
}
};
} else {
containerOptions.HostConfig.NetworkMode = `container:${app.containerId}`; // scheduler containers must have same IP as app for various addon auth
}
// https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
if (capabilities.includes('net_admin')) containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping
if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker
var capabilities = manifest.capabilities || [];
if (capabilities.includes('vaapi') && safe.fs.existsSync('/dev/dri')) {
containerOptions.HostConfig.Devices = [
{ PathOnHost: '/dev/dri', PathInContainer: '/dev/dri', CgroupPermissions: 'rwm' }
];
}
// https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
if (capabilities.includes('net_admin')) containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping
if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker
containerOptions = _.extend(containerOptions, options);
if (capabilities.includes('vaapi') && safe.fs.existsSync('/dev/dri')) {
containerOptions.HostConfig.Devices = [
{ PathOnHost: '/dev/dri', PathInContainer: '/dev/dri', CgroupPermissions: 'rwm' }
];
}
const [createError, container] = await safe(gConnection.createContainer(containerOptions));
if (createError && createError.statusCode === 409) throw new BoxError(BoxError.ALREADY_EXISTS, createError);
if (createError) throw new BoxError(BoxError.DOCKER_ERROR, createError);
containerOptions = _.extend(containerOptions, options);
gConnection.createContainer(containerOptions, function (error, container) {
if (error && error.statusCode === 409) return callback(new BoxError(BoxError.ALREADY_EXISTS, error));
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
callback(null, container);
});
});
return container;
}
function createContainer(app, callback) {
createSubcontainer(app, app.id /* name */, null /* cmd */, { } /* options */, callback);
async function createContainer(app) {
return await createSubcontainer(app, app.id /* name */, null /* cmd */, { } /* options */);
}
function startContainer(containerId, callback) {
async function startContainer(containerId) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
container.start(function (error) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
if (error && error.statusCode === 400) return callback(new BoxError(BoxError.BAD_FIELD, error)); // e.g start.sh is not executable
if (error && error.statusCode !== 304) return callback(new BoxError(BoxError.DOCKER_ERROR, error)); // 304 means already started
return callback(null);
});
const [error] = await safe(container.start());
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
if (error && error.statusCode !== 304) throw new BoxError(BoxError.DOCKER_ERROR, error); // 304 means already started
}
function restartContainer(containerId, callback) {
async function restartContainer(containerId) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
container.restart(function (error) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
if (error && error.statusCode === 400) return callback(new BoxError(BoxError.BAD_FIELD, error)); // e.g start.sh is not executable
if (error && error.statusCode !== 204) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
return callback(null);
});
const [error] = await safe(container.restart());
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
if (error && error.statusCode !== 204) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
function stopContainer(containerId, callback) {
async function stopContainer(containerId) {
assert(!containerId || typeof containerId === 'string');
assert.strictEqual(typeof callback, 'function');
if (!containerId) {
debug('No previous container to stop');
return callback();
return;
}
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
var options = {
const options = {
t: 10 // wait for 10 seconds before killing it
};
container.stop(options, function (error) {
if (error && (error.statusCode !== 304 && error.statusCode !== 404)) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + error.message));
let [error] = await safe(container.stop(options));
if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + error.message);
container.wait(function (error/*, data */) {
if (error && (error.statusCode !== 304 && error.statusCode !== 404)) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + error.message));
return callback(null);
});
});
[error] = await safe(container.wait());
if (error && (error.statusCode !== 304 && error.statusCode !== 404)) throw new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + error.message);
}
function deleteContainer(containerId, callback) { // id can also be name
async function deleteContainer(containerId) { // id can also be name
assert(!containerId || typeof containerId === 'string');
assert.strictEqual(typeof callback, 'function');
if (containerId === null) return callback(null);
if (containerId === null) return null;
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
var removeOptions = {
const removeOptions = {
force: true, // kill container if it's running
v: true // removes volumes associated with the container (but not host mounts)
};
container.remove(removeOptions, function (error) {
if (error && error.statusCode === 404) return callback(null);
const [error] = await safe(container.remove(removeOptions));
if (error && error.statusCode === 404) return;
if (error) {
debug('Error removing container %s : %j', containerId, error);
return callback(new BoxError(BoxError.DOCKER_ERROR, error));
}
callback(null);
});
if (error) {
debug('Error removing container %s : %j', containerId, error);
throw new BoxError(BoxError.DOCKER_ERROR, error);
}
}
function deleteContainers(appId, options, callback) {
async function deleteContainers(appId, options) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
let labels = [ 'appId=' + appId ];
const labels = [ 'appId=' + appId ];
if (options.managedOnly) labels.push('isCloudronManaged=true');
gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) }, function (error, containers) {
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) }));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
async.eachSeries(containers, function (container, iteratorDone) {
deleteContainer(container.Id, iteratorDone);
}, callback);
});
for (const container of containers) {
await deleteContainer(container.Id);
}
}
function stopContainers(appId, callback) {
async function stopContainers(appId) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof callback, 'function');
gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: [ 'appId=' + appId ] }) }, function (error, containers) {
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: [ 'appId=' + appId ] }) }));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
async.eachSeries(containers, function (container, iteratorDone) {
stopContainer(container.Id, iteratorDone);
}, callback);
});
for (const container of containers) {
await stopContainer(container.Id);
}
}
function deleteImage(manifest, callback) {
@@ -573,114 +526,79 @@ function deleteImage(manifest, callback) {
});
}
function getContainerIdByIp(ip, callback) {
assert.strictEqual(typeof ip, 'string');
assert.strictEqual(typeof callback, 'function');
gConnection.getNetwork('cloudron').inspect(function (error, bridge) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Unable to find the cloudron network'));
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
var containerId;
for (var id in bridge.Containers) {
if (bridge.Containers[id].IPv4Address.indexOf(ip + '/16') === 0) {
containerId = id;
break;
}
}
if (!containerId) return callback(new BoxError(BoxError.DOCKER_ERROR, 'No container with that ip'));
callback(null, containerId);
});
}
function inspect(containerId, callback) {
async function inspect(containerId) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
container.inspect(function (error, result) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`));
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [error, result] = await safe(container.inspect());
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
callback(null, result);
});
return result;
}
function getContainerIp(containerId, callback) {
async function getContainerIp(containerId) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
if (constants.TEST) return callback(null, '127.0.5.5');
if (constants.TEST) return '127.0.5.5';
inspect(containerId, function (error, result) {
if (error) return callback(error);
const result = await inspect(containerId);
const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
if (!ip) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP'));
const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
if (!ip) throw new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP');
callback(null, ip);
});
return ip;
}
function execContainer(containerId, options, callback) {
async function execContainer(containerId, options) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
container.exec(options.execOptions, function (error, exec) {
if (error && error.statusCode === 409) return callback(new BoxError(BoxError.BAD_STATE, error.message)); // container restarting/not running
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [error, exec] = await safe(container.exec(options.execOptions));
if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
exec.start(options.startOptions, function(error, stream /* in hijacked mode, this is a net.socket */) {
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [startError, stream] = await safe(exec.start(options.startOptions)); /* in hijacked mode, stream is a net.socket */
if (startError) throw new BoxError(BoxError.DOCKER_ERROR, startError);
if (options.rows && options.columns) {
// there is a race where resizing too early results in a 404 "no such exec"
// https://git.cloudron.io/cloudron/box/issues/549
setTimeout(function () {
exec.resize({ h: options.rows, w: options.columns }, function (error) { if (error) debug('Error resizing console', error); });
}, 2000);
}
if (options.rows && options.columns) {
// there is a race where resizing too early results in a 404 "no such exec"
// https://git.cloudron.io/cloudron/box/issues/549
setTimeout(function () {
exec.resize({ h: options.rows, w: options.columns }, function (error) { if (error) debug('Error resizing console', error); });
}, 2000);
}
callback(null, stream);
});
});
return stream;
}
function getEvents(options, callback) {
async function getEvents(options) {
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
gConnection.getEvents(options, function (error, stream) {
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
callback(null, stream);
});
const [error, stream] = await safe(gConnection.getEvents(options));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
return stream;
}
function memoryUsage(containerId, callback) {
async function memoryUsage(containerId) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
var container = gConnection.getContainer(containerId);
const container = gConnection.getContainer(containerId);
container.stats({ stream: false }, function (error, result) {
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
const [error, result] = await safe(container.stats({ stream: false }));
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
callback(null, result);
});
return result;
}
function createVolume(name, volumeDataDir, labels, callback) {
async function createVolume(name, volumeDataDir, labels) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof volumeDataDir, 'string');
assert.strictEqual(typeof labels, 'object');
assert.strictEqual(typeof callback, 'function');
const volumeOptions = {
Name: name,
@@ -694,69 +612,56 @@ function createVolume(name, volumeDataDir, labels, callback) {
};
// requires sudo because the path can be outside appsdata
shell.sudo('createVolume', [ MKDIRVOLUME_CMD, volumeDataDir ], {}, function (error) {
if (error) return callback(new BoxError(BoxError.FS_ERROR, `Error creating app data dir: ${error.message}`));
let [error] = await safe(shell.promises.sudo('createVolume', [ MKDIRVOLUME_CMD, volumeDataDir ], {}));
if (error) throw new BoxError(BoxError.FS_ERROR, `Error creating app data dir: ${error.message}`);
gConnection.createVolume(volumeOptions, function (error) {
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
callback();
});
});
[error] = await safe(gConnection.createVolume(volumeOptions));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
// Clears (or removes, per options.removeDirectory) the data directory backing a named
// docker volume. A missing volume is not an error. The volume itself is left intact.
// Throws BoxError.DOCKER_ERROR on inspect failure, BoxError.FS_ERROR on cleanup failure.
async function clearVolume(name, options) {
    assert.strictEqual(typeof name, 'string');
    assert.strictEqual(typeof options, 'object');

    let volume = gConnection.getVolume(name);

    let [error, v] = await safe(volume.inspect());
    if (error && error.statusCode === 404) return; // already gone, nothing to clear
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    const volumeDataDir = v.Options.device;

    // requires sudo because the path can be outside appsdata.
    // NOTE: wrapped in safe() so a rejection becomes [error] instead of escaping (bug fix)
    [error] = await safe(shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, options.removeDirectory ? 'rmdir' : 'clear', volumeDataDir ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);
}
// this only removes the volume and not the data (use clearVolume for the data)
// A missing volume (404) is not an error; any other docker failure throws DOCKER_ERROR.
async function removeVolume(name) {
    assert.strictEqual(typeof name, 'string');

    let volume = gConnection.getVolume(name);

    const [error] = await safe(volume.remove());
    if (error && error.statusCode !== 404) throw new BoxError(BoxError.DOCKER_ERROR, `removeVolume: Error removing volume: ${error.message}`);
}
// Returns the docker daemon's system-wide info (docker info).
// Throws BoxError.DOCKER_ERROR when the daemon is unreachable.
async function info() {
    const [error, result] = await safe(gConnection.info());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, 'Error connecting to docker');

    return result;
}
// Updates the memory limits of a running container via the docker CLI.
// memory/memorySwap are byte counts passed to `docker update`.
// Retried because updating memory constraints can fail with
// "failed to write to memory.memsw.limit_in_bytes: ... device or resource busy".
// Throws BoxError.DOCKER_ERROR after all attempts fail.
async function update(name, memory, memorySwap) {
    assert.strictEqual(typeof name, 'string');
    assert.strictEqual(typeof memory, 'number');
    assert.strictEqual(typeof memorySwap, 'number');

    const args = `update --memory ${memory} --memory-swap ${memorySwap} ${name}`.split(' ');

    const MAX_ATTEMPTS = 10;
    let lastError = null;
    for (let attempt = 0; attempt < MAX_ATTEMPTS; attempt++) {
        const [error] = await safe(shell.promises.spawn(`update(${name})`, '/usr/bin/docker', args, { }));
        if (!error) return;
        lastError = error;
        if (attempt < MAX_ATTEMPTS - 1) await delay(60 * 1000); // don't sleep after the final failure (bug fix)
    }

    throw new BoxError(BoxError.DOCKER_ERROR, `Unable to update container: ${lastError.message}`);
}
+1 -1
View File
@@ -285,7 +285,7 @@ async function del(domain, auditSource) {
eventlog.add(eventlog.ACTION_DOMAIN_REMOVE, auditSource, { domain });
mail.onDomainRemoved(domain, NOOP_CALLBACK);
safe(mail.onDomainRemoved(domain));
}
async function clear() {
+7 -8
View File
@@ -18,7 +18,7 @@ const addonConfigs = require('./addonconfigs.js'),
ldap = require('ldapjs'),
mail = require('./mail.js'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
users = require('./users.js');
var gServer = null;
@@ -550,14 +550,13 @@ async function authenticateSftp(req, res, next) {
res.end();
}
// LDAP middleware: loads the sftp service config and stashes requireAdmin on the request.
// Any settings error is surfaced as an ldap OperationsError.
async function loadSftpConfig(req, res, next) {
    // NOTE: wrapped in safe() so [error, result] destructuring works (bug fix -
    // getServicesConfig() resolves to a plain object, not a tuple)
    const [error, servicesConfig] = await safe(settings.getServicesConfig());
    if (error) return next(new ldap.OperationsError(error.toString()));

    const sftpConfig = servicesConfig['sftp'] || {};
    req.requireAdmin = sftpConfig.requireAdmin;
    next();
}
async function userSearchSftp(req, res, next) {
+28 -34
View File
@@ -87,13 +87,13 @@ const assert = require('assert'),
nodemailer = require('nodemailer'),
path = require('path'),
paths = require('./paths.js'),
request = require('request'),
reverseProxy = require('./reverseproxy.js'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
smtpTransport = require('nodemailer-smtp-transport'),
superagent = require('superagent'),
sysinfo = require('./sysinfo.js'),
system = require('./system.js'),
tasks = require('./tasks.js'),
@@ -716,9 +716,7 @@ async function configureMail(mailFqdn, mailDomain, serviceConfig) {
}
async function getMailAuth() {
const dockerInspect = util.promisify(docker.inspect);
const data = await dockerInspect('mail');
const data = await docker.inspect('mail');
const ip = safe.query(data, 'NetworkSettings.Networks.cloudron.IPAddress');
if (!ip) throw new BoxError(BoxError.MAIL_ERROR, 'Error querying mail server IP');
@@ -737,18 +735,14 @@ async function getMailAuth() {
};
}
// Reconfigures (and thereby restarts) the mail container with the current
// mail fqdn, dashboard domain and the 'mail' service config.
// No-op in tests unless TEST_CREATE_INFRA is set.
async function restartMail() {
    if (process.env.BOX_ENV === 'test' && !process.env.TEST_CREATE_INFRA) return;

    const servicesConfig = await settings.getServicesConfig();
    const mailConfig = servicesConfig['mail'] || {};

    debug(`restartMail: restarting mail container with mailFqdn:${settings.mailFqdn()} dashboardDomain:${settings.dashboardDomain()}`);

    await configureMail(settings.mailFqdn(), settings.dashboardDomain(), mailConfig);
}
async function restartMailIfActivated() {
@@ -759,7 +753,7 @@ async function restartMailIfActivated() {
return; // not provisioned yet, do not restart container after dns setup
}
await util.promisify(restartMail)();
await restartMail();
}
async function handleCertChanged() {
@@ -1034,11 +1028,10 @@ function onDomainAdded(domain, callback) {
], callback);
}
// Hook invoked when a domain is removed; the mail container must be restarted
// so it drops config for the removed domain.
async function onDomainRemoved(domain) {
    assert.strictEqual(typeof domain, 'string');

    await restartMail();
}
async function clearDomains() {
@@ -1061,7 +1054,7 @@ async function setMailFromValidation(domain, enabled) {
await updateDomain(domain, { mailFromValidation: enabled });
restartMail(NOOP_CALLBACK); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
safe(restartMail()); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
}
async function setBanner(domain, banner) {
@@ -1070,7 +1063,7 @@ async function setBanner(domain, banner) {
await updateDomain(domain, { banner });
restartMail(NOOP_CALLBACK);
safe(restartMail());
}
async function setCatchAllAddress(domain, addresses) {
@@ -1079,7 +1072,7 @@ async function setCatchAllAddress(domain, addresses) {
await updateDomain(domain, { catchAll: addresses });
restartMail(NOOP_CALLBACK); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
safe(restartMail()); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
}
async function setMailRelay(domain, relay, options) {
@@ -1100,7 +1093,7 @@ async function setMailRelay(domain, relay, options) {
await updateDomain(domain, { relay: relay });
restartMail(NOOP_CALLBACK);
safe(restartMail());
}
async function setMailEnabled(domain, enabled, auditSource) {
@@ -1110,7 +1103,7 @@ async function setMailEnabled(domain, enabled, auditSource) {
await updateDomain(domain, { enabled: enabled });
restartMail(NOOP_CALLBACK);
safe(restartMail());
await eventlog.add(enabled ? eventlog.ACTION_MAIL_ENABLED : eventlog.ACTION_MAIL_DISABLED, auditSource, { domain });
}
@@ -1252,21 +1245,20 @@ async function updateMailbox(name, domain, data, auditSource) {
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldUserId: mailbox.userId, ownerId, ownerType, active });
}
// Asks the mail container's internal API to drop the solr search index of a mailbox.
// Throws BoxError.MAIL_ERROR on network failure or non-200 response.
async function removeSolrIndex(mailbox) {
    assert.strictEqual(typeof mailbox, 'string');

    const addonDetails = await services.getContainerDetails('mail', 'CLOUDRON_MAIL_TOKEN');

    // disableTLSCerts: the mail container uses a self-signed cert; ok(() => true)
    // keeps superagent from throwing on non-2xx so we can report the status ourselves
    const [error, response] = await safe(superagent.post(`https://${addonDetails.ip}:3000/solr_delete_index?access_token=${addonDetails.token}`)
        .timeout(2000)
        .disableTLSCerts()
        .send({ mailbox })
        .ok(() => true));

    if (error) throw new BoxError(BoxError.MAIL_ERROR, `Could not remove solr index: ${error.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.MAIL_ERROR, `Error removing solr index - ${response.status} ${JSON.stringify(response.body)}`);
}
async function delMailbox(name, domain, options, auditSource) {
@@ -1285,7 +1277,9 @@ async function delMailbox(name, domain, options, auditSource) {
const result = await database.query('DELETE FROM mailboxes WHERE ((name=? AND domain=?) OR (aliasName = ? AND aliasDomain=?))', [ name, domain, name, domain ]);
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
removeSolrIndex(mailbox, NOOP_CALLBACK);
const [error] = await safe(removeSolrIndex(mailbox));
if (error) debug(`delMailbox: failed to remove solr index: ${error.message}`);
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_REMOVE, auditSource, { name, domain });
}
+44 -46
View File
@@ -626,92 +626,90 @@ function demuxStream(stream, stdin) {
});
}
// Route handler: exec into an app container over a raw TCP upgrade.
// Query params: cmd (JSON array), rows/columns (numbers), tty (boolean string).
// Pipes the docker exec duplex stream to/from the upgraded socket.
async function exec(req, res, next) {
    assert.strictEqual(typeof req.resource, 'object');

    let cmd = null;
    if (req.query.cmd) {
        cmd = safe.JSON.parse(req.query.cmd);
        if (!Array.isArray(cmd) || cmd.length < 1) return next(new HttpError(400, 'cmd must be array with atleast size 1'));
    }

    const columns = req.query.columns ? parseInt(req.query.columns, 10) : null;
    if (isNaN(columns)) return next(new HttpError(400, 'columns must be a number'));

    const rows = req.query.rows ? parseInt(req.query.rows, 10) : null;
    if (isNaN(rows)) return next(new HttpError(400, 'rows must be a number'));

    const tty = req.query.tty === 'true';

    if (safe.query(req.resource, 'manifest.addons.docker') && req.user.role !== users.ROLE_OWNER) return next(new HttpError(403, '"owner" role is requied to exec app with docker addon'));

    // in a badly configured reverse proxy, we might be here without an upgrade
    if (req.headers['upgrade'] !== 'tcp') return next(new HttpError(404, 'exec requires TCP upgrade'));

    const [error, duplexStream] = await safe(apps.exec(req.resource, { cmd: cmd, rows: rows, columns: columns, tty: tty }));
    if (error) return next(BoxError.toHttpError(error));

    req.clearTimeout();
    res.sendUpgradeHandshake();

    // When tty is disabled, the duplexStream has 2 separate streams. When enabled, it has stdout/stderr merged.
    duplexStream.pipe(res.socket);

    if (tty) {
        res.socket.pipe(duplexStream); // in tty mode, the client always waits for server to exit
    } else {
        demuxStream(res.socket, duplexStream);
        res.socket.on('error', function () { duplexStream.end(); });
        res.socket.on('end', function () { duplexStream.end(); });
    }
}
// Route handler: exec into an app container over a websocket upgrade.
// Same query params as exec(); bridges the docker duplex stream with the websocket.
async function execWebSocket(req, res, next) {
    assert.strictEqual(typeof req.resource, 'object');

    let cmd = null;
    if (req.query.cmd) {
        cmd = safe.JSON.parse(req.query.cmd);
        if (!Array.isArray(cmd) || cmd.length < 1) return next(new HttpError(400, 'cmd must be array with atleast size 1'));
    }

    const columns = req.query.columns ? parseInt(req.query.columns, 10) : null;
    if (isNaN(columns)) return next(new HttpError(400, 'columns must be a number'));

    const rows = req.query.rows ? parseInt(req.query.rows, 10) : null;
    if (isNaN(rows)) return next(new HttpError(400, 'rows must be a number'));

    const tty = req.query.tty === 'true' ? true : false;

    // in a badly configured reverse proxy, we might be here without an upgrade
    if (req.headers['upgrade'] !== 'websocket') return next(new HttpError(404, 'exec requires websocket'));

    const [error, duplexStream] = await safe(apps.exec(req.resource, { cmd: cmd, rows: rows, columns: columns, tty: tty }));
    if (error) return next(BoxError.toHttpError(error));

    req.clearTimeout();

    res.handleUpgrade(function (ws) {
        // container -> websocket
        duplexStream.on('end', function () { ws.close(); });
        duplexStream.on('close', function () { ws.close(); });
        duplexStream.on('error', function (error) {
            debug('duplexStream error:', error);
        });
        duplexStream.on('data', function (data) {
            if (ws.readyState !== WebSocket.OPEN) return; // drop output once the socket is gone
            ws.send(data.toString());
        });

        // websocket -> container
        ws.on('error', function (error) {
            debug('websocket error:', error);
        });
        ws.on('message', function (msg) {
            duplexStream.write(msg);
        });
        ws.on('close', function () {
            // Clean things up, if any?
        });
    });
}
+8 -10
View File
@@ -163,20 +163,18 @@ async function getConfig(req, res, next) {
next(new HttpSuccess(200, cloudronConfig));
}
// Route handler: returns disk usage info from the system module.
async function getDisks(req, res, next) {
    const [error, result] = await safe(system.getDisks());
    if (error) return next(BoxError.toHttpError(error));

    next(new HttpSuccess(200, result));
}
// Route handler: returns memory usage info from the system module.
async function getMemory(req, res, next) {
    const [error, result] = await safe(system.getMemory());
    if (error) return next(BoxError.toHttpError(error));

    next(new HttpSuccess(200, result));
}
function update(req, res, next) {
+15 -15
View File
@@ -8,35 +8,35 @@ const assert = require('assert'),
BoxError = require('../boxerror.js'),
middleware = require('../middleware/index.js'),
HttpError = require('connect-lastmile').HttpError,
safe = require('safetydance'),
services = require('../services.js'),
url = require('url');
// Route handler: proxies filemanager requests to the sftp container,
// injecting its access token and rewriting the url to the /files API.
async function proxy(req, res, next) {
    assert.strictEqual(typeof req.params.id, 'string');

    const id = req.params.id; // app id or volume id

    req.clearTimeout();

    const [error, result] = await safe(services.getContainerDetails('sftp', 'CLOUDRON_SFTP_TOKEN'));
    if (error) return next(BoxError.toHttpError(error));

    let parsedUrl = url.parse(req.url, true /* parseQueryString */);
    parsedUrl.query['access_token'] = result.token;

    req.url = url.format({ pathname: `/files/${id}/${encodeURIComponent(req.params[0])}`, query: parsedUrl.query }); // params[0] already contains leading '/'

    const proxyOptions = url.parse(`https://${result.ip}:3000`);
    proxyOptions.rejectUnauthorized = false; // self-signed cert inside the container
    const fileManagerProxy = middleware.proxy(proxyOptions);

    fileManagerProxy(req, res, function (error) {
        if (!error) return next();

        if (error.code === 'ECONNREFUSED') return next(new HttpError(424, 'Unable to connect to filemanager server'));
        if (error.code === 'ECONNRESET') return next(new HttpError(424, 'Unable to query filemanager server'));

        next(new HttpError(500, error));
    });
}
+14 -15
View File
@@ -25,7 +25,7 @@ function restart(req, res, next) {
next();
}
function proxy(req, res, next) {
async function proxy(req, res, next) {
let parsedUrl = url.parse(req.url, true /* parseQueryString */);
const pathname = req.path.split('/').pop();
@@ -34,25 +34,24 @@ function proxy(req, res, next) {
delete req.headers['authorization'];
delete req.headers['cookies'];
services.getContainerDetails('mail', 'CLOUDRON_MAIL_TOKEN', function (error, addonDetails) {
if (error) return next(BoxError.toHttpError(error));
const [error, addonDetails] = await safe(services.getContainerDetails('mail', 'CLOUDRON_MAIL_TOKEN'));
if (error) return next(BoxError.toHttpError(error));
parsedUrl.query['access_token'] = addonDetails.token;
req.url = url.format({ pathname: pathname, query: parsedUrl.query });
parsedUrl.query['access_token'] = addonDetails.token;
req.url = url.format({ pathname: pathname, query: parsedUrl.query });
const proxyOptions = url.parse(`https://${addonDetails.ip}:3000`);
proxyOptions.rejectUnauthorized = false;
const mailserverProxy = middleware.proxy(proxyOptions);
const proxyOptions = url.parse(`https://${addonDetails.ip}:3000`);
proxyOptions.rejectUnauthorized = false;
const mailserverProxy = middleware.proxy(proxyOptions);
req.clearTimeout(); // TODO: add timeout to mail server proxy logic instead of this
mailserverProxy(req, res, function (error) {
if (!error) return next();
req.clearTimeout(); // TODO: add timeout to mail server proxy logic instead of this
mailserverProxy(req, res, function (error) {
if (!error) return next();
if (error.code === 'ECONNREFUSED') return next(new HttpError(424, 'Unable to connect to mail server'));
if (error.code === 'ECONNRESET') return next(new HttpError(424, 'Unable to query mail server'));
if (error.code === 'ECONNREFUSED') return next(new HttpError(424, 'Unable to connect to mail server'));
if (error.code === 'ECONNRESET') return next(new HttpError(424, 'Unable to query mail server'));
next(new HttpError(500, error));
});
next(new HttpError(500, error));
});
}
+8 -9
View File
@@ -16,9 +16,9 @@ const assert = require('assert'),
HttpSuccess = require('connect-lastmile').HttpSuccess,
paths = require('../paths.js'),
provision = require('../provision.js'),
request = require('request'),
safe = require('safetydance'),
settings = require('../settings.js');
settings = require('../settings.js'),
superagent = require('superagent');
function setupTokenAuth(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
@@ -32,20 +32,19 @@ function setupTokenAuth(req, res, next) {
return next();
}
// Auth middleware for setup: on AMI installs, verifies that the supplied
// providerToken matches the EC2 instance id from the metadata service.
// Other providers pass through unchecked.
async function providerTokenAuth(req, res, next) {
    assert.strictEqual(typeof req.body, 'object');

    if (settings.provider() === 'ami') {
        if (typeof req.body.providerToken !== 'string' || !req.body.providerToken) return next(new HttpError(400, 'providerToken must be a non empty string'));

        // NOTE: wrapped in safe() so network errors become [error] instead of an
        // unhandled rejection (bug fix); ok(() => true) lets us report non-200 ourselves
        const [error, response] = await safe(superagent.get('http://169.254.169.254/latest/meta-data/instance-id').timeout(30 * 1000).ok(() => true));
        if (error) return next(new HttpError(422, `Network error getting EC2 metadata: ${error.message}`));
        if (response.status !== 200) return next(new HttpError(422, `Unable to get EC2 meta data. statusCode: ${response.status}`));

        if (response.body !== req.body.providerToken) return next(new HttpError(422, 'Instance ID does not match'));

        next();
    } else {
        next();
    }
}
+46 -52
View File
@@ -24,19 +24,18 @@ async function getAll(req, res, next) {
next(new HttpSuccess(200, { services: result }));
}
// Route handler: returns the status of a single platform service.
async function get(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    req.clearTimeout(); // status check can take a while

    const [error, result] = await safe(services.getServiceStatus(req.params.service));
    if (error) return next(BoxError.toHttpError(error));

    next(new HttpSuccess(200, { service: result }));
}
function configure(req, res, next) {
async function configure(req, res, next) {
assert.strictEqual(typeof req.params.service, 'string');
if (typeof req.body.memoryLimit !== 'number') return next(new HttpError(400, 'memoryLimit must be a number'));
@@ -50,92 +49,87 @@ function configure(req, res, next) {
data.requireAdmin = req.body.requireAdmin;
}
services.configureService(req.params.service, data, function (error) {
if (error) return next(BoxError.toHttpError(error));
const [error] = await safe(services.configureService(req.params.service, data));
if (error) return next(BoxError.toHttpError(error));
next(new HttpSuccess(202, {}));
});
next(new HttpSuccess(202, {}));
}
// Route handler: downloads a fixed number of log lines of a service as an attachment.
async function getLogs(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    const lines = 'lines' in req.query ? parseInt(req.query.lines, 10) : 10; // we ignore last-event-id
    if (isNaN(lines)) return next(new HttpError(400, 'lines must be a number'));

    const options = {
        lines: lines,
        follow: false,
        format: req.query.format || 'json'
    };

    const [error, logStream] = await safe(services.getServiceLogs(req.params.service, options));
    if (error) return next(BoxError.toHttpError(error));

    res.writeHead(200, {
        'Content-Type': 'application/x-logs',
        'Content-Disposition': `attachment; filename="${req.params.service}.log"`,
        'Cache-Control': 'no-cache',
        'X-Accel-Buffering': 'no' // disable nginx buffering
    });

    logStream.pipe(res);
}
// this route is for streaming logs (SSE); each log line is sent as an event
// whose id is the monotonic timestamp, so clients can resume
async function getLogStream(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    const lines = 'lines' in req.query ? parseInt(req.query.lines, 10) : 10; // we ignore last-event-id
    if (isNaN(lines)) return next(new HttpError(400, 'lines must be a valid number'));

    function sse(id, data) { return 'id: ' + id + '\ndata: ' + data + '\n\n'; }

    if (req.headers.accept !== 'text/event-stream') return next(new HttpError(400, 'This API call requires EventStream'));

    const options = {
        lines: lines,
        follow: true,
        format: 'json'
    };

    const [error, logStream] = await safe(services.getServiceLogs(req.params.service, options));
    if (error) return next(BoxError.toHttpError(error));

    res.writeHead(200, {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'X-Accel-Buffering': 'no', // disable nginx buffering
        'Access-Control-Allow-Origin': '*'
    });
    res.write('retry: 3000\n');

    res.on('close', logStream.close); // stop tailing when the client goes away

    logStream.on('data', function (data) {
        var obj = JSON.parse(data);
        res.write(sse(obj.monotonicTimestamp, JSON.stringify(obj))); // send timestamp as id
    });
    logStream.on('end', res.end.bind(res));
    logStream.on('error', res.end.bind(res, null));
}
// Route handler: restarts a platform service. 202 because the restart is async.
async function restart(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    const [error] = await safe(services.restartService(req.params.service));
    if (error) return next(BoxError.toHttpError(error));

    next(new HttpSuccess(202, {}));
}
// Route handler: rebuilds a platform service container. 202 because the rebuild is async.
async function rebuild(req, res, next) {
    assert.strictEqual(typeof req.params.service, 'string');

    const [error] = await safe(services.rebuildService(req.params.service));
    if (error) return next(BoxError.toHttpError(error));

    next(new HttpSuccess(202, {}));
}
-1
View File
@@ -5,7 +5,6 @@
'use strict';
const { execContainer } = require('../../docker.js');
const common = require('./common.js'),
expect = require('expect.js'),
superagent = require('superagent'),
+65 -89
View File
@@ -8,13 +8,12 @@ exports = module.exports = {
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
CronJob = require('cron').CronJob,
debug = require('debug')('box:scheduler'),
docker = require('./docker.js'),
util = require('util'),
safe = require('safetydance'),
_ = require('underscore');
// appId -> { containerId, schedulerConfig (manifest), cronjobs }
@@ -32,42 +31,37 @@ function resumeJobs(appId) {
gSuspendedAppIds.delete(appId);
}
// Runs a scheduled app task by restarting its cron subcontainer.
// Skips when the app is suspended, not installed/running/healthy, or when the
// previous run of this task is still within its allowed window.
async function runTask(appId, taskName) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof taskName, 'string');

    const JOB_MAX_TIME = 30 * 60 * 1000; // 30 minutes
    const containerName = `${appId}-${taskName}`;

    if (gSuspendedAppIds.has(appId)) return;

    const app = await apps.get(appId);
    if (!app) throw new BoxError(BoxError.NOT_FOUND, 'App not found');

    if (app.installationState !== apps.ISTATE_INSTALLED || app.runState !== apps.RSTATE_RUNNING || app.health !== apps.HEALTH_HEALTHY) return;

    const [error, data] = await safe(docker.inspectByName(containerName));
    if (!error && data && data.State.Running === true) {
        const jobStartTime = new Date(data.State.StartedAt); // iso 8601
        if (new Date() - jobStartTime < JOB_MAX_TIME) return; // previous run still in flight; let it finish
    }

    await docker.restartContainer(containerName);
}
function createJobs(app, schedulerConfig, callback) {
async function createJobs(app, schedulerConfig) {
assert.strictEqual(typeof app, 'object');
assert(schedulerConfig && typeof schedulerConfig === 'object');
assert.strictEqual(typeof callback, 'function');
const appId = app.id;
let jobs = { };
const jobs = { };
async.eachSeries(Object.keys(schedulerConfig), function (taskName, iteratorDone) {
for (const taskName of Object.keys(schedulerConfig)) {
const task = schedulerConfig[taskName];
const randomSecond = Math.floor(60*Math.random()); // don't start all crons to decrease memory pressure
const cronTime = (constants.TEST ? '*/5 ' : `${randomSecond} `) + task.schedule; // time ticks faster in tests
@@ -77,97 +71,79 @@ function createJobs(app, schedulerConfig, callback) {
// stopJobs only deletes jobs since previous run. This means that when box code restarts, none of the containers
// are removed. The deleteContainer here ensures we re-create the cron containers with the latest config
docker.deleteContainer(containerName, function ( /* ignoredError */) {
docker.createSubcontainer(app, containerName, [ '/bin/sh', '-c', cmd ], { } /* options */, function (error) {
if (error && error.reason !== BoxError.ALREADY_EXISTS) return iteratorDone(error);
await safe(docker.deleteContainer(containerName)); // ignore error
const [error] = await safe(docker.createSubcontainer(app, containerName, [ '/bin/sh', '-c', cmd ], { } /* options */));
if (error && error.reason !== BoxError.ALREADY_EXISTS) continue;
debug(`createJobs: ${taskName} (${app.fqdn}) will run in container ${containerName}`);
debug(`createJobs: ${taskName} (${app.fqdn}) will run in container ${containerName}`);
var cronJob = new CronJob({
cronTime: cronTime, // at this point, the pattern has been validated
onTick: () => runTask(appId, taskName, (error) => { // put the app id in closure, so we don't use the outdated app object by mistake
if (error) debug(`could not run task ${taskName} : ${error.message}`);
}),
start: true
});
jobs[taskName] = cronJob;
iteratorDone();
});
const cronJob = new CronJob({
cronTime: cronTime, // at this point, the pattern has been validated
onTick: async () => {
const [error] = await safe(runTask(appId, taskName)); // put the app id in closure, so we don't use the outdated app object by mistake
if (error) debug(`could not run task ${taskName} : ${error.message}`);
},
start: true
});
}, function (error) {
callback(error, jobs);
});
jobs[taskName] = cronJob;
}
return jobs;
}
// Stops all scheduler jobs of an app: halts the in-process cron timers and
// removes the per-task containers. Container removal is best-effort; failures
// are only logged. A null appState means nothing was ever scheduled.
async function stopJobs(appId, appState) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof appState, 'object');

    if (!appState) return; // nothing to stop

    for (const taskName of Object.keys(appState.schedulerConfig)) {
        // stop the cron timer, if one was created for this task
        if (appState.cronJobs && appState.cronJobs[taskName]) appState.cronJobs[taskName].stop();

        // best-effort removal of the task container
        const containerName = `${appId}-${taskName}`;
        const [deleteError] = await safe(docker.deleteContainer(containerName));
        if (deleteError) debug(`stopJobs: failed to delete task container with name ${containerName} : ${deleteError.message}`);
    }
}
// Reconciles the in-memory scheduler state (gState) with the current app list:
// tears down jobs of removed apps and (re)creates jobs for apps whose scheduler
// config or container changed. Errors for one app are logged and do not stop
// processing of the remaining apps.
async function sync() {
    if (constants.TEST) return;

    const allApps = await apps.list();

    const allAppIds = allApps.map(function (app) { return app.id; });
    const removedAppIds = _.difference(Object.keys(gState), allAppIds);
    if (removedAppIds.length !== 0) debug(`sync: stopping jobs of removed apps ${JSON.stringify(removedAppIds)}`);

    for (const appId of removedAppIds) {
        debug(`sync: removing jobs of ${appId}`);
        const [error] = await safe(stopJobs(appId, gState[appId]));
        if (error) debug(`sync: error stopping jobs of removed app ${appId}: ${error.message}`);
    }

    gState = _.omit(gState, removedAppIds);

    for (const app of allApps) {
        const appState = gState[app.id] || null;
        const schedulerConfig = app.manifest.addons ? app.manifest.addons.scheduler : null;

        if (!appState && !schedulerConfig) continue; // nothing to do

        if (appState && appState.cronJobs) { // we had created jobs for this app previously
            if (_.isEqual(appState.schedulerConfig, schedulerConfig) && appState.containerId === app.containerId) continue; // nothing changed
        }

        debug(`sync: adding jobs of ${app.id} (${app.fqdn})`);

        const [stopError] = await safe(stopJobs(app.id, appState));
        if (stopError) debug(`sync: error stopping jobs of ${app.id} : ${stopError.message}`);

        if (!schedulerConfig) { // updated app version removed scheduler addon
            delete gState[app.id];
            continue;
        }

        // do not let one app's failure (e.g. docker is down) reject the whole sync;
        // gState is left unset for this app so the next sync() recreates everything for it
        const [createError, cronJobs] = await safe(createJobs(app, schedulerConfig));
        if (createError) {
            debug(`sync: error creating jobs of ${app.id} : ${createError.message}`);
            continue;
        }

        gState[app.id] = { containerId: app.containerId, schedulerConfig, cronJobs };
    }
}
+800 -1069
View File
File diff suppressed because it is too large Load Diff
+1 -2
View File
@@ -499,8 +499,7 @@ async function setRegistryConfig(registryConfig) {
docker.injectPrivateFields(registryConfig, currentConfig);
const dockerTestRegistryConfig = util.promisify(docker.testRegistryConfig);
await dockerTestRegistryConfig(registryConfig);
await docker.testRegistryConfig(registryConfig);
await set(exports.REGISTRY_CONFIG_KEY, JSON.stringify(registryConfig));
+54 -64
View File
@@ -9,7 +9,6 @@ exports = module.exports = {
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:sftp'),
hat = require('./hat.js'),
@@ -18,13 +17,11 @@ const apps = require('./apps.js'),
safe = require('safetydance'),
shell = require('./shell.js'),
system = require('./system.js'),
util = require('util'),
volumes = require('./volumes.js');
function rebuild(serviceConfig, options, callback) {
async function rebuild(serviceConfig, options) {
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
debug('rebuilding container');
@@ -36,78 +33,71 @@ function rebuild(serviceConfig, options, callback) {
let dataDirs = [];
const stat = safe.fs.lstatSync(paths.APPS_DATA_DIR);
if (!stat) return callback(new BoxError(BoxError.FS_ERROR, safe.error));
if (!stat) throw new BoxError(BoxError.FS_ERROR, safe.error);
const resolvedAppDataDir = stat.isSymbolicLink() ? safe.fs.readlinkSync(paths.APPS_DATA_DIR) : paths.APPS_DATA_DIR;
dataDirs.push({ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' });
util.callbackify(apps.list)(async function (error, result) {
if (error) return callback(error);
const result = await apps.list();
result.forEach(function (app) {
if (!app.manifest.addons['localstorage'] || !app.dataDir) return;
result.forEach(function (app) {
if (!app.manifest.addons['localstorage'] || !app.dataDir) return;
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/mnt/${app.id}`;
if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
return;
}
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/mnt/${app.id}`;
if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
return;
}
dataDirs.push({ hostDir, mountDir });
});
let allVolumes;
[error, allVolumes] = await safe(volumes.list());
if (error) return callback(error);
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' });
allVolumes.forEach(function (volume) {
if (volume.hostPath.startsWith('/mnt/volumes/')) return;
if (!safe.fs.existsSync(volume.hostPath)) {
debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
return;
}
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/${volume.id}` });
});
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
const cmd = `docker run --restart=always -d --name="sftp" \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 222:22 \
${mounts} \
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
// ignore error if container not found (and fail later) so that this code works across restarts
async.series([
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
shell.exec.bind(null, 'startSftp', cmd)
], callback);
dataDirs.push({ hostDir, mountDir });
});
let allVolumes = await volumes.list();
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' });
allVolumes.forEach(function (volume) {
if (volume.hostPath.startsWith('/mnt/volumes/')) return;
if (!safe.fs.existsSync(volume.hostPath)) {
debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
return;
}
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/${volume.id}` });
});
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
const cmd = `docker run --restart=always -d --name="sftp" \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 222:22 \
${mounts} \
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
// ignore error if container not found (and fail later) so that this code works across restarts
await shell.promises.exec('stopSftp', 'docker stop sftp || true');
await shell.promises.exec('removeSftp', 'docker rm -f sftp || true');
await shell.promises.exec('startSftp', cmd);
}
function start(existingInfra, serviceConfig, callback) {
async function start(existingInfra, serviceConfig) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
rebuild(serviceConfig, { force: true }, callback); // force rebuild when infra changed
await rebuild(serviceConfig, { force: true }); // force rebuild when infra changed
}
+52 -72
View File
@@ -9,7 +9,6 @@ exports = module.exports = {
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:disks'),
df = require('@sindresorhus/df'),
@@ -21,8 +20,6 @@ const apps = require('./apps.js'),
settings = require('./settings.js'),
volumes = require('./volumes.js');
const dfAsync = async.asyncify(df), dfFileAsync = async.asyncify(df.file);
async function getVolumeDisks(appsDataDisk) {
assert.strictEqual(typeof appsDataDisk, 'string');
@@ -55,7 +52,7 @@ async function getAppDisks(appsDataDisk) {
return appDisks;
}
async function getBackupDisk() {
async function getBackupsFilesystem() {
const backupConfig = await settings.getBackupConfig();
if (backupConfig.provider !== 'filesystem') return null;
@@ -64,82 +61,67 @@ async function getBackupDisk() {
return result.filesystem;
}
// Collects filesystem information for the server: all ext4 disks plus the
// filesystems backing the box, platform, apps, docker and backup data, and the
// per-app/per-volume disk usage. Throws BoxError.FS_ERROR when df fails.
async function getDisks() {
    const info = await docker.info();

    const [dfError, allDisks] = await safe(df());
    if (dfError) throw new BoxError(BoxError.FS_ERROR, dfError);

    // filter by ext4 and then sort to make sure root disk is first
    const ext4Disks = allDisks.filter((r) => r.type === 'ext4').sort((a, b) => a.mountpoint.localeCompare(b.mountpoint));

    // per-directory filesystem lookups, in a fixed order (box, platform, apps, docker)
    const dataPaths = [ paths.BOX_DATA_DIR, paths.PLATFORM_DATA_DIR, paths.APPS_DATA_DIR, info.DockerRootDir ];
    const fsInfos = [];
    for (const dataPath of dataPaths) {
        const [error, fsInfo] = await safe(df.file(dataPath));
        if (error) throw new BoxError(BoxError.FS_ERROR, error);
        fsInfos.push(fsInfo);
    }

    const disks = {
        disks: ext4Disks, // root disk is first. { filesystem, type, size, used, available, capacity, mountpoint }
        boxDataDisk: fsInfos[0].filesystem,
        mailDataDisk: fsInfos[0].filesystem, // mail data lives on the box data disk
        platformDataDisk: fsInfos[1].filesystem,
        appsDataDisk: fsInfos[2].filesystem,
        dockerDataDisk: fsInfos[3].filesystem,
        backupsDisk: await getBackupsFilesystem(), // null unless the 'filesystem' backup provider is configured
        apps: {}, // filled below
        volumes: {} // filled below
    };

    disks.apps = await getAppDisks(disks.appsDataDisk);
    disks.volumes = await getVolumeDisks(disks.appsDataDisk);

    return disks;
}
// Checks the data-bearing filesystems for low disk space and raises (or
// clears) the disk-space notification alert. A failure to gather disk info is
// logged and skipped — presumably this runs on a timer, so it must not reject.
async function checkDiskSpace() {
    debug('checkDiskSpace: checking disk space');

    const [error, disks] = await safe(getDisks());
    if (error) {
        debug(`checkDiskSpace: error getting disks: ${error.message}`);
        return;
    }

    let markdownMessage = '';
    disks.disks.forEach(function (entry) {
        // ignore other filesystems but where box, app and platform data is
        if (entry.filesystem !== disks.boxDataDisk
            && entry.filesystem !== disks.platformDataDisk
            && entry.filesystem !== disks.appsDataDisk
            && entry.filesystem !== disks.backupsDisk
            && entry.filesystem !== disks.dockerDataDisk) return;

        if (entry.available <= (1.25 * 1024 * 1024 * 1024)) { // warn below 1.25G free
            markdownMessage += `* ${entry.filesystem} is at ${entry.capacity*100}% capacity.\n`;
        }
    });

    debug(`checkDiskSpace: disk space checked. out of space: ${markdownMessage || 'no'}`);

    // an empty message clears any existing alert
    if (markdownMessage) markdownMessage = `One or more file systems are running out of space. Please increase the disk size at the earliest.\n\n${markdownMessage}`;

    await notifications.alert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space', markdownMessage);
}
function getSwapSize() {
@@ -149,13 +131,11 @@ function getSwapSize() {
return swap;
}
// Returns the server's memory figures: total physical RAM (bytes) and the
// configured swap size as reported by getSwapSize().
async function getMemory() {
    const memory = os.totalmem();
    const swap = getSwapSize();

    return { memory, swap };
}
function getMemoryAllocation(limit) {
+9 -20
View File
@@ -16,30 +16,19 @@ describe('System', function () {
before(setup);
after(cleanup);
it('can get disks', function (done) {
system.getDisks(function (error, disks) {
expect(!error).to.be.ok();
expect(disks).to.be.ok();
done();
});
it('can get disks', async function () {
const disks = await system.getDisks();
expect(disks).to.be.ok();
});
it('can check for disk space', function (done) {
system.checkDiskSpace(function (error) {
expect(!error).to.be.ok();
done();
});
it('can check for disk space', async function () {
await system.checkDiskSpace();
});
it('can get memory', function (done) {
system.getMemory(function (error, memory) {
expect(!error).to.be.ok();
expect(memory.memory).to.be.a('number');
expect(memory.swap).to.be.a('number');
done();
});
it('can get memory', async function () {
const memory = await system.getMemory();
expect(memory.memory).to.be.a('number');
expect(memory.swap).to.be.a('number');
});
});