Compare commits
19 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 9be09510d4 | |||
| 83488bc4ce | |||
| 2f9a8029c4 | |||
| 07af3edf51 | |||
| 636d1f3e20 | |||
| 3b69e4dcec | |||
| d977b0b238 | |||
| 909c6bccb1 | |||
| 3ee3786936 | |||
| c4d60bde83 | |||
| 4aae663b2e | |||
| da00bce4b7 | |||
| 0067766284 | |||
| bb0b5550e0 | |||
| 1db1f3faf4 | |||
| 9650a55c85 | |||
| 9451bcd38b | |||
| aa7dbdd1fa | |||
| ac18fb47b4 |
@@ -2221,3 +2221,20 @@
|
||||
* Avoid updown notifications on full restore
|
||||
* Add retries to downloader logic in installer
|
||||
|
||||
[6.2.2]
|
||||
* Fix ENOBUFS issue with backups when collecting fs metadata
|
||||
|
||||
[6.2.3]
|
||||
* Fix addon crashes with missing databases
|
||||
* Update mail container for LMTP cert fix
|
||||
* Fix services view showing yellow icon
|
||||
|
||||
[6.2.4]
|
||||
* Another addon crash fix
|
||||
|
||||
[6.2.5]
|
||||
* update: set memory limit properly
|
||||
* Fix bug where renew certs button did not work
|
||||
* sftp: fix rebuild condition
|
||||
* Fix display of user management/dashboard visibility for email apps
|
||||
* graphite: disable tagdb and reduce log noise
|
||||
|
||||
+2
-8
@@ -13,9 +13,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/mkdirvolume.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/rmaddondir.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmaddondir.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
|
||||
|
||||
@@ -41,11 +38,8 @@ yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/scripts/backupup
|
||||
Defaults!/home/yellowtent/box/src/scripts/restart.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restart.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restartdocker.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartdocker.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restartunbound.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartunbound.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/restartservice.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartservice.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/rmmailbox.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmailbox.sh
|
||||
|
||||
+1
-1
@@ -629,7 +629,7 @@ function scheduleTask(appId, installationState, taskId, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
let memoryLimit = 400;
|
||||
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE) {
|
||||
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE || installationState === exports.ISTATE_PENDING_UPDATE) {
|
||||
memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;
|
||||
}
|
||||
|
||||
|
||||
@@ -79,14 +79,16 @@ function scheduleTask(appId, taskId, options, callback) {
|
||||
delete gActiveTasks[appId];
|
||||
locker.unlock(locker.OP_APPTASK); // unlock event will trigger next task
|
||||
|
||||
// post app task hooks
|
||||
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
|
||||
scheduler.resumeJobs(appId);
|
||||
});
|
||||
}
|
||||
|
||||
function startNextTask() {
|
||||
if (gPendingTasks.length === 0) return;
|
||||
if (gPendingTasks.length === 0) {
|
||||
// rebuild sftp when task queue is empty. this minimizes risk of sftp rebuild overlapping with other app tasks
|
||||
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
|
||||
return;
|
||||
}
|
||||
|
||||
assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);
|
||||
|
||||
|
||||
+4
-3
@@ -554,16 +554,17 @@ function saveFsMetadata(dataLayout, metadataFile, callback) {
|
||||
symlinks: []
|
||||
};
|
||||
|
||||
// we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
|
||||
for (let lp of dataLayout.localPaths()) {
|
||||
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty\n`, { encoding: 'utf8' });
|
||||
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (emptyDirs === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`));
|
||||
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));
|
||||
|
||||
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable\n`, { encoding: 'utf8' });
|
||||
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (execFiles === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`));
|
||||
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
|
||||
|
||||
const symlinks = safe.child_process.execSync(`find ${lp} -type l\n`, { encoding: 'utf8' });
|
||||
const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (symlinks === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`));
|
||||
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
|
||||
const target = safe.fs.readlinkSync(sl);
|
||||
|
||||
+11
-9
@@ -572,18 +572,20 @@ Acme2.prototype.acmeFlow = function (hostname, domain, callback) {
|
||||
Acme2.prototype.getDirectory = function (callback) {
|
||||
const that = this;
|
||||
|
||||
request.get(this.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
|
||||
if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
|
||||
async.retry({ times: 3, interval: 20000 }, function (retryCallback) {
|
||||
request.get(that.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
|
||||
if (error) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
|
||||
if (response.statusCode !== 200) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
|
||||
|
||||
if (typeof response.body.newNonce !== 'string' ||
|
||||
typeof response.body.newOrder !== 'string' ||
|
||||
typeof response.body.newAccount !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
|
||||
if (typeof response.body.newNonce !== 'string' ||
|
||||
typeof response.body.newOrder !== 'string' ||
|
||||
typeof response.body.newAccount !== 'string') return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
|
||||
|
||||
that.directory = response.body;
|
||||
that.directory = response.body;
|
||||
|
||||
callback(null);
|
||||
});
|
||||
retryCallback(null);
|
||||
});
|
||||
}, callback);
|
||||
};
|
||||
|
||||
Acme2.prototype.getCertificate = function (vhost, domain, callback) {
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
start,
|
||||
|
||||
DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
infra = require('./infra_version.js'),
|
||||
paths = require('./paths.js'),
|
||||
shell = require('./shell.js'),
|
||||
system = require('./system.js');
|
||||
|
||||
function start(existingInfra, serviceConfig, callback) {
|
||||
assert.strictEqual(typeof existingInfra, 'object');
|
||||
assert.strictEqual(typeof serviceConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const tag = infra.images.graphite.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
|
||||
const memory = system.getMemoryAllocation(memoryLimit);
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="graphite" \
|
||||
--hostname graphite \
|
||||
--net cloudron \
|
||||
--net-alias graphite \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag=graphite \
|
||||
-m ${memory} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-p 127.0.0.1:2003:2003 \
|
||||
-p 127.0.0.1:2004:2004 \
|
||||
-p 127.0.0.1:8417:8000 \
|
||||
-v "${dataDir}/graphite:/var/lib/graphite" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
async.series([
|
||||
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
|
||||
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
|
||||
shell.exec.bind(null, 'startGraphite', cmd)
|
||||
], callback);
|
||||
}
|
||||
@@ -16,12 +16,12 @@ exports = module.exports = {
|
||||
// docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
|
||||
'images': {
|
||||
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.3.0@sha256:386fb755fc41edd7086f7bcb230f7f28078936f9ae4ead6d97c741df1cc194ae' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.0.3@sha256:dde2522915a12bfb3c840771553ea8a670c7e0ef84d7ec619483b98b3dfec082' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.1@sha256:279e66a20483f230e970078c7b51581e19017fb2436d1caa514969ff07a16464' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.0.4@sha256:4d688c746f27b195d98f35a7d24ec01f3f754e0ca61e9de0b0bc9793553880f1' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.2@sha256:424081fd38ebd35f3606c64f8f99138570e5f4d5066f12cfb4142447d249d3e7' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.1@sha256:ad20a9a5dcb2ab132374a7c8d44b89af0ec37651cf889e570f7625b02ee85fdf' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.1@sha256:9f8de95b88ce64650c08d5af7b0c2cd24465dd4e24cb0537a0d0f84e78da9840' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.2.2@sha256:ae7647ec5d32478f22c056d65700e458a089bb45667aa3c67a1b3dd049754cee' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.4.0@sha256:953bbd8b72a9108a8526d2c0bdbba67e1e1563ff59d0a117f0884dba1576f3dd' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.2@sha256:caaa1f7f4055ae8990d8ec65bd100567496df7e4ed5eb427867f3717a8dcbf92' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.2.3@sha256:fdc4aa6d2c85aeafe65eaa4243aada0cc2e57b94f6eaee02c9b1a8fb89b01dd7' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.0@sha256:b00b64b8df4032985d7a1ddd548a2713b6d7d88a54ebe9b7d324cece2bd6829e' },
|
||||
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.2.0@sha256:61e8247ded1e07cf882ca478dab180960357c614472e80b938f1f690a46788c2' }
|
||||
}
|
||||
};
|
||||
|
||||
+3
-3
@@ -54,8 +54,8 @@ var acme2 = require('./cert/acme2.js'),
|
||||
users = require('./users.js'),
|
||||
util = require('util');
|
||||
|
||||
var NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' }),
|
||||
RELOAD_NGINX_CMD = path.join(__dirname, 'scripts/reloadnginx.sh');
|
||||
const NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' });
|
||||
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
|
||||
|
||||
function nginxLocation(s) {
|
||||
if (!s.startsWith('!')) return s;
|
||||
@@ -172,7 +172,7 @@ function validateCertificate(location, domainObject, certificate) {
|
||||
function reload(callback) {
|
||||
if (constants.TEST) return callback();
|
||||
|
||||
shell.sudo('reload', [ RELOAD_NGINX_CMD ], {}, function (error) {
|
||||
shell.sudo('reload', [ RESTART_SERVICE_CMD, 'nginx' ], {}, function (error) {
|
||||
if (error) return callback(new BoxError(BoxError.NGINX_ERROR, `Error reloading nginx: ${error.message}`));
|
||||
|
||||
callback();
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
echo "This script should be run as root." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $# == 1 && "$1" == "--check" ]]; then
|
||||
echo "OK"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "${BOX_ENV}" == "cloudron" ]]; then
|
||||
nginx -s reload
|
||||
fi
|
||||
@@ -1,18 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
echo "This script should be run as root." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $# == 1 && "$1" == "--check" ]]; then
|
||||
echo "OK"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "${BOX_ENV}" == "cloudron" ]]; then
|
||||
systemctl restart docker
|
||||
fi
|
||||
|
||||
Executable
+37
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
echo "This script should be run as root." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "No arguments supplied"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$1" == "--check" ]]; then
|
||||
echo "OK"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
[[ "${BOX_ENV}" != "cloudron" ]] && exit
|
||||
|
||||
service="$1"
|
||||
|
||||
if [[ "${service}" == "unbound" ]]; then
|
||||
unbound-anchor -a /var/lib/unbound/root.key
|
||||
systemctl restart unbound
|
||||
elif [[ "${service}" == "nginx" ]]; then
|
||||
nginx -s reload
|
||||
elif [[ "${service}" == "docker" ]]; then
|
||||
systemctl restart docker
|
||||
elif [[ "${service}" == "collectd" ]]; then
|
||||
systemctl restart collectd
|
||||
else
|
||||
echo "Unknown service ${service}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
echo "This script should be run as root." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $# == 1 && "$1" == "--check" ]]; then
|
||||
echo "OK"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "${BOX_ENV}" == "cloudron" ]]; then
|
||||
unbound-anchor -a /var/lib/unbound/root.key
|
||||
systemctl restart unbound
|
||||
fi
|
||||
|
||||
@@ -19,13 +19,17 @@ fi
|
||||
|
||||
addon="$1"
|
||||
appid="${2:-}" # only valid for redis
|
||||
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" ]]; then
|
||||
echo "${addon} must be postgresql/mysql/mongodb/redis"
|
||||
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" && "${addon}" != "graphite" ]]; then
|
||||
echo "${addon} must be postgresql/mysql/mongodb/redis/graphite"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${BOX_ENV}" == "cloudron" ]]; then
|
||||
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
|
||||
if [[ "${addon}" == "graphite" ]]; then
|
||||
readonly addon_dir="${HOME}/platformdata/graphite"
|
||||
else
|
||||
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
|
||||
fi
|
||||
else
|
||||
readonly addon_dir="${HOME}/.cloudron_test/platformdata/${addon}/${appid}"
|
||||
fi
|
||||
|
||||
+74
-17
@@ -41,7 +41,6 @@ var appdb = require('./appdb.js'),
|
||||
debug = require('debug')('box:services'),
|
||||
docker = require('./docker.js'),
|
||||
fs = require('fs'),
|
||||
graphite = require('./graphite.js'),
|
||||
hat = require('./hat.js'),
|
||||
infra = require('./infra_version.js'),
|
||||
mail = require('./mail.js'),
|
||||
@@ -64,6 +63,7 @@ var appdb = require('./appdb.js'),
|
||||
const NOOP = function (app, options, callback) { return callback(); };
|
||||
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
|
||||
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
|
||||
|
||||
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
|
||||
// teardown is destructive. app data stored with the addon is lost
|
||||
@@ -219,8 +219,8 @@ const SERVICES = {
|
||||
},
|
||||
graphite: {
|
||||
status: statusGraphite,
|
||||
restart: docker.restartContainer.bind(null, 'graphite'),
|
||||
defaultMemoryLimit: graphite.DEFAULT_MEMORY_LIMIT
|
||||
restart: restartGraphite,
|
||||
defaultMemoryLimit: 256 * 1024 * 1024
|
||||
},
|
||||
nginx: {
|
||||
status: statusNginx,
|
||||
@@ -301,7 +301,7 @@ function containerStatus(containerName, tokenEnvName, callback) {
|
||||
if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
|
||||
if (error) return callback(error);
|
||||
|
||||
request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 3000 }, function (error, response) {
|
||||
request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 20000 }, function (error, response) {
|
||||
if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${error.message}` });
|
||||
if (response.statusCode !== 200 || !response.body.status) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}` });
|
||||
|
||||
@@ -541,7 +541,7 @@ function rebuildService(id, callback) {
|
||||
if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
|
||||
if (id === 'mysql') return startMysql({ version: 'none' }, callback);
|
||||
if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
|
||||
if (id === 'graphite') return graphite.start({ version: 'none' }, serviceConfig, callback);
|
||||
if (id === 'graphite') return startGraphite({ version: 'none' }, serviceConfig, callback);
|
||||
|
||||
// nothing to rebuild for now.
|
||||
// TODO: mongo/postgresql/mysql need to be scaled down.
|
||||
@@ -614,7 +614,7 @@ function waitForContainer(containerName, tokenEnvName, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 10, interval: 15000 }, function (retryCallback) {
|
||||
request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 3000 }, function (error, response) {
|
||||
request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 5000 }, function (error, response) {
|
||||
if (error) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${error.message}`));
|
||||
if (response.statusCode !== 200 || !response.body.status) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}`));
|
||||
|
||||
@@ -769,18 +769,20 @@ function exportDatabase(addon, callback) {
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
appdb.getAll(function (error, apps) {
|
||||
appdb.getAll(function (error, allApps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function iterator (app, iteratorCallback) {
|
||||
async.eachSeries(allApps, function iterator (app, iteratorCallback) {
|
||||
if (!app.manifest.addons || !(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon
|
||||
if (app.installationState === apps.ISTATE_ERROR) return iteratorCallback(); // missing db causes crash in old app addon containers
|
||||
|
||||
debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);
|
||||
|
||||
ADDONS[addon].backup(app, app.manifest.addons[addon], function (error) {
|
||||
if (error) {
|
||||
debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
|
||||
return iteratorCallback(error);
|
||||
// for errored apps, we can ignore if export had an error
|
||||
return iteratorCallback(app.installationState === apps.ISTATE_ERROR ? null : error);
|
||||
}
|
||||
|
||||
iteratorCallback();
|
||||
@@ -837,24 +839,25 @@ function startServices(existingInfra, callback) {
|
||||
if (existingInfra.version !== infra.version) {
|
||||
debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
|
||||
startFuncs.push(
|
||||
mail.startMail, // start this first to reduce email downtime
|
||||
startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}),
|
||||
startMysql.bind(null, existingInfra),
|
||||
startPostgresql.bind(null, existingInfra),
|
||||
startMongodb.bind(null, existingInfra),
|
||||
startRedis.bind(null, existingInfra),
|
||||
graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}),
|
||||
startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}),
|
||||
sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
|
||||
mail.startMail);
|
||||
);
|
||||
} else {
|
||||
assert.strictEqual(typeof existingInfra.images, 'object');
|
||||
|
||||
if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); // start this first to reduce email downtime
|
||||
if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}));
|
||||
if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
|
||||
if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
|
||||
if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
|
||||
if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail);
|
||||
if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
|
||||
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}));
|
||||
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}));
|
||||
if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));
|
||||
|
||||
debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
|
||||
@@ -1778,6 +1781,49 @@ function restoreMongoDb(app, options, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function startGraphite(existingInfra, serviceConfig, callback) {
|
||||
assert.strictEqual(typeof existingInfra, 'object');
|
||||
assert.strictEqual(typeof serviceConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const tag = infra.images.graphite.tag;
|
||||
const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024;
|
||||
const memory = system.getMemoryAllocation(memoryLimit);
|
||||
|
||||
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);
|
||||
|
||||
if (upgrading) debug('startGraphite: graphite will be upgraded');
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="graphite" \
|
||||
--hostname graphite \
|
||||
--net cloudron \
|
||||
--net-alias graphite \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag=graphite \
|
||||
-m ${memory} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-p 127.0.0.1:2003:2003 \
|
||||
-p 127.0.0.1:2004:2004 \
|
||||
-p 127.0.0.1:8417:8000 \
|
||||
-v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
async.series([
|
||||
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
|
||||
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
|
||||
(done) => {
|
||||
if (!upgrading) return done();
|
||||
shell.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}, done);
|
||||
},
|
||||
shell.exec.bind(null, 'startGraphite', cmd)
|
||||
], callback);
|
||||
}
|
||||
|
||||
function setupProxyAuth(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
@@ -2017,7 +2063,7 @@ function statusDocker(callback) {
|
||||
function restartDocker(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.sudo('restartdocker', [ path.join(__dirname, 'scripts/restartdocker.sh') ], {}, NOOP_CALLBACK);
|
||||
shell.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null);
|
||||
}
|
||||
@@ -2033,7 +2079,7 @@ function statusUnbound(callback) {
|
||||
function restartUnbound(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.sudo('restartunbound', [ path.join(__dirname, 'scripts/restartunbound.sh') ], {}, NOOP_CALLBACK);
|
||||
shell.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null);
|
||||
}
|
||||
@@ -2049,7 +2095,7 @@ function statusNginx(callback) {
|
||||
function restartNginx(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.sudo('reloadnginx', [ path.join(__dirname, 'scripts/reloadnginx.sh') ], {}, NOOP_CALLBACK);
|
||||
shell.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null);
|
||||
}
|
||||
@@ -2082,7 +2128,7 @@ function statusGraphite(callback) {
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
|
||||
if (error) return callback(error);
|
||||
|
||||
request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 3000 }, function (error, response) {
|
||||
request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 20000 }, function (error, response) {
|
||||
if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
|
||||
if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });
|
||||
|
||||
@@ -2101,6 +2147,17 @@ function statusGraphite(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function restartGraphite(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
docker.restartContainer('graphite', callback);
|
||||
|
||||
setTimeout(function () {
|
||||
// wait for graphite to startup and then restart collectd
|
||||
shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK);
|
||||
}, 10000);
|
||||
}
|
||||
|
||||
function teardownOauth(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
+3
-16
@@ -21,24 +21,11 @@ var apps = require('./apps.js'),
|
||||
volumes = require('./volumes.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
var gRebuildInProgress = false;
|
||||
function rebuild(serviceConfig, options, callback) {
|
||||
assert.strictEqual(typeof serviceConfig, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (gRebuildInProgress) {
|
||||
debug('waiting for other rebuild to finish');
|
||||
return setTimeout(function () { rebuild(serviceConfig, options, callback); }, 5000);
|
||||
}
|
||||
|
||||
gRebuildInProgress = true;
|
||||
|
||||
function done(error) {
|
||||
gRebuildInProgress = false;
|
||||
callback(error);
|
||||
}
|
||||
|
||||
debug('rebuilding container');
|
||||
|
||||
const force = !!options.force;
|
||||
@@ -48,7 +35,7 @@ function rebuild(serviceConfig, options, callback) {
|
||||
const cloudronToken = hat(8 * 128);
|
||||
|
||||
apps.getAll(function (error, result) {
|
||||
if (error) return done(error);
|
||||
if (error) return callback(error);
|
||||
|
||||
let dataDirs = [];
|
||||
result.forEach(function (app) {
|
||||
@@ -89,7 +76,7 @@ function rebuild(serviceConfig, options, callback) {
|
||||
|
||||
if (!force && _.isEqual(currentDataDirs, dataDirs)) {
|
||||
debug('Skipping rebuild, no changes');
|
||||
return done();
|
||||
return callback();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -119,7 +106,7 @@ function rebuild(serviceConfig, options, callback) {
|
||||
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
|
||||
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
|
||||
shell.exec.bind(null, 'startSftp', cmd)
|
||||
], done);
|
||||
], callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
+6
-6
@@ -157,7 +157,7 @@ function exists(apiConfig, backupFilePath, callback) {
|
||||
s3.headObject(params, function (error) {
|
||||
if (!Object.keys(this.httpResponse.headers).some(h => h.startsWith('x-amz'))) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint'));
|
||||
if (error && S3_NOT_FOUND(error)) return callback(null, false);
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
|
||||
callback(null, true);
|
||||
});
|
||||
@@ -169,7 +169,7 @@ function exists(apiConfig, backupFilePath, callback) {
|
||||
};
|
||||
|
||||
s3.listObjects(listParams, function (error, listData) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
|
||||
callback(null, listData.Contents.length !== 0);
|
||||
});
|
||||
@@ -200,7 +200,7 @@ function download(apiConfig, backupFilePath, callback) {
|
||||
ps.emit('error', new BoxError(BoxError.NOT_FOUND, `Backup not found: ${backupFilePath}`));
|
||||
} else {
|
||||
debug(`download: ${apiConfig.bucket}:${backupFilePath} s3 stream error.`, error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, `Error multipartDownload ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
}
|
||||
});
|
||||
|
||||
@@ -231,7 +231,7 @@ function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {
|
||||
|
||||
async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
|
||||
s3.listObjects(listParams, function (error, listData) {
|
||||
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
|
||||
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${dir}. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
|
||||
if (listData.Contents.length === 0) { done = true; return whilstCallback(); }
|
||||
|
||||
@@ -505,7 +505,7 @@ function testConfig(apiConfig, callback) {
|
||||
|
||||
var s3 = new AWS.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));
|
||||
s3.putObject(params, function (error) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
@@ -513,7 +513,7 @@ function testConfig(apiConfig, callback) {
|
||||
};
|
||||
|
||||
s3.deleteObject(params, function (error) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
|
||||
|
||||
callback();
|
||||
});
|
||||
|
||||
@@ -13,11 +13,9 @@ scripts=("${SOURCE_DIR}/src/scripts/clearvolume.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/mvvolume.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/mkdirvolume.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/rmaddondir.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/reloadnginx.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/reboot.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/restart.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/restartdocker.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/restartunbound.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/restartservice.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/update.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/collectlogs.sh" \
|
||||
"${SOURCE_DIR}/src/scripts/configurecollectd.sh" \
|
||||
|
||||
@@ -39,8 +39,8 @@ describe('shell', function () {
|
||||
});
|
||||
|
||||
it('can sudo valid program', function (done) {
|
||||
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/reloadnginx.sh');
|
||||
shell.sudo('test', [ RELOAD_NGINX_CMD ], {}, function (error) {
|
||||
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/restartservice.sh');
|
||||
shell.sudo('test', [ RELOAD_NGINX_CMD, 'nginx' ], {}, function (error) {
|
||||
expect(error).to.be.ok();
|
||||
done();
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user