Compare commits

...

9 Commits
8.2 ... v6.2.6

Author SHA1 Message Date
Girish Ramakrishnan
6c40cceddc give graphite more time to start before restarting collectd
(cherry picked from commit 1f59974e83)
2021-03-24 10:26:36 -07:00
Girish Ramakrishnan
9be09510d4 graphite: restart collectd as well
(cherry picked from commit 0447dce0d6)
2021-03-23 16:40:20 -07:00
Girish Ramakrishnan
83488bc4ce graphite: implement upgrade
for the moment, we wipe out the old data and start afresh. this is because
the graphite web app keeps changing quite drastically.

(cherry picked from commit 32f385741a)
2021-03-23 16:39:51 -07:00
Girish Ramakrishnan
2f9a8029c4 sftp: only rebuild when app task queue is empty
when multiple apptasks are scheduled, we end up with a sequence like this:
    - task1 finishes
    - task2 (uninstall) removes appdata directory
    - sftp rebuild (from task1 finish)
    - task2 fails because sftp rebuild created empty appdata directory

a fix is to delay the sftp rebuild until all tasks are done. of course,
the same race is still there, if a user initiated another task immediately
but this seems unlikely. if that happens often, we can further add a sftpRebuildInProgress
flag inside apptaskmanager.

(cherry picked from commit 4cba5ca405)
2021-03-22 14:33:44 -07:00
Girish Ramakrishnan
07af3edf51 request has no retry method
i thought it was using superagent

(cherry picked from commit 7df89e66c8)
2021-03-22 14:32:57 -07:00
Girish Ramakrishnan
636d1f3e20 acme2: add a retry to getDirectory, since users are reporting a 429
(cherry picked from commit 4954b94d4a)
2021-03-22 14:32:50 -07:00
Girish Ramakrishnan
3b69e4dcec graphite: disable tagdb
(cherry picked from commit 8048e68eb6)
2021-03-22 14:32:18 -07:00
Girish Ramakrishnan
d977b0b238 update: set memory limit properly
(cherry picked from commit 750f313c6a)
2021-03-22 14:30:22 -07:00
Girish Ramakrishnan
909c6bccb1 error.code is a number which causes crash at times in BoxError
(cherry picked from commit 1e96606110)
2021-03-22 14:29:26 -07:00
18 changed files with 148 additions and 168 deletions

View File

@@ -2232,3 +2232,9 @@
[6.2.4]
* Another addon crash fix
[6.2.5]
* update: set memory limit properly
* Fix bug where renew certs button did not work
* sftp: fix rebuild condition
* Fix display of user management/dashboard visibility for email apps
* graphite: disable tagdb and reduce log noise

View File

@@ -13,9 +13,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/mkdirvolume.sh
Defaults!/home/yellowtent/box/src/scripts/rmaddondir.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmaddondir.sh
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
@@ -41,11 +38,8 @@ yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/scripts/backupup
Defaults!/home/yellowtent/box/src/scripts/restart.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restart.sh
Defaults!/home/yellowtent/box/src/scripts/restartdocker.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartdocker.sh
Defaults!/home/yellowtent/box/src/scripts/restartunbound.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartunbound.sh
Defaults!/home/yellowtent/box/src/scripts/restartservice.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartservice.sh
Defaults!/home/yellowtent/box/src/scripts/rmmailbox.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmailbox.sh

View File

@@ -629,7 +629,7 @@ function scheduleTask(appId, installationState, taskId, callback) {
if (error) return callback(error);
let memoryLimit = 400;
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE) {
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE || installationState === exports.ISTATE_PENDING_UPDATE) {
memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;
}

View File

@@ -79,14 +79,16 @@ function scheduleTask(appId, taskId, options, callback) {
delete gActiveTasks[appId];
locker.unlock(locker.OP_APPTASK); // unlock event will trigger next task
// post app task hooks
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
scheduler.resumeJobs(appId);
});
}
function startNextTask() {
if (gPendingTasks.length === 0) return;
if (gPendingTasks.length === 0) {
// rebuild sftp when task queue is empty. this minimizes risk of sftp rebuild overlapping with other app tasks
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
return;
}
assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);

View File

@@ -572,18 +572,20 @@ Acme2.prototype.acmeFlow = function (hostname, domain, callback) {
Acme2.prototype.getDirectory = function (callback) {
const that = this;
request.get(this.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
async.retry({ times: 3, interval: 20000 }, function (retryCallback) {
request.get(that.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
if (error) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
if (response.statusCode !== 200) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
if (typeof response.body.newNonce !== 'string' ||
typeof response.body.newOrder !== 'string' ||
typeof response.body.newAccount !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
if (typeof response.body.newNonce !== 'string' ||
typeof response.body.newOrder !== 'string' ||
typeof response.body.newAccount !== 'string') return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
that.directory = response.body;
that.directory = response.body;
callback(null);
});
retryCallback(null);
});
}, callback);
};
Acme2.prototype.getCertificate = function (vhost, domain, callback) {

View File

@@ -1,50 +0,0 @@
'use strict';
exports = module.exports = {
start,
DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024
};
var assert = require('assert'),
async = require('async'),
infra = require('./infra_version.js'),
paths = require('./paths.js'),
shell = require('./shell.js'),
system = require('./system.js');
function start(existingInfra, serviceConfig, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const tag = infra.images.graphite.tag;
const dataDir = paths.PLATFORM_DATA_DIR;
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
const memory = system.getMemoryAllocation(memoryLimit);
const cmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-p 127.0.0.1:2004:2004 \
-p 127.0.0.1:8417:8000 \
-v "${dataDir}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
shell.exec.bind(null, 'startGraphite', cmd)
], callback);
}

View File

@@ -21,7 +21,7 @@ exports = module.exports = {
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.1@sha256:ad20a9a5dcb2ab132374a7c8d44b89af0ec37651cf889e570f7625b02ee85fdf' },
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.2@sha256:caaa1f7f4055ae8990d8ec65bd100567496df7e4ed5eb427867f3717a8dcbf92' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.2.3@sha256:fdc4aa6d2c85aeafe65eaa4243aada0cc2e57b94f6eaee02c9b1a8fb89b01dd7' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.4.0@sha256:953bbd8b72a9108a8526d2c0bdbba67e1e1563ff59d0a117f0884dba1576f3dd' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.0@sha256:b00b64b8df4032985d7a1ddd548a2713b6d7d88a54ebe9b7d324cece2bd6829e' },
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.2.0@sha256:61e8247ded1e07cf882ca478dab180960357c614472e80b938f1f690a46788c2' }
}
};

View File

@@ -54,8 +54,8 @@ var acme2 = require('./cert/acme2.js'),
users = require('./users.js'),
util = require('util');
var NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' }),
RELOAD_NGINX_CMD = path.join(__dirname, 'scripts/reloadnginx.sh');
const NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' });
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
function nginxLocation(s) {
if (!s.startsWith('!')) return s;
@@ -172,7 +172,7 @@ function validateCertificate(location, domainObject, certificate) {
function reload(callback) {
if (constants.TEST) return callback();
shell.sudo('reload', [ RELOAD_NGINX_CMD ], {}, function (error) {
shell.sudo('reload', [ RESTART_SERVICE_CMD, 'nginx' ], {}, function (error) {
if (error) return callback(new BoxError(BoxError.NGINX_ERROR, `Error reloading nginx: ${error.message}`));
callback();

View File

@@ -1,17 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
nginx -s reload
fi

View File

@@ -1,18 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
systemctl restart docker
fi

37
src/scripts/restartservice.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
[[ "${BOX_ENV}" != "cloudron" ]] && exit
service="$1"
if [[ "${service}" == "unbound" ]]; then
unbound-anchor -a /var/lib/unbound/root.key
systemctl restart unbound
elif [[ "${service}" == "nginx" ]]; then
nginx -s reload
elif [[ "${service}" == "docker" ]]; then
systemctl restart docker
elif [[ "${service}" == "collectd" ]]; then
systemctl restart collectd
else
echo "Unknown service ${service}"
exit 1
fi

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
unbound-anchor -a /var/lib/unbound/root.key
systemctl restart unbound
fi

View File

@@ -19,13 +19,17 @@ fi
addon="$1"
appid="${2:-}" # only valid for redis
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" ]]; then
echo "${addon} must be postgresql/mysql/mongodb/redis"
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" && "${addon}" != "graphite" ]]; then
echo "${addon} must be postgresql/mysql/mongodb/redis/graphite"
exit 1
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
if [[ "${addon}" == "graphite" ]]; then
readonly addon_dir="${HOME}/platformdata/graphite"
else
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
fi
else
readonly addon_dir="${HOME}/.cloudron_test/platformdata/${addon}/${appid}"
fi

View File

@@ -41,7 +41,6 @@ var appdb = require('./appdb.js'),
debug = require('debug')('box:services'),
docker = require('./docker.js'),
fs = require('fs'),
graphite = require('./graphite.js'),
hat = require('./hat.js'),
infra = require('./infra_version.js'),
mail = require('./mail.js'),
@@ -64,6 +63,7 @@ var appdb = require('./appdb.js'),
const NOOP = function (app, options, callback) { return callback(); };
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
@@ -219,8 +219,8 @@ const SERVICES = {
},
graphite: {
status: statusGraphite,
restart: docker.restartContainer.bind(null, 'graphite'),
defaultMemoryLimit: graphite.DEFAULT_MEMORY_LIMIT
restart: restartGraphite,
defaultMemoryLimit: 256 * 1024 * 1024
},
nginx: {
status: statusNginx,
@@ -541,7 +541,7 @@ function rebuildService(id, callback) {
if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
if (id === 'mysql') return startMysql({ version: 'none' }, callback);
if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
if (id === 'graphite') return graphite.start({ version: 'none' }, serviceConfig, callback);
if (id === 'graphite') return startGraphite({ version: 'none' }, serviceConfig, callback);
// nothing to rebuild for now.
// TODO: mongo/postgresql/mysql need to be scaled down.
@@ -845,7 +845,7 @@ function startServices(existingInfra, callback) {
startPostgresql.bind(null, existingInfra),
startMongodb.bind(null, existingInfra),
startRedis.bind(null, existingInfra),
graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}),
startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}),
sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
);
} else {
@@ -857,7 +857,7 @@ function startServices(existingInfra, callback) {
if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}));
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}));
if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));
debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
@@ -1781,6 +1781,49 @@ function restoreMongoDb(app, options, callback) {
});
}
function startGraphite(existingInfra, serviceConfig, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const tag = infra.images.graphite.tag;
const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024;
const memory = system.getMemoryAllocation(memoryLimit);
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);
if (upgrading) debug('startGraphite: graphite will be upgraded');
const cmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-p 127.0.0.1:2004:2004 \
-p 127.0.0.1:8417:8000 \
-v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
(done) => {
if (!upgrading) return done();
shell.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}, done);
},
shell.exec.bind(null, 'startGraphite', cmd)
], callback);
}
function setupProxyAuth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
@@ -2020,7 +2063,7 @@ function statusDocker(callback) {
function restartDocker(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartdocker', [ path.join(__dirname, 'scripts/restartdocker.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2036,7 +2079,7 @@ function statusUnbound(callback) {
function restartUnbound(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartunbound', [ path.join(__dirname, 'scripts/restartunbound.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2052,7 +2095,7 @@ function statusNginx(callback) {
function restartNginx(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('reloadnginx', [ path.join(__dirname, 'scripts/reloadnginx.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2104,6 +2147,17 @@ function statusGraphite(callback) {
});
}
function restartGraphite(callback) {
assert.strictEqual(typeof callback, 'function');
docker.restartContainer('graphite', callback);
setTimeout(function () {
// wait for graphite to startup and then restart collectd. graphite migrations can be quite slow!
shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK);
}, 60000);
}
function teardownOauth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');

View File

@@ -21,24 +21,11 @@ var apps = require('./apps.js'),
volumes = require('./volumes.js'),
_ = require('underscore');
var gRebuildInProgress = false;
function rebuild(serviceConfig, options, callback) {
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
if (gRebuildInProgress) {
debug('waiting for other rebuild to finish');
return setTimeout(function () { rebuild(serviceConfig, options, callback); }, 5000);
}
gRebuildInProgress = true;
function done(error) {
gRebuildInProgress = false;
callback(error);
}
debug('rebuilding container');
const force = !!options.force;
@@ -48,7 +35,7 @@ function rebuild(serviceConfig, options, callback) {
const cloudronToken = hat(8 * 128);
apps.getAll(function (error, result) {
if (error) return done(error);
if (error) return callback(error);
let dataDirs = [];
result.forEach(function (app) {
@@ -89,7 +76,7 @@ function rebuild(serviceConfig, options, callback) {
if (!force && _.isEqual(currentDataDirs, dataDirs)) {
debug('Skipping rebuild, no changes');
return done();
return callback();
}
}
}
@@ -119,7 +106,7 @@ function rebuild(serviceConfig, options, callback) {
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
shell.exec.bind(null, 'startSftp', cmd)
], done);
], callback);
});
});
});

View File

@@ -157,7 +157,7 @@ function exists(apiConfig, backupFilePath, callback) {
s3.headObject(params, function (error) {
if (!Object.keys(this.httpResponse.headers).some(h => h.startsWith('x-amz'))) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint'));
if (error && S3_NOT_FOUND(error)) return callback(null, false);
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
callback(null, true);
});
@@ -169,7 +169,7 @@ function exists(apiConfig, backupFilePath, callback) {
};
s3.listObjects(listParams, function (error, listData) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
callback(null, listData.Contents.length !== 0);
});
@@ -200,7 +200,7 @@ function download(apiConfig, backupFilePath, callback) {
ps.emit('error', new BoxError(BoxError.NOT_FOUND, `Backup not found: ${backupFilePath}`));
} else {
debug(`download: ${apiConfig.bucket}:${backupFilePath} s3 stream error.`, error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, `Error multipartDownload ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
}
});
@@ -231,7 +231,7 @@ function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {
async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
s3.listObjects(listParams, function (error, listData) {
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${dir}. Message: ${error.message} HTTP Code: ${error.code}`));
if (listData.Contents.length === 0) { done = true; return whilstCallback(); }
@@ -505,7 +505,7 @@ function testConfig(apiConfig, callback) {
var s3 = new AWS.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));
s3.putObject(params, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
var params = {
Bucket: apiConfig.bucket,
@@ -513,7 +513,7 @@ function testConfig(apiConfig, callback) {
};
s3.deleteObject(params, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
callback();
});

View File

@@ -13,11 +13,9 @@ scripts=("${SOURCE_DIR}/src/scripts/clearvolume.sh" \
"${SOURCE_DIR}/src/scripts/mvvolume.sh" \
"${SOURCE_DIR}/src/scripts/mkdirvolume.sh" \
"${SOURCE_DIR}/src/scripts/rmaddondir.sh" \
"${SOURCE_DIR}/src/scripts/reloadnginx.sh" \
"${SOURCE_DIR}/src/scripts/reboot.sh" \
"${SOURCE_DIR}/src/scripts/restart.sh" \
"${SOURCE_DIR}/src/scripts/restartdocker.sh" \
"${SOURCE_DIR}/src/scripts/restartunbound.sh" \
"${SOURCE_DIR}/src/scripts/restartservice.sh" \
"${SOURCE_DIR}/src/scripts/update.sh" \
"${SOURCE_DIR}/src/scripts/collectlogs.sh" \
"${SOURCE_DIR}/src/scripts/configurecollectd.sh" \

View File

@@ -39,8 +39,8 @@ describe('shell', function () {
});
it('can sudo valid program', function (done) {
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/reloadnginx.sh');
shell.sudo('test', [ RELOAD_NGINX_CMD ], {}, function (error) {
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/restartservice.sh');
shell.sudo('test', [ RELOAD_NGINX_CMD, 'nginx' ], {}, function (error) {
expect(error).to.be.ok();
done();
});