Compare commits

..

17 Commits

Author SHA1 Message Date
Girish Ramakrishnan 3c0064e0b4 more changes 2021-07-10 11:14:38 -07:00
Girish Ramakrishnan c039fdecb9 eventlog: typo in cleanup
(cherry picked from commit eafd72b4e7)
2021-07-10 11:14:38 -07:00
Girish Ramakrishnan dac86d6856 sshfs: only chown when auth as root user
(cherry picked from commit 5d836b3f7c)

also contains typo fix of username to user
2021-07-10 11:14:27 -07:00
Girish Ramakrishnan 86ca9ae9ce 6.3.5 changes 2021-07-09 14:47:40 -07:00
Girish Ramakrishnan 700a7637b6 6.3.4 changes 2021-06-27 09:00:09 -07:00
Girish Ramakrishnan feb61c27d9 reverseproxy: remove any old dashboard domain configs
(cherry picked from commit c052882de9)
2021-06-27 08:59:43 -07:00
Girish Ramakrishnan 31d742fa67 fix sporadic ETIMEDOUT
it seems when docker is busy deleting images, we get a ETIMEDOUT.
the default was 10000.

2021-06-25T22:18:32.324Z box:apps BoxError: connect ETIMEDOUT
    at /home/yellowtent/box/src/settingsdb.js:26:36
    at Query.queryCallback [as _callback] (/home/yellowtent/box/src/database.js:96:42)
2021-06-25 16:46:49 -07:00
Girish Ramakrishnan dd5737f948 mail: enable editheader sieve extension and upgrade solr 2021-06-25 16:38:44 -07:00
Girish Ramakrishnan 50d7610bfd cloudron-support: createdAt -> creationTime 2021-06-25 12:51:42 -07:00
Girish Ramakrishnan e51dd8f530 installer: prepare apt before installing more packages
currently, this is only prepared when needed because we don't want this
to happen on every update
2021-06-25 12:14:24 -07:00
Girish Ramakrishnan bad6e39d59 volume: add filesystem type for shared folders
rename noop to mountpoint
2021-06-25 10:12:28 -07:00
Girish Ramakrishnan 1ce4875db1 volumes: set hostPath based on volume id
this is required for the file browser to work which does operations
based on the id

fixes #789
2021-06-24 17:32:41 -07:00
Girish Ramakrishnan 097a7d6b60 sftp: rework appdata and volume mounting logic
this tries to solve two issues:

* the current approach mounts the data directories of apps/volumes individually.
this causes a problem with volume mounts that mount after the container is started i.e not
network time/delay but systemd ordering. With CIFS, the mount is a hostname. This requires
unbound to be running but unbound can only start after docker because it wants to bind to
the docker network. one way to fix is to not start sftp automatically and only start sftp
container in the box code. This results in the sftp container attaching itself to the
directory before mounting and it appears empty. (on the host, the directory will appear
to have mount data!)

* every time apptask runs we keep rebuilding this sftp container. this results in many races.

the fix is: mount the parent directory of apps and volumes. in addition, then any specialized appdata
paths and volume paths are mounted individually. this greatly minimized rebuilding and also since we don't rely
on binding to the mount point itself. the child directories can mount in leisure. this limits the race
issue to only no-op volume mounts.

part of #789
2021-06-24 16:51:58 -07:00
Girish Ramakrishnan 87b2b63043 sshfs: add StrictHostKeyChecking=no so that it can connect the first time 2021-06-24 15:10:00 -07:00
Girish Ramakrishnan 0b0d552f58 Fix usage of execSync
important thing is to not use encoding: 'utf8' because in that case
it will return a string instead of a Buffer object. '' is false but
Buffer() is not.
2021-06-24 12:59:47 -07:00
Girish Ramakrishnan 5437291177 add to changes 2021-06-24 09:09:39 -07:00
Girish Ramakrishnan 78754f943d read avatar as binary and not base64 2021-06-24 09:09:08 -07:00
21 changed files with 222 additions and 115 deletions
+25
View File
@@ -2286,3 +2286,28 @@
[6.3.1]
* Fix cert migration issues
[6.3.2]
* Avatar was migrated as base64 instead of binary
* Fix issue where filemanager came up empty for CIFS mounts
[6.3.3]
* volumes: add filesystem volume type for shared folders
* mail: enable sieve extension editheader
* mail: update solr to 8.9.0
[6.3.4]
* Fix issue where old nginx configs were not removed before upgrade
[6.3.5]
* filemanager: reset selection if directory has changed
* branding: fix error highlight with empty cloudron name
* better text instead of "Cloudron in the wild"
* Make sso login hint translatable
* Give unread notifications a small left border
* Fix issue where clicking update indicator opened app in new tab
* Ensure notifications are only fetched and shown for at least admins
* setupaccount: Show input field errors below input field
* Set focus automatically for new alias or redirect
* eventlog: fix issue where old events are not periodically removed
* sshfs: fix chown
@@ -6,7 +6,7 @@ exports.up = function(db, callback) {
db.runSql('ALTER TABLE settings ADD COLUMN valueBlob MEDIUMBLOB', function (error) {
if (error) return callback(error);
fs.readFile('/home/yellowtent/boxdata/avatar.png', { encoding: 'base64' }, function (error, avatar) {
fs.readFile('/home/yellowtent/boxdata/avatar.png', function (error, avatar) {
if (error && error.code === 'ENOENT') return callback();
if (error) return callback(error);
@@ -0,0 +1,26 @@
'use strict';
const async = require('async'),
safe = require('safetydance');
exports.up = function(db, callback) {
db.all('SELECT * FROM volumes', function (error, volumes) {
if (error || volumes.length === 0) return callback(error);
async.eachSeries(volumes, function (volume, iteratorDone) {
if (volume.mountType !== 'noop') return iteratorDone();
let mountType;
if (safe.child_process.execSync(`mountpoint -q -- ${volume.hostPath}`)) {
mountType = 'mountpoint';
} else {
mountType = 'filesystem';
}
db.runSql('UPDATE volumes SET mountType=? WHERE id=?', [ mountType, volume.id ], iteratorDone);
}, callback);
});
};
exports.down = function(db, callback) {
callback();
};
+3 -3
View File
@@ -705,9 +705,9 @@
}
},
"connect-lastmile": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-2.1.0.tgz",
"integrity": "sha512-nLV3loAO+1N5GK8OmwbNMEhObgrhNwn9qR1j3tgjfM63BPru5badZYxzQ5qsOP/MiXteFJCmRpga1IH8UV6JgQ==",
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/connect-lastmile/-/connect-lastmile-2.1.1.tgz",
"integrity": "sha512-723vDmZuy6KBUAmuXff1mb+l9ZMs+JqXJuAGHgWNI3fNYAu9DKXC+GYdxqY0+9oMXyVJNf5AscoONcq9Nqb0Ig==",
"requires": {
"underscore": "^1.13.1"
}
+1 -2
View File
@@ -20,7 +20,7 @@
"body-parser": "^1.19.0",
"cloudron-manifestformat": "^5.10.2",
"connect": "^3.7.0",
"connect-lastmile": "^2.1.0",
"connect-lastmile": "^2.1.1",
"connect-timeout": "^1.9.0",
"cookie-parser": "^1.4.5",
"cookie-session": "^1.4.0",
@@ -30,7 +30,6 @@
"debug": "^4.3.1",
"delay": "^5.0.0",
"dockerode": "^3.3.0",
"delay": "^5.0.0",
"ejs": "^3.1.6",
"ejs-cli": "^2.2.1",
"express": "^4.17.1",
+1 -1
View File
@@ -37,7 +37,7 @@ while true; do
# fall through
;&
--owner-login)
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY createdAt LIMIT 1" 2>/dev/null)
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY creationTime LIMIT 1" 2>/dev/null)
admin_password=$(pwgen -1s 12)
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/nul)
ghost_file=/home/yellowtent/platformdata/cloudron_ghost.json
+48 -15
View File
@@ -15,6 +15,48 @@ function log() {
echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> installer: $1"
}
apt_ready="no"
function prepare_apt_once() {
[[ "${apt_ready}" == "yes" ]] && return
log "Making sure apt is in a good state"
log "Waiting for all dpkg tasks to finish..."
while fuser /var/lib/dpkg/lock; do
sleep 1
done
# it's unclear what needs to be run first or whether both these command should be run. so keep trying both
for count in {1..3}; do
# alternative to apt-install -y --fix-missing ?
if ! dpkg --force-confold --configure -a; then
log "dpkg reconfigure failed (try $count)"
dpkg_configure="no"
else
dpkg_configure="yes"
fi
if ! apt update -y; then
log "apt update failed (try $count)"
apt_update="no"
else
apt_update="yes"
fi
[[ "${dpkg_configure}" == "yes" && "${apt_update}" == "yes" ]] && break
sleep 1
done
apt_ready="yes"
if [[ "${dpkg_configure}" == "yes" && "${apt_update}" == "yes" ]]; then
log "apt is ready"
else
log "apt is not ready but proceeding anyway"
fi
}
readonly user=yellowtent
readonly box_src_dir=/home/${user}/box
@@ -38,21 +80,7 @@ if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]];
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
log "Waiting for all dpkg tasks to finish..."
while fuser /var/lib/dpkg/lock; do
sleep 1
done
while ! dpkg --force-confold --configure -a; do
log "Failed to fix packages. Retry"
sleep 1
done
# the latest docker might need newer packages
while ! apt update -y; do
log "Failed to update packages. Retry"
sleep 1
done
prepare_apt_once
while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
log "Failed to install docker. Retry"
@@ -66,6 +94,9 @@ readonly nginx_version=$(nginx -v 2>&1)
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
prepare_apt_once
# apt install with install deps (as opposed to dpkg -i)
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
rm /tmp/nginx.deb
@@ -73,11 +104,13 @@ fi
if ! which mount.nfs; then
log "installing nfs-common"
prepare_apt_once
apt install -y nfs-common
fi
if ! which sshfs; then
log "installing sshfs"
prepare_apt_once
apt install -y sshfs
fi
+5 -1
View File
@@ -137,6 +137,7 @@ const appdb = require('./appdb.js'),
reverseProxy = require('./reverseproxy.js'),
safe = require('safetydance'),
semver = require('semver'),
services = require('./services.js'),
settings = require('./settings.js'),
spawn = require('child_process').spawn,
split = require('split'),
@@ -1273,7 +1274,10 @@ function setDataDir(app, dataDir, auditSource, callback) {
const task = {
args: { newDataDir: dataDir },
values: { }
values: { },
onFinished: (error) => {
if (!error) services.rebuildService('sftp', NOOP_CALLBACK);
}
};
addTask(appId, exports.ISTATE_PENDING_DATA_DIR_MIGRATION, task, function (error, result) {
if (error) return callback(error);
+2 -8
View File
@@ -4,7 +4,7 @@ exports = module.exports = {
scheduleTask
};
let assert = require('assert'),
const assert = require('assert'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:apptaskmanager'),
fs = require('fs'),
@@ -13,7 +13,6 @@ let assert = require('assert'),
path = require('path'),
paths = require('./paths.js'),
scheduler = require('./scheduler.js'),
services = require('./services.js'),
tasks = require('./tasks.js');
let gActiveTasks = { }; // indexed by app id
@@ -84,15 +83,10 @@ function scheduleTask(appId, taskId, options, callback) {
}
function startNextTask() {
if (gPendingTasks.length === 0) {
// rebuild sftp when task queue is empty. this minimizes risk of sftp rebuild overlapping with other app tasks
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
return;
}
if (gPendingTasks.length === 0) return;
assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);
const t = gPendingTasks.shift();
scheduleTask(t.appId, t.taskId, t.options, t.callback);
}
+1 -1
View File
@@ -101,7 +101,7 @@ function startJobs(callback) {
gJobs.cleanupEventlog = new CronJob({
cronTime: '00 */30 * * * *', // every 30 minutes
onTick: eventlog.cleanup.bind(null, new Date(Date.now() - 60 * 60 * 24 * 10 * 1000)), // 10 days ago
onTick: eventlog.cleanup.bind(null, { creationTime: new Date(Date.now() - 60 * 60 * 24 * 10 * 1000) }), // 10 days ago
start: true
});
+2
View File
@@ -45,6 +45,8 @@ function initialize(callback) {
// https://github.com/mysqljs/mysql#pool-options
gConnectionPool = mysql.createPool({
connectionLimit: 5,
acquireTimeout: 60000,
connectTimeout: 60000,
host: gDatabase.hostname,
user: gDatabase.username,
password: gDatabase.password,
+2 -2
View File
@@ -20,8 +20,8 @@ exports = module.exports = {
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.6@sha256:e583082e15e8e41b0e3b80c3efc917ec429f19fa08a19e14fc27144a8bfe446a' },
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.2@sha256:9df297ccc3370f38c54f8d614e214e082b363777cd1c6c9522e29663cc8f5362' },
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.3@sha256:37e5222e01ae89bc5a742ce12030631de25a127b5deec8a0e992c68df0fdec10' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.3.2@sha256:d0d153612f478a0ef099809d4c3c72c3e02f43a55a796987d922f16367e7881e' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.3.3@sha256:b1093e6f38bebf4a9ae903ca385aea3a32e7cccae5ede7f2e01a34681e361a5f' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.1@sha256:bed9f6b5d06fe2c5289e895e806cfa5b74ad62993d705be55d4554a67d128029' },
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.2.0@sha256:61e8247ded1e07cf882ca478dab180960357c614472e80b938f1f690a46788c2' }
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.3.0@sha256:183c11150d5a681cb02f7d2bd542ddb8a8f097422feafb7fac8fdbca0ca55d47' }
}
};
+1 -2
View File
@@ -17,7 +17,6 @@ const assert = require('assert'),
ldap = require('ldapjs'),
mail = require('./mail.js'),
mailboxdb = require('./mailboxdb.js'),
path = require('path'),
safe = require('safetydance'),
services = require('./services.js'),
users = require('./users.js');
@@ -625,7 +624,7 @@ function userSearchSftp(req, res, next) {
var obj = {
dn: ldap.parseDN(`cn=${username}@${appFqdn},ou=sftp,dc=cloudron`).toString(),
attributes: {
homeDirectory: path.join('/app/data', app.id),
homeDirectory: app.dataDir ? `/mnt/${app.id}` : `/mnt/appsdata/${app.id}/data`,
objectclass: ['user'],
objectcategory: 'person',
cn: user.id,
+11 -10
View File
@@ -27,8 +27,8 @@ function validateMountOptions(type, options) {
assert.strictEqual(typeof options, 'object');
switch (type) {
case 'noop': // volume provider
case 'mountpoint': // backup provider
case 'filesystem':
case 'mountpoint':
return null;
case 'cifs':
if (typeof options.username !== 'string') return new BoxError(BoxError.BAD_FIELD, 'username is not a string');
@@ -84,12 +84,12 @@ function renderMountFile(volume) {
case 'sshfs': {
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`);
type = 'fuse.sshfs';
what= `${mountOptions.user}@${mountOptions.host}:${mountOptions.remoteDir}`;
options = `allow_other,port=${mountOptions.port},IdentityFile=${keyFilePath},reconnect`; // allow_other mean non-root users can access it
what = `${mountOptions.user}@${mountOptions.host}:${mountOptions.remoteDir}`;
options = `allow_other,port=${mountOptions.port},IdentityFile=${keyFilePath},StrictHostKeyChecking=no,reconnect`; // allow_other means non-root users can access it
break;
}
case 'noop': // volume provider
case 'mountpoint': // backup provider
case 'filesystem':
case 'mountpoint':
return;
}
@@ -115,9 +115,10 @@ async function getStatus(mountType, hostPath) {
assert.strictEqual(typeof mountType, 'string');
assert.strictEqual(typeof hostPath, 'string');
if (mountType === 'noop' || mountType === 'mountpoint') { // noop is from volume provider and mountpoint is from backup provider
safe.child_process.execSync(`mountpoint -q -- ${hostPath}`, { encoding: 'utf8' });
if (!safe.error) {
if (mountType === 'filesystem') return { state: 'active', message: 'Mounted' };
if (mountType === 'mountpoint') {
if (safe.child_process.execSync(`mountpoint -q -- ${hostPath}`)) {
return { state: 'active', message: 'Mounted' };
} else {
return { state: 'inactive', message: 'Not mounted' };
@@ -148,7 +149,7 @@ async function tryAddMount(volume, options) {
assert.strictEqual(typeof volume, 'object');
assert.strictEqual(typeof options, 'object');
if (volume.mountType === 'noop' || volume.mountType === 'mountpoint') return; // noop is from volume provider and mountpoint is from backup provider
if (volume.mountType === 'mountpoint') return;
if (constants.TEST) return;
+2
View File
@@ -21,6 +21,8 @@ exports = module.exports = {
PROVIDER_FILE: '/etc/cloudron/PROVIDER',
SETUP_TOKEN_FILE: '/etc/cloudron/SETUP_TOKEN',
VOLUMES_MOUNT_DIR: '/mnt/volumes',
PLATFORM_DATA_DIR: path.join(baseDir(), 'platformdata'),
APPS_DATA_DIR: path.join(baseDir(), 'appsdata'),
BOX_DATA_DIR: path.join(baseDir(), 'boxdata'), // box data dir is part of box backup
+4 -1
View File
@@ -788,8 +788,11 @@ function checkCerts(options, auditSource, progressCallback, callback) {
}
function removeAppConfigs() {
const dashboardConfigFilename = `${settings.dashboardFqdn()}.conf`;
// remove all configs which are not the default or current dashboard
for (let appConfigFile of fs.readdirSync(paths.NGINX_APPCONFIG_DIR)) {
if (appConfigFile !== constants.NGINX_DEFAULT_CONFIG_FILE_NAME && !appConfigFile.startsWith(constants.DASHBOARD_LOCATION)) {
if (appConfigFile !== constants.NGINX_DEFAULT_CONFIG_FILE_NAME && appConfigFile !== dashboardConfigFilename) {
fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR, appConfigFile));
}
}
+2 -1
View File
@@ -31,10 +31,11 @@ async function add(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
if (typeof req.body.name !== 'string') return next(new HttpError(400, 'name must be a string'));
if (typeof req.body.hostPath !== 'string') return next(new HttpError(400, 'hostPath must be a string'));
if (typeof req.body.mountType !== 'string') return next(new HttpError(400, 'mountType must be a string'));
if (typeof req.body.mountOptions !== 'object') return next(new HttpError(400, 'mountOptions must be a object'));
if ('hostPath' in req.body && typeof req.body.hostPath !== 'string') return next(new HttpError(400, 'hostPath must be a string'));
req.clearTimeout(); // waiting for mount can take time
const [error, id] = await safe(volumes.add(req.body, auditSource.fromRequest(req)));
+1
View File
@@ -23,4 +23,5 @@ mount_filename=$(systemd-escape -p --suffix=mount "$where")
systemctl stop "${mount_filename}" || true
rm -f "/etc/systemd/system/${mount_filename}"
rmdir "${where}" || true
systemctl daemon-reload
+43 -51
View File
@@ -10,16 +10,15 @@ exports = module.exports = {
const apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:sftp'),
docker = require('./docker.js'),
hat = require('./hat.js'),
infra = require('./infra_version.js'),
paths = require('./paths.js'),
safe = require('safetydance'),
shell = require('./shell.js'),
system = require('./system.js'),
volumes = require('./volumes.js'),
_ = require('underscore');
volumes = require('./volumes.js');
function rebuild(serviceConfig, options, callback) {
assert.strictEqual(typeof serviceConfig, 'object');
@@ -28,20 +27,27 @@ function rebuild(serviceConfig, options, callback) {
debug('rebuilding container');
const force = !!options.force;
const tag = infra.images.sftp.tag;
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
const memory = system.getMemoryAllocation(memoryLimit);
const cloudronToken = hat(8 * 128);
let dataDirs = [];
const stat = safe.fs.lstatSync(paths.APPS_DATA_DIR);
if (!stat) return callback(new BoxError(BoxError.FS_ERROR, safe.error));
const resolvedAppDataDir = stat.isSymbolicLink() ? safe.fs.readlinkSync(paths.APPS_DATA_DIR) : paths.APPS_DATA_DIR;
dataDirs.push({ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' });
apps.getAll(async function (error, result) {
if (error) return callback(error);
let dataDirs = [];
result.forEach(function (app) {
if (!app.manifest.addons['localstorage']) return;
if (!app.manifest.addons['localstorage'] || !app.dataDir) return;
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/app/data/${app.id}`;
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/mnt/${app.id}`;
if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
@@ -55,59 +61,45 @@ function rebuild(serviceConfig, options, callback) {
[error, allVolumes] = await safe(volumes.list());
if (error) return callback(error);
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' });
allVolumes.forEach(function (volume) {
if (volume.hostPath.startsWith('/mnt/volumes/')) return;
if (!safe.fs.existsSync(volume.hostPath)) {
debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
return;
}
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/app/data/${volume.id}` });
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/${volume.id}` });
});
docker.inspect('sftp', function (error, data) {
if (!error && data && data.Mounts) {
let currentDataDirs = data.Mounts;
if (currentDataDirs) {
currentDataDirs = currentDataDirs.filter(function (d) { return d.Destination.indexOf('/app/data/') === 0; }).map(function (d) { return { hostDir: d.Source, mountDir: d.Destination }; });
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
const cmd = `docker run --restart=always -d --name="sftp" \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 222:22 \
${mounts} \
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
// sort for comparison
currentDataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
dataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
if (!force && _.isEqual(currentDataDirs, dataDirs)) {
debug('Skipping rebuild, no changes');
return callback();
}
}
}
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
const cmd = `docker run --restart=always -d --name="sftp" \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 222:22 \
${mounts} \
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
// ignore error if container not found (and fail later) so that this code works across restarts
async.series([
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
shell.exec.bind(null, 'startSftp', cmd)
], callback);
});
// ignore error if container not found (and fail later) so that this code works across restarts
async.series([
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
shell.exec.bind(null, 'startSftp', cmd)
], callback);
});
}
+19 -3
View File
@@ -91,6 +91,23 @@ function checkPreconditions(apiConfig, dataLayout, callback) {
});
}
function hasChownSupportSync(apiConfig) {
switch (apiConfig.provider) {
case PROVIDER_NFS:
case PROVIDER_EXT4:
case PROVIDER_FILESYSTEM:
return true;
case PROVIDER_SSHFS:
// sshfs can be mounted as root or normal user. when mounted as root, we have to chown since we remove backups as the yellowtent user
// when mounted as non-root user, files are created as yellowtent user but they are still owned by the non-root user (thus del also works)
return apiConfig.mountOptions.user === 'root';
case PROVIDER_CIFS:
return true;
case PROVIDER_MOUNTPOINT:
return apiConfig.chown;
}
}
function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupFilePath, 'string');
@@ -117,7 +134,7 @@ function upload(apiConfig, backupFilePath, sourceStream, callback) {
fileStream.on('finish', function () {
const backupUid = parseInt(process.env.SUDO_UID, 10) || process.getuid(); // in test, upload() may or may not be called via sudo script
if ((apiConfig.provider !== PROVIDER_MOUNTPOINT && apiConfig.provider !== PROVIDER_CIFS) || apiConfig.chown) {
if (hasChownSupportSync(apiConfig)) {
if (!safe.fs.chownSync(backupFilePath, backupUid, backupUid)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unable to chown:' + safe.error.message));
if (!safe.fs.chownSync(path.dirname(backupFilePath), backupUid, backupUid)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unable to chown:' + safe.error.message));
}
@@ -284,8 +301,7 @@ function testConfig(apiConfig, callback) {
}
if (apiConfig.provider === PROVIDER_MOUNTPOINT) {
safe.child_process.execSync(`mountpoint -q -- ${apiConfig.mountPoint}`, { encoding: 'utf8' });
if (safe.error) return callback(new BoxError(BoxError.BAD_FIELD, `${apiConfig.mountPoint} is not mounted`, { field: 'mountPoint' }));
if (!safe.child_process.execSync(`mountpoint -q -- ${apiConfig.mountPoint}`)) return callback(new BoxError(BoxError.BAD_FIELD, `${apiConfig.mountPoint} is not mounted`, { field: 'mountPoint' }));
}
const backupPath = getBackupPath(apiConfig);
+22 -13
View File
@@ -20,6 +20,7 @@ const assert = require('assert'),
fs = require('fs'),
mounts = require('./mounts.js'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
services = require('./services.js'),
uuid = require('uuid');
@@ -60,7 +61,7 @@ function validateHostPath(hostPath, mountType) {
if (!allowedPaths.some(p => hostPath.startsWith(p))) return new BoxError(BoxError.BAD_FIELD, 'hostPath must be under /mnt, /media, /opt or /srv', { field: 'hostPath' });
if (!constants.TEST && mountType === 'noop') { // we expect user to have already mounted this
if (!constants.TEST) { // we expect user to have already mounted this
const stat = safe.fs.lstatSync(hostPath);
if (!stat) return new BoxError(BoxError.BAD_FIELD, 'mount point does not exist. Please create it on the server first', { field: 'hostPath' });
if (!stat.isDirectory()) return new BoxError(BoxError.BAD_FIELD, 'mount point is not a directory', { field: 'hostPath' });
@@ -73,23 +74,26 @@ async function add(volume, auditSource) {
assert.strictEqual(typeof volume, 'object');
assert.strictEqual(typeof auditSource, 'object');
const {name, hostPath, mountType, mountOptions} = volume;
const {name, mountType, mountOptions} = volume;
let error = validateName(name);
if (error) throw error;
error = validateHostPath(hostPath, mountType);
if (error) throw error;
error = mounts.validateMountOptions(mountType, mountOptions);
if (error) throw error;
const id = uuid.v4();
const id = uuid.v4().replace(/-/g, ''); // to make systemd mount file names more readable
if (volume.mountType !== 'noop') await mounts.tryAddMount(volume, { timeout: 10 }); // 10 seconds
if (mountType === 'mountpoint' || mountType === 'filesystem') {
error = validateHostPath(volume.hostPath, mountType);
if (error) throw error;
} else {
volume.hostPath = path.join(paths.VOLUMES_MOUNT_DIR, id);
await mounts.tryAddMount(volume, { timeout: 10 }); // 10 seconds
}
try {
await database.query('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, hostPath, mountType, JSON.stringify(mountOptions) ]);
await database.query('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, volume.hostPath, mountType, JSON.stringify(mountOptions) ]);
} catch (error) {
if (error.code === 'ER_DUP_ENTRY' && error.sqlMessage.indexOf('name') !== -1) throw new BoxError(BoxError.ALREADY_EXISTS, 'name already exists');
if (error.code === 'ER_DUP_ENTRY' && error.sqlMessage.indexOf('hostPath') !== -1) throw new BoxError(BoxError.ALREADY_EXISTS, 'hostPath already exists');
@@ -97,10 +101,11 @@ async function add(volume, auditSource) {
throw error;
}
eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath });
eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath: volume.hostPath });
// in theory, we only need to do this mountpoint volumes. but for some reason a restart is required to detect new "mounts"
services.rebuildService('sftp', NOOP_CALLBACK);
const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { volumeId: id, hostPath });
const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { volumeId: id, hostPath: volume.hostPath });
collectd.addProfile(id, collectdConf, NOOP_CALLBACK);
return id;
@@ -152,8 +157,12 @@ async function del(volume, auditSource) {
}
eventlog.add(eventlog.ACTION_VOLUME_REMOVE, auditSource, { volume });
services.rebuildService('sftp', async function () {
if (volume.mountType !== 'noop') await safe(mounts.removeMount(volume));
});
if (volume.mountType === 'mountpoint' || volume.mountType === 'filesystem') {
services.rebuildService('sftp', NOOP_CALLBACK);
} else {
await safe(mounts.removeMount(volume));
}
collectd.removeProfile(volume.id, NOOP_CALLBACK);
}