sftp: rework appdata and volume mounting logic
This tries to solve two issues: * The current approach mounts the data directories of apps/volumes individually. This causes a problem with volume mounts that complete after the container is started — i.e. not network time/delay but systemd ordering. With CIFS, the mount target is a hostname. Resolving it requires unbound to be running, but unbound can only start after docker because it wants to bind to the docker network. One way to fix this is to not start sftp automatically and only start the sftp container from the box code. Otherwise, the sftp container attaches itself to the directory before it is mounted, and the directory appears empty. (On the host, the directory will appear to have the mount data!) * Every time apptask runs we keep rebuilding this sftp container, which results in many races. The fix is: mount the parent directory of apps and volumes. In addition, any specialized appdata paths and volume paths are still mounted individually. This greatly minimizes rebuilding, and since we no longer rely on binding to the mount point itself, the child directories can mount at their leisure. This limits the race issue to only no-op volume mounts. Part of #789
This commit is contained in:
94
src/sftp.js
94
src/sftp.js
@@ -10,16 +10,15 @@ exports = module.exports = {
|
||||
const apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
debug = require('debug')('box:sftp'),
|
||||
docker = require('./docker.js'),
|
||||
hat = require('./hat.js'),
|
||||
infra = require('./infra_version.js'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
shell = require('./shell.js'),
|
||||
system = require('./system.js'),
|
||||
volumes = require('./volumes.js'),
|
||||
_ = require('underscore');
|
||||
volumes = require('./volumes.js');
|
||||
|
||||
function rebuild(serviceConfig, options, callback) {
|
||||
assert.strictEqual(typeof serviceConfig, 'object');
|
||||
@@ -28,20 +27,27 @@ function rebuild(serviceConfig, options, callback) {
|
||||
|
||||
debug('rebuilding container');
|
||||
|
||||
const force = !!options.force;
|
||||
const tag = infra.images.sftp.tag;
|
||||
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
|
||||
const memory = system.getMemoryAllocation(memoryLimit);
|
||||
const cloudronToken = hat(8 * 128);
|
||||
|
||||
let dataDirs = [];
|
||||
|
||||
const stat = safe.fs.lstatSync(paths.APPS_DATA_DIR);
|
||||
if (!stat) return callback(new BoxError(BoxError.FS_ERROR, safe.error));
|
||||
|
||||
const resolvedAppDataDir = stat.isSymbolicLink() ? safe.fs.readlinkSync(paths.APPS_DATA_DIR) : paths.APPS_DATA_DIR;
|
||||
|
||||
dataDirs.push({ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' });
|
||||
|
||||
apps.getAll(async function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
let dataDirs = [];
|
||||
result.forEach(function (app) {
|
||||
if (!app.manifest.addons['localstorage']) return;
|
||||
if (!app.manifest.addons['localstorage'] || !app.dataDir) return;
|
||||
|
||||
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/app/data/${app.id}`;
|
||||
const hostDir = apps.getDataDir(app, app.dataDir), mountDir = `/mnt/${app.id}`;
|
||||
if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
|
||||
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
|
||||
debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
|
||||
@@ -55,59 +61,45 @@ function rebuild(serviceConfig, options, callback) {
|
||||
[error, allVolumes] = await safe(volumes.list());
|
||||
if (error) return callback(error);
|
||||
|
||||
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' });
|
||||
|
||||
allVolumes.forEach(function (volume) {
|
||||
if (volume.hostPath.startsWith('/mnt/volumes/')) return;
|
||||
|
||||
if (!safe.fs.existsSync(volume.hostPath)) {
|
||||
debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
|
||||
return;
|
||||
}
|
||||
|
||||
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/app/data/${volume.id}` });
|
||||
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/${volume.id}` });
|
||||
});
|
||||
|
||||
docker.inspect('sftp', function (error, data) {
|
||||
if (!error && data && data.Mounts) {
|
||||
let currentDataDirs = data.Mounts;
|
||||
if (currentDataDirs) {
|
||||
currentDataDirs = currentDataDirs.filter(function (d) { return d.Destination.indexOf('/app/data/') === 0; }).map(function (d) { return { hostDir: d.Source, mountDir: d.Destination }; });
|
||||
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
|
||||
const cmd = `docker run --restart=always -d --name="sftp" \
|
||||
--hostname sftp \
|
||||
--net cloudron \
|
||||
--net-alias sftp \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag=sftp \
|
||||
-m ${memory} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-p 222:22 \
|
||||
${mounts} \
|
||||
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
|
||||
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
// sort for comparison
|
||||
currentDataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
|
||||
dataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
|
||||
|
||||
if (!force && _.isEqual(currentDataDirs, dataDirs)) {
|
||||
debug('Skipping rebuild, no changes');
|
||||
return callback();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
|
||||
const cmd = `docker run --restart=always -d --name="sftp" \
|
||||
--hostname sftp \
|
||||
--net cloudron \
|
||||
--net-alias sftp \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag=sftp \
|
||||
-m ${memory} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-p 222:22 \
|
||||
${mounts} \
|
||||
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
|
||||
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
// ignore error if container not found (and fail later) so that this code works across restarts
|
||||
async.series([
|
||||
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
|
||||
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
|
||||
shell.exec.bind(null, 'startSftp', cmd)
|
||||
], callback);
|
||||
});
|
||||
// ignore error if container not found (and fail later) so that this code works across restarts
|
||||
async.series([
|
||||
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
|
||||
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
|
||||
shell.exec.bind(null, 'startSftp', cmd)
|
||||
], callback);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user