diff --git a/src/routes/filemanager.js b/src/routes/filemanager.js
index a6d757fef..cdd9ccdbb 100644
--- a/src/routes/filemanager.js
+++ b/src/routes/filemanager.js
@@ -14,7 +14,7 @@ var addons = require('../addons.js'),
 function proxy(req, res, next) {
     assert.strictEqual(typeof req.params.id, 'string');
 
-    const appId = req.params.id;
+    const id = req.params.id; // app id or volume id
 
     req.clearTimeout();
 
@@ -24,7 +24,7 @@ function proxy(req, res, next) {
         let parsedUrl = url.parse(req.url, true /* parseQueryString */);
         parsedUrl.query['access_token'] = result.token;
 
-        req.url = url.format({ pathname: `/files/${appId}${req.params[0]}`, query: parsedUrl.query }); // params[0] already contains leading '/'
+        req.url = url.format({ pathname: `/files/${id}${req.params[0]}`, query: parsedUrl.query }); // params[0] already contains leading '/'
 
         const proxyOptions = url.parse(`https://${result.ip}:3000`);
         proxyOptions.rejectUnauthorized = false;
diff --git a/src/server.js b/src/server.js
index 848f04b10..956b419d0 100644
--- a/src/server.js
+++ b/src/server.js
@@ -300,10 +300,11 @@ function initializeExpressSync() {
     router.get ('/api/v1/domains/:domain/dns_check', token, authorizeAdmin, routes.domains.checkDnsRecords);
 
     // volume routes
-    router.post('/api/v1/volumes', json, token, authorizeAdmin, routes.volumes.add);
-    router.get ('/api/v1/volumes', token, authorizeAdmin, routes.volumes.list);
-    router.get ('/api/v1/volumes/:id', token, authorizeAdmin, routes.volumes.load, routes.volumes.get);
-    router.del ('/api/v1/volumes/:id', token, authorizeAdmin, routes.volumes.load, routes.volumes.del);
+    router.post('/api/v1/volumes', json, token, authorizeAdmin, routes.volumes.add);
+    router.get ('/api/v1/volumes', token, authorizeAdmin, routes.volumes.list);
+    router.get ('/api/v1/volumes/:id', token, authorizeAdmin, routes.volumes.load, routes.volumes.get);
+    router.del ('/api/v1/volumes/:id', token, authorizeAdmin, routes.volumes.load, routes.volumes.del);
+    router.use ('/api/v1/volumes/:id/files/*', token, authorizeAdmin, routes.filemanager.proxy);
 
     // addon routes
     router.get ('/api/v1/services', token, authorizeAdmin, routes.services.getAll);
diff --git a/src/sftp.js b/src/sftp.js
index 87ac2ccbc..eef7ec3f5 100644
--- a/src/sftp.js
+++ b/src/sftp.js
@@ -1,8 +1,8 @@
 'use strict';
 
 exports = module.exports = {
-    startSftp: startSftp,
-    rebuild: rebuild
+    startSftp,
+    rebuild
 };
 
 var apps = require('./apps.js'),
@@ -13,6 +13,7 @@ var apps = require('./apps.js'),
     infra = require('./infra_version.js'),
     safe = require('safetydance'),
     shell = require('./shell.js'),
+    volumes = require('./volumes.js'),
     _ = require('underscore');
 
 function startSftp(existingInfra, callback) {
@@ -64,48 +65,61 @@ function rebuild(callback) {
         });
 
-        shell.exec('inspectSftp', 'docker inspect --format="{{json .Mounts }}" sftp', function (error, result) {
-            if (!error && result) {
-                let currentDataDirs = safe.JSON.parse(result);
-                if (currentDataDirs) {
-                    currentDataDirs = currentDataDirs.filter(function (d) { return d.Destination.indexOf('/app/data/') === 0; }).map(function (d) { return { hostDir: d.Source, mountDir: d.Destination }; });
-
-                    // sort for comparison
-                    currentDataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
-                    dataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
-
-                    if (_.isEqual(currentDataDirs, dataDirs)) {
-                        debug('Skipping rebuild, no changes');
-                        return done();
-                    }
-                }
-            }
-
-            const appDataVolumes = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
-            const cmd = `docker run --restart=always -d --name="sftp" \
-                --hostname sftp \
-                --net cloudron \
-                --net-alias sftp \
-                --log-driver syslog \
-                --log-opt syslog-address=udp://127.0.0.1:2514 \
-                --log-opt syslog-format=rfc5424 \
-                --log-opt tag=sftp \
-                -m ${memoryLimit}m \
-                --memory-swap ${memoryLimit * 2}m \
-                --dns 172.18.0.1 \
-                --dns-search=. \
-                -p 222:22 \
-                ${appDataVolumes} \
-                -e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-                -v "/etc/ssh:/etc/ssh:ro" \
-                --label isCloudronManaged=true \
-                --read-only -v /tmp -v /run "${tag}"`;
-
-            // ignore error if container not found (and fail later) so that this code works across restarts
-            async.series([
-                shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
-                shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
-                shell.exec.bind(null, 'startSftp', cmd)
-            ], done);
+        volumes.list(function (error, allVolumes) {
+            if (error) return callback(error);
+
+            allVolumes.forEach(function (volume) {
+                if (!safe.fs.existsSync(volume.hostPath)) {
+                    debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
+                    return;
+                }
+
+                dataDirs.push({ hostDir: volume.hostPath, mountDir: `/app/data/${volume.id}` });
+            });
+
+            shell.exec('inspectSftp', 'docker inspect --format="{{json .Mounts }}" sftp', function (error, result) {
+                if (!error && result) {
+                    let currentDataDirs = safe.JSON.parse(result);
+                    if (currentDataDirs) {
+                        currentDataDirs = currentDataDirs.filter(function (d) { return d.Destination.indexOf('/app/data/') === 0; }).map(function (d) { return { hostDir: d.Source, mountDir: d.Destination }; });
+
+                        // sort for comparison
+                        currentDataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
+                        dataDirs.sort(function (a, b) { return a.hostDir < b.hostDir ? -1 : 1; });
+
+                        if (_.isEqual(currentDataDirs, dataDirs)) {
+                            debug('Skipping rebuild, no changes');
+                            return done();
+                        }
+                    }
+                }
+
+                const mounts = dataDirs.map(function (v) { return `-v "${v.hostDir}:${v.mountDir}"`; }).join(' ');
+                const cmd = `docker run --restart=always -d --name="sftp" \
+                    --hostname sftp \
+                    --net cloudron \
+                    --net-alias sftp \
+                    --log-driver syslog \
+                    --log-opt syslog-address=udp://127.0.0.1:2514 \
+                    --log-opt syslog-format=rfc5424 \
+                    --log-opt tag=sftp \
+                    -m ${memoryLimit}m \
+                    --memory-swap ${memoryLimit * 2}m \
+                    --dns 172.18.0.1 \
+                    --dns-search=. \
+                    -p 222:22 \
+                    ${mounts} \
+                    -e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
+                    -v "/etc/ssh:/etc/ssh:ro" \
+                    --label isCloudronManaged=true \
+                    --read-only -v /tmp -v /run "${tag}"`;
+
+                // ignore error if container not found (and fail later) so that this code works across restarts
+                async.series([
+                    shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
+                    shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
+                    shell.exec.bind(null, 'startSftp', cmd)
+                ], done);
+            });
         });
     });
 }
diff --git a/src/volumes.js b/src/volumes.js
index b48b785c2..de126e7aa 100644
--- a/src/volumes.js
+++ b/src/volumes.js
@@ -11,6 +11,7 @@ const assert = require('assert'),
     BoxError = require('./boxerror.js'),
     volumedb = require('./volumedb.js'),
     eventlog = require('./eventlog.js'),
+    sftp = require('./sftp.js'),
     uuid = require('uuid');
 
 function validateName(name) {
@@ -47,6 +48,7 @@ function add(name, hostPath, auditSource, callback) {
         if (error) return callback(error);
 
         eventlog.add(eventlog.ACTION_VOLUME_ADD, auditSource, { id, name, hostPath });
+        sftp.rebuild((error) => { if (error) console.error('Unable to rebuild sftp:', error); });
 
         callback(null, id);
     });
@@ -82,8 +84,8 @@ function del(volume, auditSource, callback) {
         if (error) return callback(error);
 
         eventlog.add(eventlog.ACTION_VOLUME_REMOVE, auditSource, { volume });
+        sftp.rebuild((error) => { if (error) console.error('Unable to rebuild sftp:', error); });
 
         return callback(null);
     });
 }
-