Files
cloudron-box/src/sftp.js
2022-06-02 16:29:01 -07:00

142 lines
6.2 KiB
JavaScript

'use strict';
// Lifecycle management (start/status) for the built-in sftp service container.
exports = module.exports = {
start,
status,
DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024 // 256 MiB; used by start() when serviceConfig.memoryLimit is unset
};
const apps = require('./apps.js'),
assert = require('assert'),
blobs = require('./blobs.js'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:sftp'),
docker = require('./docker.js'),
hat = require('./hat.js'),
infra = require('./infra_version.js'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
services = require('./services.js'),
shell = require('./shell.js'),
system = require('./system.js'),
volumes = require('./volumes.js');
// Ensure the sftp ssh host keys exist both in the blob store and on disk
// under paths.SFTP_KEYS_DIR. On first run a fresh RSA keypair is generated
// and persisted to blobs; on subsequent runs the stored keys are written
// back to disk (the container bind mounts SFTP_KEYS_DIR read-only).
// Throws BoxError on key generation, read or write failure.
async function ensureKeys() {
    const sftpPrivateKey = await blobs.get(blobs.SFTP_PRIVATE_KEY);
    const sftpPublicKey = await blobs.get(blobs.SFTP_PUBLIC_KEY);
    if (!sftpPrivateKey || !sftpPublicKey) {
        debug('ensureSecrets: generating new sftp keys');
        if (!safe.child_process.execSync(`ssh-keygen -m PEM -t rsa -f "${paths.SFTP_KEYS_DIR}/ssh_host_rsa_key" -q -N ""`)) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ssh keys: ${safe.error.message}`);
        // read back what ssh-keygen produced; fail loudly instead of persisting a null blob
        const newSftpPublicKey = safe.fs.readFileSync(paths.SFTP_PUBLIC_KEY_FILE);
        if (!newSftpPublicKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp public key: ${safe.error.message}`);
        await blobs.set(blobs.SFTP_PUBLIC_KEY, newSftpPublicKey);
        const newSftpPrivateKey = safe.fs.readFileSync(paths.SFTP_PRIVATE_KEY_FILE);
        if (!newSftpPrivateKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp private key: ${safe.error.message}`);
        await blobs.set(blobs.SFTP_PRIVATE_KEY, newSftpPrivateKey);
    } else {
        if (!safe.fs.writeFileSync(paths.SFTP_PUBLIC_KEY_FILE, sftpPublicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public key: ${safe.error.message}`);
        // private key must not be world readable; sshd rejects permissive host key modes
        if (!safe.fs.writeFileSync(paths.SFTP_PRIVATE_KEY_FILE, sftpPrivateKey, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private key: ${safe.error.message}`);
    }
}
// Recreate the 'sftp' service container: ensure host keys, collect every
// host directory to expose over sftp (apps data, per-app storage dirs,
// volumes, mail data), then stop/remove any old container and run a fresh
// one with those directories bind mounted.
// existingInfra is only type-checked here, not otherwise used — presumably kept
// for call-signature parity with other service start() functions; confirm.
async function start(existingInfra) {
assert.strictEqual(typeof existingInfra, 'object');
debug('start: re-creating container');
// per-service overrides (memoryLimit, recoveryMode) come from settings
const servicesConfig = await settings.getServicesConfig();
const serviceConfig = servicesConfig['sftp'] || {};
const tag = infra.images.sftp.tag;
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
const memory = system.getMemoryAllocation(memoryLimit);
// random token handed to the container via CLOUDRON_SFTP_TOKEN (hat() takes a bit count)
const cloudronToken = hat(8 * 128);
await ensureKeys();
// resolve symlinks first — presumably docker bind mounts need the real host path; verify
const resolvedAppDataDir = safe.fs.realpathSync(paths.APPS_DATA_DIR);
if (!resolvedAppDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve apps data dir: ${safe.error.message}`);
const dataDirs = [{ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' }];
// custom app data directories
const allApps = await apps.list();
for (const app of allApps) {
// only apps using localstorage with a dedicated storage volume get an extra mount
if (!app.manifest.addons['localstorage'] || !app.storageVolumeId) continue;
const hostDir = await apps.getStorageDir(app), mountDir = `/mnt/app-${app.id}`; // see also sftp:userSearchSftp
if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
continue;
}
dataDirs.push({ hostDir, mountDir });
}
// volume directories
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' }); // managed volumes
const allVolumes = await volumes.list();
for (const volume of allVolumes) {
if (volume.hostPath.startsWith('/mnt/volumes/')) continue; // skip managed volume
if (!safe.fs.existsSync(volume.hostPath)) {
debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
continue;
}
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/volume-${volume.id}` });
}
// mail data dir
const resolvedMailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
if (!resolvedMailDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve mail data dir: ${safe.error.message}`);
dataDirs.push({ hostDir: resolvedMailDataDir, mountDir: '/mnt/maildata' });
// outside recovery mode the rootfs is read-only; status() reports ACTIVE based on ReadonlyRootfs
const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
// recovery mode replaces the image entrypoint with an idle shell for debugging
const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
const mounts = dataDirs.map(v => `-v "${v.hostDir}:${v.mountDir}"`).join(' ');
// NOTE(review): -m uses the computed allocation while --memory-swap uses the
// raw memoryLimit — confirm the intended swap headroom; equal values disable swap
const runCmd = `docker run --restart=always -d --name="sftp" \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 222:22 \
${mounts} \
-e CLOUDRON_SFTP_TOKEN="${cloudronToken}" \
-v "${paths.SFTP_KEYS_DIR}:/etc/ssh:ro" \
--label isCloudronManaged=true \
${readOnly} -v /tmp -v /run "${tag}" ${cmd}`;
// ignore error if container not found (and fail later) so that this code works across restarts
await shell.promises.exec('stopSftp', 'docker stop sftp || true');
await shell.promises.exec('removeSftp', 'docker rm -f sftp || true');
await shell.promises.exec('startSftp', runCmd);
}
// Report the sftp container's service status plus memory usage figures.
// A running container without a read-only rootfs (recovery mode, see start())
// is reported as STARTING rather than ACTIVE; a missing container as STOPPED.
async function status() {
    const [error, container] = await safe(docker.inspect('sftp'));
    if (error && error.reason === BoxError.NOT_FOUND) return { status: services.SERVICE_STATUS_STOPPED };
    if (error) throw error;
    const result = await docker.memoryUsage('sftp');
    const status = container.State.Running
        ? (container.HostConfig.ReadonlyRootfs ? services.SERVICE_STATUS_ACTIVE : services.SERVICE_STATUS_STARTING)
        : services.SERVICE_STATUS_STOPPED;
    return {
        status,
        memoryUsed: result.memory_stats.usage,
        // Math.floor instead of parseInt: parseInt on a number coerces it to a
        // string first and misparses exponential notation (e.g. parseInt(1.2e21) === 1)
        memoryPercent: Math.floor(100 * result.memory_stats.usage / result.memory_stats.limit)
    };
}