104997d77c
docker is using an extra udp port for every container. when there are a lot of containers, a lot of random udp ports get used up. this causes problems when installing apps that require contiguous port ranges
152 lines
6.5 KiB
JavaScript
152 lines
6.5 KiB
JavaScript
'use strict';

// Manages the 'sftp' service container: provisions the SSH host keys,
// (re-)creates the container and reports its runtime status.
exports = module.exports = {
    start,
    status,

    DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024 // default container memory limit in bytes (256 MiB), used when no limit is configured
};
|
|
|
|
const apps = require('./apps.js'),
|
|
assert = require('assert'),
|
|
blobs = require('./blobs.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
debug = require('debug')('box:sftp'),
|
|
docker = require('./docker.js'),
|
|
hat = require('./hat.js'),
|
|
infra = require('./infra_version.js'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
services = require('./services.js'),
|
|
shell = require('./shell.js'),
|
|
system = require('./system.js'),
|
|
volumes = require('./volumes.js');
|
|
|
|
// Ensures the sftp SSH host key pairs (rsa and ed25519) exist both in the
// blob store and on disk under paths.SFTP_KEYS_DIR. Keys already present in
// the blob store are (re-)written to disk; missing keys are generated with
// ssh-keygen and persisted back into the blob store.
// Throws BoxError (OPENSSL_ERROR on key generation failure, FS_ERROR on
// file read/write failure).
async function ensureKeys() {
    for (const keyType of [ 'rsa', 'ed25519' ]) {
        const privateKey = await blobs.get(`sftp_${keyType}_private_key`);
        const publicKey = await blobs.get(`sftp_${keyType}_public_key`);
        const publicKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key.pub`);
        const privateKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key`);

        if (!privateKey || !publicKey) {
            debug(`ensureKeys: generating new sftp keys of type ${keyType}`);
            // remove stale files first so ssh-keygen does not prompt to overwrite
            safe.fs.unlinkSync(publicKeyFile);
            safe.fs.unlinkSync(privateKeyFile);
            const [error] = await safe(shell.exec('ensureKeys', `ssh-keygen -m PEM -t ${keyType} -f ${paths.SFTP_KEYS_DIR}/ssh_host_${keyType}_key -q -N ""`, { shell: '/bin/bash' }));
            if (error) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ${keyType} keys: ${error.message}`);

            // read back the generated pair and persist it. safe.fs.readFileSync
            // yields a falsy value on failure; guard so we never store an empty key
            const newPublicKey = safe.fs.readFileSync(publicKeyFile);
            if (!newPublicKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp public ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_public_key`, newPublicKey);
            const newPrivateKey = safe.fs.readFileSync(privateKeyFile);
            if (!newPrivateKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp private ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_private_key`, newPrivateKey);
        } else {
            // keys exist in the blob store; (re-)materialize them on disk
            if (!safe.fs.writeFileSync(publicKeyFile, publicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public ${keyType} key: ${safe.error.message}`);
            if (!safe.fs.writeFileSync(privateKeyFile, privateKey, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private ${keyType} key: ${safe.error.message}`);
        }
    }
}
|
|
|
|
// (Re-)creates and starts the 'sftp' service container. Any existing
// container named 'sftp' is stopped and deleted first, so this is safe to
// call repeatedly.
// existingInfra: object — asserted but otherwise unused in this function;
//   presumably kept for signature parity with other service start()
//   functions — TODO confirm.
// Throws BoxError(FS_ERROR) when host data directories cannot be resolved.
async function start(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    debug('start: re-creating container');

    const serviceConfig = await services.getServiceConfig('sftp');
    const image = infra.images.sftp;
    const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
    const memory = await system.getMemoryAllocation(memoryLimit);
    const cloudronToken = hat(8 * 128); // random token, passed to the container as CLOUDRON_SFTP_TOKEN

    await ensureKeys(); // make sure SSH host keys exist in paths.SFTP_KEYS_DIR before mounting it below

    // resolve symlinks so the docker bind mounts point at the real directories
    const resolvedAppDataDir = safe.fs.realpathSync(paths.APPS_DATA_DIR);
    if (!resolvedAppDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve apps data dir: ${safe.error.message}`);

    // collect { hostDir, mountDir } pairs to be bind-mounted into the container
    const dataDirs = [{ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' }];

    // custom app data directories
    const allApps = await apps.list();
    for (const app of allApps) {
        // only apps with the localstorage addon on a custom storage volume get their own mount
        if (!app.manifest.addons?.localstorage || !app.storageVolumeId) continue;

        const hostDir = await apps.getStorageDir(app), mountDir = `/mnt/app-${app.id}`; // see also sftp:userSearchSftp
        if (!safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
            // do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
            debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
            continue;
        }

        dataDirs.push({ hostDir, mountDir });
    }

    // volume directories
    dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' }); // managed volumes
    const allVolumes = await volumes.list();
    for (const volume of allVolumes) {
        if (volume.hostPath.startsWith('/mnt/volumes/')) continue; // skip managed volume; already covered by the /mnt/volumes mount above

        if (!safe.fs.existsSync(volume.hostPath)) {
            debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
            continue;
        }

        dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/volume-${volume.id}` });
    }

    // mail data dir
    const resolvedMailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!resolvedMailDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve mail data dir: ${safe.error.message}`);
    dataDirs.push({ hostDir: resolvedMailDataDir, mountDir: '/mnt/maildata' });

    // in recovery mode the container runs writable with a sleep loop instead of sshd
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    const mounts = dataDirs.map(v => `-v "${v.hostDir}:${v.mountDir}"`).join(' ');
    // logs go to the host syslog over a unix socket (not UDP, which would
    // consume an ephemeral UDP port per container); sshd is published on host port 222.
    // NOTE(review): -m uses the computed allocation while --memory-swap uses the
    // raw configured limit — confirm these are intended to differ.
    const runCmd = `docker run --restart=always -d --name=sftp \
        --hostname sftp \
        --net cloudron \
        --net-alias sftp \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=sftp \
        -m ${memory} \
        --memory-swap ${memoryLimit} \
        --dns 172.18.0.1 \
        --dns-search=. \
        -p 222:22 \
        ${mounts} \
        -e CLOUDRON_SFTP_TOKEN=${cloudronToken} \
        -v ${paths.SFTP_KEYS_DIR}:/etc/ssh:ro \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`;

    debug('startSftp: stopping and deleting previous sftp container');
    await docker.stopContainer('sftp');
    await docker.deleteContainer('sftp');

    debug('startSftp: starting sftp container');
    await shell.exec('startSftp', runCmd, { shell: '/bin/bash' });
}
|
|
|
|
// Reports the sftp service container state and memory usage.
// Returns { status, memoryUsed, memoryPercent } — status is one of the
// services.SERVICE_STATUS_* constants. A running container only counts as
// ACTIVE once its rootfs is read-only (start() omits --read-only in
// recovery mode, so a recovery container reports STARTING).
// Throws whatever docker.inspect() raises, except NOT_FOUND which maps to STOPPED.
async function status() {
    const [error, container] = await safe(docker.inspect('sftp'));
    if (error && error.reason === BoxError.NOT_FOUND) return { status: services.SERVICE_STATUS_STOPPED }; // container was never created
    if (error) throw error;

    const result = await docker.memoryUsage('sftp');

    const status = container.State.Running
        ? (container.HostConfig.ReadonlyRootfs ? services.SERVICE_STATUS_ACTIVE : services.SERVICE_STATUS_STARTING)
        : services.SERVICE_STATUS_STOPPED;

    const stats = result.memory_stats || { usage: 0, limit: 1 }; // limit of 1 avoids a division by zero below

    return {
        status,
        memoryUsed: stats.usage,
        // Math.floor instead of parseInt: same truncation for this non-negative
        // ratio, without the number->string round trip (which breaks on
        // exponential notation) and without the missing-radix pitfall
        memoryPercent: Math.floor(100 * stats.usage / stats.limit)
    };
}
|