Files
cloudron-box/src/sftp.js
2026-04-01 09:49:34 +02:00

135 lines
6.0 KiB
JavaScript

import apps from './apps.js';
import assert from 'node:assert';
import blobs from './blobs.js';
import BoxError from './boxerror.js';
import constants from './constants.js';
import logger from './logger.js';
import docker from './docker.js';
import hat from './hat.js';
import infra from './infra_version.js';
import mounts from './mounts.js';
import path from 'node:path';
import paths from './paths.js';
import safe from '@cloudron/safetydance';
import services from './services.js';
import shellModule from './shell.js';
import volumes from './volumes.js';
const { log } = logger('sftp'); // logger namespaced with the 'sftp' tag
const shell = shellModule('sftp'); // shell helper instance, also tagged 'sftp' for log attribution
const DEFAULT_MEMORY_LIMIT = 256 * 1024 * 1024; // 256 MiB; used when the service config has no memoryLimit
// Ensures the sftp container's SSH host keys (rsa and ed25519) exist in
// paths.SFTP_KEYS_DIR. Keys are persisted in the blob store so the container
// keeps a stable host identity across recreations: on first run a fresh pair
// is generated with ssh-keygen and uploaded; afterwards the stored pair is
// written back to disk (private key with mode 0600).
// Throws BoxError OPENSSL_ERROR if key generation fails, FS_ERROR if the key
// files cannot be read or written.
async function ensureKeys() {
    for (const keyType of [ 'rsa', 'ed25519' ]) {
        const privateKey = await blobs.get(`sftp_${keyType}_private_key`);
        const publicKey = await blobs.get(`sftp_${keyType}_public_key`);
        const publicKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key.pub`);
        const privateKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key`);

        if (!privateKey || !publicKey) {
            log(`ensureKeys: generating new sftp keys of type ${keyType}`);

            // remove any stale files first; ssh-keygen would otherwise prompt before overwriting
            safe.fs.unlinkSync(publicKeyFile);
            safe.fs.unlinkSync(privateKeyFile);

            // -N '' : no passphrase, -q : quiet, -m PEM : classic PEM private key format
            const [error] = await safe(shell.spawn('ssh-keygen', ['-m', 'PEM', '-t', keyType, '-f', privateKeyFile, '-q', '-N', ''], {}));
            if (error) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ${keyType} keys: ${error.message}`);

            // persist the generated pair; guard the reads so a read failure
            // does not silently store null as the host key
            const newPublicKey = safe.fs.readFileSync(publicKeyFile);
            if (newPublicKey === null) throw new BoxError(BoxError.FS_ERROR, `Could not read sftp public ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_public_key`, newPublicKey);

            const newPrivateKey = safe.fs.readFileSync(privateKeyFile);
            if (newPrivateKey === null) throw new BoxError(BoxError.FS_ERROR, `Could not read sftp private ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_private_key`, newPrivateKey);
        } else {
            // restore the stored pair to disk; sshd requires the private key to not be world-readable
            if (!safe.fs.writeFileSync(publicKeyFile, publicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public ${keyType} key: ${safe.error.message}`);
            if (!safe.fs.writeFileSync(privateKeyFile, privateKey, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private ${keyType} key: ${safe.error.message}`);
        }
    }
}
// Recreates the 'sftp' docker container: ensures host keys, assembles the
// list of bind mounts (apps data, per-app storage dirs, volumes, mail data),
// stops/deletes any previous container, then starts a fresh one on port 222.
// existingInfra: previously recorded infra state ({ version, images }); used
// at the end to delete the superseded sftp image after an upgrade.
// Throws BoxError FS_ERROR if required host directories cannot be resolved.
async function start(existingInfra) {
assert.strictEqual(typeof existingInfra, 'object');
log('start: re-creating container');
const serviceConfig = await services.getServiceConfig('sftp');
const image = infra.images.sftp;
const memoryLimit = serviceConfig.memoryLimit || DEFAULT_MEMORY_LIMIT;
// random token handed to the container via the CLOUDRON_SFTP_TOKEN env var below
const cloudronToken = hat(8 * 128);
await ensureKeys();
// resolve symlinks so docker bind-mounts the real directory, not the link
const resolvedAppDataDir = safe.fs.realpathSync(paths.APPS_DATA_DIR);
if (!resolvedAppDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve apps data dir: ${safe.error.message}`);
const dataDirs = [{ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' }];
// custom app data directories (only apps using the localstorage addon with a storage volume)
const allApps = await apps.list();
for (const app of allApps) {
if (!app.manifest.addons?.localstorage || !app.storageVolumeId) continue;
const hostDir = await apps.getStorageDir(app), mountDir = `/mnt/app-${app.id}`; // see also sftp:userSearchSftp
if (hostDir === null || !safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
// do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
log(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
continue;
}
dataDirs.push({ hostDir, mountDir });
}
// volume directories
dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' }); // managed volumes
const allVolumes = await volumes.list();
for (const volume of allVolumes) {
if (mounts.isManagedProvider(volume.mountType)) continue; // skip managed volume. these are acessed via /mnt/volumes mount above
if (!safe.fs.existsSync(volume.hostPath)) {
log(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
continue;
}
dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/volume-${volume.id}` });
}
// mail data dir
const resolvedMailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
if (!resolvedMailDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve mail data dir: ${safe.error.message}`);
dataDirs.push({ hostDir: resolvedMailDataDir, mountDir: '/mnt/maildata' });
// in recoveryMode the container runs writable with a sleep loop so an admin
// can exec into it for debugging; normally the rootfs is read-only
const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';
const volumeMounts = dataDirs.map(v => `-v "${v.hostDir}:${v.mountDir}"`).join(' ');
// NOTE: backslash-newline continuations inside this template literal keep the
// generated shell command on one logical line
const runCmd = `docker run --restart=unless-stopped -d --name=sftp \
--hostname sftp \
--net cloudron \
--net-alias sftp \
--ip ${constants.SFTP_SERVICE_IPv4} \
--log-driver syslog \
--log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
--log-opt syslog-format=rfc5424 \
--log-opt tag=sftp \
-m ${memoryLimit} \
--memory-swap -1 \
-p 222:22 \
${volumeMounts} \
-e CLOUDRON_SFTP_TOKEN=${cloudronToken} \
-v ${paths.SFTP_KEYS_DIR}:/etc/ssh:ro \
--label isCloudronManaged=true \
${readOnly} -v /tmp -v /run ${image} ${cmd}`;
log('startSftp: stopping and deleting previous sftp container');
await docker.stopContainer('sftp');
await docker.deleteContainer('sftp');
log('startSftp: starting sftp container');
await shell.bash(runCmd, { encoding: 'utf8' });
// after an infra upgrade, garbage-collect the image the old container used
if (existingInfra.version !== 'none' && existingInfra.images.sftp !== image) await docker.deleteImage(existingInfra.images.sftp);
}
// Public module API. DEFAULT_MEMORY_LIMIT is exported alongside start()
// (presumably so callers/tests can reference the fallback limit — confirm usage).
export default {
start,
DEFAULT_MEMORY_LIMIT,
};