Mostly because code is being autogenerated by AI tools using this prefix; it is also used in the stack trace.
130 lines · 5.9 KiB · JavaScript
'use strict';
|
|
|
|
// Public interface of this module: start() (re)creates the sftp service container.
exports = module.exports = {
    start,

    DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024 // 256 MiB; used when the service config has no memoryLimit
};
|
|
|
|
const apps = require('./apps.js'),
|
|
assert = require('node:assert'),
|
|
blobs = require('./blobs.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
debug = require('debug')('box:sftp'),
|
|
docker = require('./docker.js'),
|
|
hat = require('./hat.js'),
|
|
infra = require('./infra_version.js'),
|
|
mounts = require('./mounts.js'),
|
|
path = require('node:path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
services = require('./services.js'),
|
|
shell = require('./shell.js')('sftp'),
|
|
volumes = require('./volumes.js');
|
|
|
|
// Ensures SSH host keys (rsa and ed25519) exist both in the blob store and as
// files under paths.SFTP_KEYS_DIR. If a key pair is missing from the blob
// store, a fresh one is generated with ssh-keygen and persisted; otherwise the
// stored keys are written out to disk for the sftp container to use.
// Throws BoxError (OPENSSL_ERROR or FS_ERROR) on failure.
async function ensureKeys() {
    for (const keyType of [ 'rsa', 'ed25519' ]) {
        const privateKey = await blobs.get(`sftp_${keyType}_private_key`);
        const publicKey = await blobs.get(`sftp_${keyType}_public_key`);
        const publicKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key.pub`);
        const privateKeyFile = path.join(paths.SFTP_KEYS_DIR, `ssh_host_${keyType}_key`);

        if (!privateKey || !publicKey) {
            debug(`ensureKeys: generating new sftp keys of type ${keyType}`);
            // remove any stale key files first so ssh-keygen does not prompt to overwrite
            safe.fs.unlinkSync(publicKeyFile);
            safe.fs.unlinkSync(privateKeyFile);
            const [error] = await safe(shell.spawn('ssh-keygen', ['-m', 'PEM', '-t', keyType, '-f', privateKeyFile, '-q', '-N', ''], {}));
            if (error) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ${keyType} keys: ${error.message}`);

            // verify the reads succeeded before persisting; otherwise a null blob would be stored
            const newPublicKey = safe.fs.readFileSync(publicKeyFile);
            if (!newPublicKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp public ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_public_key`, newPublicKey);

            const newPrivateKey = safe.fs.readFileSync(privateKeyFile);
            if (!newPrivateKey) throw new BoxError(BoxError.FS_ERROR, `Could not read generated sftp private ${keyType} key: ${safe.error.message}`);
            await blobs.set(`sftp_${keyType}_private_key`, newPrivateKey);
        } else {
            // keys already exist in the blob store; materialize them on disk
            if (!safe.fs.writeFileSync(publicKeyFile, publicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public ${keyType} key: ${safe.error.message}`);
            if (!safe.fs.writeFileSync(privateKeyFile, privateKey, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private ${keyType} key: ${safe.error.message}`);
        }
    }
}
|
|
|
|
// (Re)creates and starts the 'sftp' service container. Collects the host
// directories to expose over sftp (apps data, per-app localstorage dirs,
// volumes, mail data), ensures SSH host keys exist, then stops/deletes any
// previous container and runs a fresh one via `docker run`.
// existingInfra: previously-running infra versions object; when the sftp image
// changed, the old image is deleted after the new container is up.
async function start(existingInfra) {
    assert.strictEqual(typeof existingInfra, 'object');

    debug('start: re-creating container');

    const serviceConfig = await services.getServiceConfig('sftp');
    const image = infra.images.sftp;
    const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
    const cloudronToken = hat(8 * 128); // random 128-byte token, handed to the container via env

    await ensureKeys();

    // resolve symlinks; docker bind mounts need real paths
    const resolvedAppDataDir = safe.fs.realpathSync(paths.APPS_DATA_DIR);
    if (!resolvedAppDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve apps data dir: ${safe.error.message}`);

    const dataDirs = [{ hostDir: resolvedAppDataDir, mountDir: '/mnt/appsdata' }];

    // custom app data directories
    const allApps = await apps.list();
    for (const app of allApps) {
        // only apps using the localstorage addon with a dedicated storage volume get a mount
        if (!app.manifest.addons?.localstorage || !app.storageVolumeId) continue;

        const hostDir = await apps.getStorageDir(app), mountDir = `/mnt/app-${app.id}`; // see also sftp:userSearchSftp
        if (hostDir === null || !safe.fs.existsSync(hostDir)) { // this can fail if external mount does not have permissions for yellowtent user
            // do not create host path when cloudron is restoring. this will then create dir with root perms making restore logic fail
            debug(`Ignoring app data dir ${hostDir} for ${app.id} since it does not exist`);
            continue;
        }

        dataDirs.push({ hostDir, mountDir });
    }

    // volume directories
    dataDirs.push({ hostDir: '/mnt/volumes', mountDir: '/mnt/volumes' }); // managed volumes
    const allVolumes = await volumes.list();
    for (const volume of allVolumes) {
        if (mounts.isManagedProvider(volume.mountType)) continue; // skip managed volume. these are acessed via /mnt/volumes mount above

        if (!safe.fs.existsSync(volume.hostPath)) {
            debug(`Ignoring volume host path ${volume.hostPath} since it does not exist`);
            continue;
        }

        dataDirs.push({ hostDir: volume.hostPath, mountDir: `/mnt/volume-${volume.id}` });
    }

    // mail data dir
    const resolvedMailDataDir = safe.fs.realpathSync(paths.MAIL_DATA_DIR);
    if (!resolvedMailDataDir) throw new BoxError(BoxError.FS_ERROR, `Could not resolve mail data dir: ${safe.error.message}`);
    dataDirs.push({ hostDir: resolvedMailDataDir, mountDir: '/mnt/maildata' });

    // in recovery mode the container fs stays writable and the entrypoint is replaced
    // with a sleep loop so an operator can exec into it; otherwise the root fs is read-only
    const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
    const cmd = serviceConfig.recoveryMode ? '/bin/bash -c \'echo "Debug mode. Sleeping" && sleep infinity\'' : '';

    // hostDir is quoted since app/volume paths may contain spaces
    const volumeMounts = dataDirs.map(v => `-v "${v.hostDir}:${v.mountDir}"`).join(' ');
    const runCmd = `docker run --restart=unless-stopped -d --name=sftp \
        --hostname sftp \
        --net cloudron \
        --net-alias sftp \
        --log-driver syslog \
        --log-opt syslog-address=unix://${paths.SYSLOG_SOCKET_FILE} \
        --log-opt syslog-format=rfc5424 \
        --log-opt tag=sftp \
        -m ${memoryLimit} \
        --memory-swap -1 \
        -p 222:22 \
        ${volumeMounts} \
        -e CLOUDRON_SFTP_TOKEN=${cloudronToken} \
        -v ${paths.SFTP_KEYS_DIR}:/etc/ssh:ro \
        --label isCloudronManaged=true \
        ${readOnly} -v /tmp -v /run ${image} ${cmd}`; // /tmp and /run stay writable tmpfs-style even with --read-only

    debug('startSftp: stopping and deleting previous sftp container');
    await docker.stopContainer('sftp');
    await docker.deleteContainer('sftp');

    debug('startSftp: starting sftp container');
    await shell.bash(runCmd, { encoding: 'utf8' });

    // after an infra update, remove the superseded sftp image
    if (existingInfra.version !== 'none' && existingInfra.images.sftp !== image) await docker.deleteImage(existingInfra.images.sftp);
}
|