// cloudron-box/src/docker.js
// Last commit: Girish Ramakrishnan 4ed6fbbd74 "eslint: add no-shadow" (2026-02-18 08:18:37 +01:00)
// 752 lines, 31 KiB, JavaScript
import apps from './apps.js';
import assert from 'node:assert';
import BoxError from './boxerror.js';
import constants from './constants.js';
import dashboard from './dashboard.js';
import debugModule from 'debug';
import Docker from 'dockerode';
import dockerRegistries from './dockerregistries.js';
import fs from 'node:fs';
import mailServer from './mailserver.js';
import os from 'node:os';
import paths from './paths.js';
import promiseRetry from './promise-retry.js';
import services from './services.js';
import shellModule from './shell.js';
import safe from 'safetydance';
import timers from 'timers/promises';
import volumes from './volumes.js';
// namespaced debug logger for this module
const debug = debugModule('box:docker');
// shell helper tagged 'docker' for log attribution
const shell = shellModule('docker');
// single long-lived dockerode client talking to the local daemon's unix socket
const gConnection = new Docker({ socketPath: paths.DOCKER_SOCKET_PATH });
const CLOUDRON_REGISTRIES = [ 'registry.docker.com', 'registry.ipv4.docker.com', 'quay.io' ]; // order determines priority and is important!
// Splits a docker image reference into its components.
// A ref is like registry.docker.com/cloudron/base:4.2.0@sha256:46da2fffb36353ef714f97ae8e962bd2c212ca091108d768ba473078319a47f4
// registry.docker.com is the registry name, cloudron is the (optional) namespace, base is the image name,
// cloudron/base is the repository path and registry.docker.com/cloudron/base is fullRepositoryName.
// Missing components are returned as null.
function parseImageRef(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    // everything up to the first ':' or '@' is the full repository name
    const fullRepositoryName = imageRef.split(/[:@]/)[0];

    // a leading component with a '.' is a registry hostname; a bare name is a docker id/namespace
    // https://docs.docker.com/admin/faqs/general-faqs/#what-is-a-docker-id
    const firstComponent = fullRepositoryName.split('/')[0];
    const registry = firstComponent.includes('.') ? firstComponent : null;

    let tag = null, digest = null;
    let rest = imageRef.slice(fullRepositoryName.length);

    if (rest.startsWith(':')) {
        tag = rest.slice(1).split('@', 1)[0];
        rest = rest.slice(tag.length + 1); // skip the tag and its ':'
    }

    if (rest.startsWith('@sha256:')) digest = rest.slice('@sha256:'.length);

    return { fullRepositoryName, registry, tag, digest };
}
// Pings the local docker daemon; throws DOCKER_ERROR if it does not answer 'OK'.
async function ping() {
    // use a dedicated connection with a short timeout so the request does not linger
    const client = new Docker({ socketPath: paths.DOCKER_SOCKET_PATH, timeout: 1000 });

    const [error, response] = await safe(client.ping());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    // the daemon answers 'OK', sometimes as a Buffer instead of a string
    const answer = Buffer.isBuffer(response) ? response.toString('utf8') : response;
    if (answer === 'OK') return;

    throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to ping the docker daemon');
}
// Returns the dockerode authconfig object for pulling imageRef, or null when
// the pull should be anonymous (no configured registry matches).
async function getAuthConfig(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    const parsedRef = parseImageRef(imageRef);

    // images in our cloudron namespace are always unauthenticated to not interfere with any user limits
    if ((parsedRef.registry === null || CLOUDRON_REGISTRIES.includes(parsedRef.registry)) && parsedRef.fullRepositoryName.startsWith('cloudron/')) return null;

    // ideally the addresses match exactly but there's too many docker registry domains!
    // fall back to treating any *.docker.* registry as equivalent to a docker hub ref
    const matchesRef = (serverAddress) => {
        if (serverAddress === parsedRef.registry) return true;
        if (!serverAddress.includes('.docker.')) return false;
        return parsedRef.registry === null || parsedRef.registry.includes('.docker.');
    };

    const registries = await dockerRegistries.list();
    const registry = registries.find(r => matchesRef(r.serverAddress));
    if (!registry) return null;

    // https://github.com/apocas/dockerode#pull-from-private-repos
    return {
        username: registry.username,
        password: registry.password,
        auth: registry.auth || '', // the auth token at login time
        email: registry.email || '',
        serveraddress: registry.serverAddress
    };
}
// Pulls imageRef from its registry, streaming per-layer progress into the debug log.
// Throws NOT_FOUND for unknown images, FS_ERROR when a layer fails with 'no space',
// and DOCKER_ERROR otherwise. Resolves once the pull stream ends without a layer error.
async function pullImage(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    const authConfig = await getAuthConfig(imageRef);
    debug(`pullImage: will pull ${imageRef}. auth: ${authConfig ? 'yes' : 'no'}`);

    const [error, stream] = await safe(gConnection.pull(imageRef, { authconfig: authConfig }));
    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to pull image ${imageRef}. message: ${error.message} statusCode: ${error.statusCode}`);
    // toomanyrequests is flagged as a 500. dockerhub appears to have 10 pulls per hour per IP limit
    if (error && error.statusCode === 500) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${imageRef}. registry error: ${JSON.stringify(error)}`);
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull image ${imageRef}. Please check the network or if the image needs authentication. statusCode: ${error.statusCode}`);

    return new Promise((resolve, reject) => {
        // https://github.com/dotcloud/docker/issues/1074 says each status message is emitted as a chunk
        let layerError = null; // last per-layer error seen in the stream, if any

        stream.on('data', function (chunk) {
            const data = safe.JSON.parse(chunk) || { };
            debug('pullImage: %j', data);

            // The data.status here is useless because this is per layer as opposed to per image
            if (!data.status && data.error) { // data is { errorDetail: { message: xx } , error: xx }
                debug(`pullImage error ${imageRef}: ${data.errorDetail.message}`);
                layerError = data.errorDetail;
            }
        });

        stream.on('end', function () {
            debug(`downloaded image ${imageRef} . error: ${!!layerError}`);
            if (!layerError) return resolve();
            reject(new BoxError(layerError.message.includes('no space') ? BoxError.FS_ERROR : BoxError.DOCKER_ERROR, layerError.message));
        });

        stream.on('error', function (streamError) { // this is only hit for stream error and not for some download error
            debug(`error pulling image ${imageRef}: %o`, streamError);
            reject(new BoxError(BoxError.DOCKER_ERROR, streamError.message));
        });
    });
}
// Builds dockerImage from a tar archive containing the build context (Dockerfile et al).
// Streams build output into the debug log. Throws FS_ERROR when the build fails with
// 'no space' and DOCKER_ERROR for any other build or stream failure.
async function buildImage(dockerImage, sourceArchiveFilePath) {
    assert.strictEqual(typeof dockerImage, 'string');
    assert.strictEqual(typeof sourceArchiveFilePath, 'string');

    debug(`buildImage: building ${dockerImage} from ${sourceArchiveFilePath}`);

    const tarStream = fs.createReadStream(sourceArchiveFilePath);
    const [error, stream] = await safe(gConnection.buildImage(tarStream, { t: dockerImage }));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to build image from ${sourceArchiveFilePath}: ${error.message}`);

    return new Promise((resolve, reject) => {
        let buildError = null; // last error object seen in the build output, if any

        stream.on('data', (chunk) => {
            const data = safe.JSON.parse(chunk) || {};
            if (data.error) {
                buildError = data.errorDetail || { message: data.error };
            } else {
                const message = (data.stream || data.status || data.aux?.ID || '').replace(/\n$/, '');
                if (message) debug('buildImage: ' + message);
            }
        });

        stream.on('end', () => {
            if (buildError) {
                debug('buildImage: error %o', buildError); // was `${buildError}` which logged '[object Object]'
                return reject(new BoxError(buildError.message.includes('no space') ? BoxError.FS_ERROR : BoxError.DOCKER_ERROR, buildError.message));
            }
            debug(`buildImage: success ${dockerImage}`);
            resolve();
        });

        stream.on('error', (streamError) => {
            debug(`buildImage: error building image ${dockerImage}: %o`, streamError);
            reject(new BoxError(BoxError.DOCKER_ERROR, streamError.message));
        });
    });
}
// Builds the docker Mounts entries for the app's configured volume mounts.
// Each app mount is resolved against the volume list and bind-mounted under /media/<name>.
async function getVolumeMounts(app) {
    assert.strictEqual(typeof app, 'object');

    if (app.mounts.length === 0) return [];

    const allVolumes = await volumes.list();
    const volumesById = new Map(allVolumes.map(v => [ v.id, v ]));

    return app.mounts.map(mount => {
        const volume = volumesById.get(mount.volumeId);
        return {
            Source: volume.hostPath,
            Target: `/media/${volume.name}`,
            Type: 'bind',
            ReadOnly: mount.readOnly
        };
    });
}
// Builds the docker Mounts entries implied by the app's manifest addons.
// Only 'localstorage' (writable /app/data) and 'tls' (read-only /etc/certs) produce mounts.
async function getAddonMounts(app) {
    assert.strictEqual(typeof app, 'object');

    const mounts = [];
    const addons = app.manifest.addons;
    if (!addons) return mounts;

    // iterate in manifest key order so the resulting mount order is stable
    for (const addonName of Object.keys(addons)) {
        if (addonName === 'localstorage') {
            mounts.push({
                Target: '/app/data',
                Source: await apps.getStorageDir(app),
                Type: 'bind',
                ReadOnly: false
            });
        } else if (addonName === 'tls') {
            mounts.push({
                Target: '/etc/certs',
                Source: `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`,
                Type: 'bind',
                ReadOnly: true
            });
        }
    }

    return mounts;
}
// Returns all docker Mounts entries for the app: volume mounts first, then addon mounts.
async function getMounts(app) {
    assert.strictEqual(typeof app, 'object');

    // the two lookups are independent; resolve them in parallel
    const [volumeMounts, addonMounts] = await Promise.all([ getVolumeMounts(app), getAddonMounts(app) ]);

    return volumeMounts.concat(addonMounts);
}
// Returns the global-scope IPv4 addresses of all physical network interfaces.
// Port 53 needs per-address binding because systemd-resolved already listens on
// 127.0.0.x:53. Best-effort: returns [] if the interface list cannot be read and
// skips interfaces whose addresses cannot be queried.
async function getAddressesForPort53() {
    const [error, deviceLinks] = await safe(fs.promises.readdir('/sys/class/net')); // https://man7.org/linux/man-pages/man5/sysfs.5.html
    if (error) return [];

    // virtual devices (bridges, veth, ...) have 'virtual' in their sysfs link target
    const devices = deviceLinks.map(d => { return { name: d, link: safe.fs.readlinkSync(`/sys/class/net/${d}`) }; });
    const physicalDevices = devices.filter(d => d.link && !d.link.includes('virtual'));

    const addresses = [];
    for (const phy of physicalDevices) {
        // -f inet restricts to ipv4, -j produces JSON output
        const [ipError, output] = await safe(shell.spawn('ip', ['-f', 'inet', '-j', 'addr', 'show', 'dev', phy.name, 'scope', 'global'], { encoding: 'utf8' }));
        if (ipError) continue;

        const inet = safe.JSON.parse(output) || [];
        for (const r of inet) {
            const address = safe.query(r, 'addr_info[0].local');
            if (address) addresses.push(address);
        }
    }

    return addresses;
}
// Note: getAddressesForPort53 above only returns ipv4 addresses.
// We don't bind to ipv6 interfaces; the public prefix changes and container restarts won't work
// Starts a stopped container. Already-started (304) is not an error.
// Throws NOT_FOUND (404), BAD_FIELD (400, e.g start.sh is not executable) or DOCKER_ERROR.
async function startContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const [error] = await safe(gConnection.getContainer(containerId).start());
    if (!error) return;

    if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
    if (error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
    if (error.statusCode !== 304) throw new BoxError(BoxError.DOCKER_ERROR, error); // 304 means already started
}
// Restarts a container. Throws NOT_FOUND (404), BAD_FIELD (400, e.g start.sh is not
// executable) or DOCKER_ERROR; 204 is the success status.
async function restartContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const container = gConnection.getContainer(containerId);
    const [error] = await safe(container.restart());
    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`); // message previously misspelt 'Contanier'
    if (error && error.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, error); // e.g start.sh is not executable
    if (error && error.statusCode !== 204) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
// Stops a container and waits for it to exit. Already-stopped (304) and
// missing (404) containers are treated as success.
async function stopContainer(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    debug(`stopContainer: stopping container ${containerId}`);

    const container = gConnection.getContainer(containerId);

    // give the container 10 seconds to shut down cleanly before docker kills it
    const [stopError] = await safe(container.stop({ t: 10 }));
    if (stopError && stopError.statusCode !== 304 && stopError.statusCode !== 404) throw new BoxError(BoxError.DOCKER_ERROR, 'Error stopping container:' + stopError.message);

    const [waitError] = await safe(container.wait());
    if (waitError && waitError.statusCode !== 304 && waitError.statusCode !== 404) throw new BoxError(BoxError.DOCKER_ERROR, 'Error waiting on container:' + waitError.message);
}
// Force-removes a container (and its anonymous volumes). A missing container (404)
// is treated as success; any other failure raises DOCKER_ERROR.
async function deleteContainer(containerId) { // id can also be name
    assert.strictEqual(typeof containerId, 'string');

    debug(`deleteContainer: deleting ${containerId}`);

    const [error] = await safe(gConnection.getContainer(containerId).remove({
        force: true, // kill container if it's running
        v: true // removes volumes associated with the container (but not host mounts)
    }));
    if (!error) return;
    if (error.statusCode === 404) return; // already gone

    debug('Error removing container %s : %o', containerId, error);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
// Deletes all containers of an app, optionally restricted to cloudron-managed ones
// (options.managedOnly). Containers are matched via docker labels.
async function deleteContainers(appId, options) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof options, 'object');

    const labelFilters = [ 'appId=' + appId ];
    if (options.managedOnly) labelFilters.push('isCloudronManaged=true');

    const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters: JSON.stringify({ label: labelFilters }) }));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    for (const { Id } of containers) await deleteContainer(Id);
}
// Stops all containers (app container and subcontainers) belonging to an app.
async function stopContainers(appId) {
    assert.strictEqual(typeof appId, 'string');

    const filters = JSON.stringify({ label: [ 'appId=' + appId ] });
    const [error, containers] = await safe(gConnection.listContainers({ all: 1, filters }));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    for (const { Id } of containers) await stopContainer(Id);
}
// Removes the image referenced by imageRef from the local docker cache.
// Best-effort: empty/malformed refs, missing images (404) and images still in
// use by a container (409) are silently ignored; other failures raise DOCKER_ERROR.
async function deleteImage(imageRef) {
    assert.strictEqual(typeof imageRef, 'string');

    if (!imageRef) return;
    if (imageRef.includes('//') || imageRef.startsWith('/')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module (https://github.com/apocas/dockerode/issues/548)

    const removeOptions = {
        force: false, // might be shared with another instance of this app
        noprune: false // delete untagged parents
    };

    // registry v1 used to pull down all *tags*. this meant that deleting image by tag was not enough (since that
    // just removes the tag). we used to remove the image by id. this is not required anymore because aliases are
    // not created anymore after https://github.com/docker/docker/pull/10571
    debug(`deleteImage: removing ${imageRef}`);

    const [error] = await safe(gConnection.getImage(imageRef.replace(/@sha256:.*/,'')).remove(removeOptions)); // can't have the manifest id. won't remove anything
    if (error && error.statusCode === 400) return; // invalid image format. this can happen if user installed with a bad --docker-image
    if (error && error.statusCode === 404) return; // not found
    if (error && error.statusCode === 409) return; // another container using the image
    if (error) {
        debug(`Error removing image ${imageRef} : %o`, error);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }
}
// Returns the docker inspect output of a container.
// Throws NOT_FOUND when the container does not exist, DOCKER_ERROR otherwise.
async function inspect(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    const [error, details] = await safe(gConnection.getContainer(containerId).inspect());
    if (!error) return details;

    if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find container ${containerId}`);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
// Ensures the image referenced in the manifest is present locally.
// Appstore ('cloudron/*') images are pulled from the CLOUDRON_REGISTRIES mirrors
// in priority order and retagged without the registry prefix; other images are
// pulled directly. Retried up to 10 times unless the failure was disk-full.
async function downloadImage(manifest) {
    assert.strictEqual(typeof manifest, 'object');

    debug(`downloadImage: ${manifest.dockerImage}`);

    const image = gConnection.getImage(manifest.dockerImage);
    const [error, result] = await safe(image.inspect());
    if (!error && result) return; // image is already present locally

    const parsedManifestRef = parseImageRef(manifest.dockerImage);

    await promiseRetry({ times: 10, interval: 5000, debug, retry: (pullError) => pullError.reason !== BoxError.FS_ERROR }, async () => {
        // custom (non appstore) image
        if (parsedManifestRef.registry !== null || !parsedManifestRef.fullRepositoryName.startsWith('cloudron/')) return await pullImage(manifest.dockerImage);

        // docker hub only uses first 64 bits for ipv6 addressing. this causes many ipv6 rate limit errors
        // https://www.docker.com/blog/beta-ipv6-support-on-docker-hub-registry/ . as a hack, we try ipv4 explicitly
        let upstreamRef = null, pullError = null;
        for (const registry of CLOUDRON_REGISTRIES) {
            upstreamRef = `${registry}/${manifest.dockerImage}`;
            [pullError] = await safe(pullImage(upstreamRef));
            if (!pullError) break;
        }
        if (pullError || !upstreamRef) throw new BoxError(BoxError.DOCKER_ERROR, `Unable to pull ${manifest.dockerImage} from dockerhub or quay: ${pullError?.message}`);

        // retag the downloaded image to not have the registry name. this prevents 'docker run' from redownloading it
        debug(`downloadImage: tagging ${upstreamRef} as ${parsedManifestRef.fullRepositoryName}:${parsedManifestRef.tag}`);
        await gConnection.getImage(upstreamRef).tag({ repo: parsedManifestRef.fullRepositoryName, tag: parsedManifestRef.tag });

        debug(`downloadImage: untagging ${upstreamRef}`);
        await deleteImage(upstreamRef);
    });
}
// Returns the container's IPv4 address on the 'cloudron' bridge network.
// In test mode a fixed address is returned without touching docker.
async function getContainerIp(containerId) {
    assert.strictEqual(typeof containerId, 'string');

    if (constants.TEST) return '127.0.5.5';

    const containerInfo = await inspect(containerId);

    const ip = safe.query(containerInfo, 'NetworkSettings.Networks.cloudron.IPAddress', null);
    if (!ip) throw new BoxError(BoxError.DOCKER_ERROR, 'Error getting container IP');

    return ip;
}
// Creates an exec instance inside a running container and returns its id.
// Throws NOT_FOUND (404), BAD_STATE (409, container restarting/not running) or DOCKER_ERROR.
async function createExec(containerId, options) {
    assert.strictEqual(typeof containerId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, exec] = await safe(gConnection.getContainer(containerId).exec(options));
    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
        if (error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return exec.id;
}
// Returns the state of an exec instance as { exitCode, running }.
async function getExec(execId) {
    assert.strictEqual(typeof execId, 'string');

    const [error, state] = await safe(gConnection.getExec(execId).inspect());
    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return { exitCode: state.ExitCode, running: state.Running };
}
// Starts a previously-created exec instance and returns its stream.
async function startExec(execId, options) {
    assert.strictEqual(typeof execId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, stream] = await safe(gConnection.getExec(execId).start(options)); /* in hijacked mode, stream is a net.socket */
    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return stream;
}
// Resizes the TTY of an exec instance. options is { h, w }.
async function resizeExec(execId, options) {
    assert.strictEqual(typeof execId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error] = await safe(gConnection.getExec(execId).resize(options));
    if (!error) return;

    if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Exec container ${execId} not found`);
    throw new BoxError(BoxError.DOCKER_ERROR, error);
}
// Returns the docker daemon's event stream for the given filter options.
async function getEvents(options) {
    assert.strictEqual(typeof options, 'object');

    const [error, eventStream] = await safe(gConnection.getEvents(options));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

    return eventStream;
}
// Returns resource usage stats of a container. options.stream selects a one-shot
// snapshot (falsy) or a continuous stats stream (truthy).
async function getStats(containerId, options) {
    assert.strictEqual(typeof containerId, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, stats] = await safe(gConnection.getContainer(containerId).stats({ stream: Boolean(options.stream) }));
    if (error) {
        if (error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Container ${containerId} not found`);
        throw new BoxError(BoxError.DOCKER_ERROR, error);
    }

    return stats;
}
// Returns the docker daemon's system-wide information (docker info).
async function info() {
    const [error, daemonInfo] = await safe(gConnection.info());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`);

    return daemonInfo;
}
// Returns docker disk usage information (docker system df).
async function df(options) {
    assert.strictEqual(typeof options, 'object');

    const [error, usage] = await safe(gConnection.df(options));
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Error connecting to docker: ${error.message}`);

    return usage;
}
// Adjusts the memory limit of a running container (used e.g to scale back db containers).
// Retried because updating memory constraints can fail with
// 'failed to write to memory.memsw.limit_in_bytes: write /sys/fs/cgroup/memory/docker/xx/memory.memsw.limit_in_bytes: device or resource busy'
async function update(name, memory) {
    assert.strictEqual(typeof name, 'string');
    assert.strictEqual(typeof memory, 'number');

    const MAX_ATTEMPTS = 10;
    for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
        const [error] = await safe(shell.spawn('docker', ['update', '--memory', memory, '--memory-swap', '-1', name], { encoding: 'utf8' }));
        if (!error) return;
        if (attempt < MAX_ATTEMPTS) await timers.setTimeout(60 * 1000); // previously we also slept a minute after the final failure before throwing
    }

    throw new BoxError(BoxError.DOCKER_ERROR, 'Unable to update container');
}
// Creates (but does not start) a docker container for the app.
// When cmd is null this is the app's main container; otherwise it is a subcontainer
// (e.g a scheduler task) that shares the app container's network and volumes.
// options is merged over the computed container options (shallow Object.assign).
// Throws ALREADY_EXISTS (409) or DOCKER_ERROR; returns the dockerode container object.
async function createSubcontainer(app, name, cmd, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof name, 'string');
    assert(!cmd || Array.isArray(cmd));
    assert.strictEqual(typeof options, 'object');

    const isAppContainer = !cmd; // non app-containers are like scheduler

    const manifest = app.manifest;
    const exposedPorts = {}, dockerPortBindings = { };
    const domain = app.fqdn;

    const { fqdn:dashboardFqdn } = await dashboard.getLocation();

    // standard environment exposed to every cloudron app
    const stdEnv = [
        'LANG=C.UTF-8',
        'CLOUDRON=1',
        'CLOUDRON_PROXY_IP=172.18.0.1',
        `CLOUDRON_APP_HOSTNAME=${app.id}`,
        `CLOUDRON_WEBADMIN_ORIGIN=https://${dashboardFqdn}`,
        `CLOUDRON_API_ORIGIN=https://${dashboardFqdn}`,
        `CLOUDRON_APP_ORIGIN=https://${domain}`,
        `CLOUDRON_APP_DOMAIN=${domain}`
    ];

    if (app.manifest.multiDomain) stdEnv.push(`CLOUDRON_ALIAS_DOMAINS=${app.aliasDomains.map(ad => ad.fqdn).join(',')}`);

    const secondaryDomainsEnv = app.secondaryDomains.map(sd => `${sd.environmentVariable}=${sd.fqdn}`);

    // translate the app's port bindings into docker ExposedPorts/PortBindings and env vars
    const portEnv = [];
    for (const portName in app.portBindings) {
        const { hostPort, type:portType, count:portCount } = app.portBindings[portName];
        const portSpec = portType == 'tcp' ? manifest.tcpPorts : manifest.udpPorts;
        const containerPort = portSpec[portName].containerPort || hostPort;

        // port 53 is special. systemd-resolved is listening on 127.0.0.x port 53 and another process cannot listen to 0.0.0.0 port 53
        // for port 53 alone, we listen explicitly on the server's interface IP
        const hostIps = hostPort === 53 ? await getAddressesForPort53() : [ '0.0.0.0', '::0' ];

        portEnv.push(`${portName}=${hostPort}`);
        if (portCount > 1) portEnv.push(`${portName}_COUNT=${portCount}`);

        // docker portBindings requires ports to be exposed
        for (let i = 0; i < portCount; ++i) {
            exposedPorts[`${containerPort+i}/${portType}`] = {};
            dockerPortBindings[`${containerPort+i}/${portType}`] = hostIps.map(hip => { return { HostIp: hip, HostPort: String(hostPort + i) }; });
        }
    }

    // app-specific environment configured by the user
    const appEnv = [];
    Object.keys(app.env).forEach(function (envName) { appEnv.push(`${envName}=${app.env[envName]}`); });

    let memoryLimit = apps.getMemoryLimit(app);
    // give scheduler tasks twice the memory limit since background jobs take more memory
    // if required, we can make this a manifest and runtime argument later
    if (!isAppContainer) memoryLimit *= 2;

    const mounts = await getMounts(app);

    const addonEnv = await services.getEnvironment(app);

    // writable tmpfs-style locations on an otherwise read-only rootfs
    const runtimeVolumes = {
        '/tmp': {},
        '/run': {},
    };
    if (app.manifest.runtimeDirs) {
        app.manifest.runtimeDirs.forEach(dir => runtimeVolumes[dir] = {});
    }

    const containerOptions = {
        name: name, // for referencing containers
        Tty: isAppContainer,
        Image: app.manifest.dockerImage,
        Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
        Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv).concat(secondaryDomainsEnv),
        ExposedPorts: isAppContainer ? exposedPorts : { },
        Volumes: runtimeVolumes,
        Labels: {
            'fqdn': app.fqdn,
            'appId': app.id,
            'isSubcontainer': String(!isAppContainer),
            'isCloudronManaged': String(true)
        },
        HostConfig: {
            Mounts: mounts,
            LogConfig: {
                Type: 'syslog',
                Config: {
                    'tag': app.id,
                    'syslog-address': `unix://${paths.SYSLOG_SOCKET_FILE}`,
                    'syslog-format': 'rfc5424'
                }
            },
            Memory: memoryLimit,
            MemorySwap: -1, // Unlimited swap
            PortBindings: isAppContainer ? dockerPortBindings : {},
            PublishAllPorts: false,
            ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
            RestartPolicy: {
                'Name': isAppContainer ? 'unless-stopped' : 'no',
                'MaximumRetryCount': 0
            },
            // CpuPeriod (100000 microseconds) and CpuQuota(app.cpuQuota% of CpuPeriod)
            // 1000000000 is one core https://github.com/moby/moby/issues/24713#issuecomment-233167619 and https://stackoverflow.com/questions/52391877/set-the-number-of-cpu-cores-of-a-container-using-docker-engine-api
            NanoCPUs: app.cpuQuota === 100 ? 0 : Math.round(os.cpus().length * app.cpuQuota/100 * 1000000000),
            VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
            SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
            CapAdd: [],
            CapDrop: [],
            Sysctls: {},
            ExtraHosts: []
        }
    };

    // do not set hostname of containers to location as it might conflict with addons names. for example, an app installed in mail
    // location may not reach mail container anymore by DNS. We cannot set hostname to fqdn either as that sets up the dns
    // name to look up the internal docker ip. this makes curl from within container fail
    // Note that Hostname has no effect on DNS. We have to use the --net-alias for dns.
    // Hostname cannot be set with container NetworkMode. Subcontainers run in the network space of the app container
    // This is done to prevent lots of up/down events and iptables locking
    if (isAppContainer) {
        containerOptions.Hostname = app.id;
        containerOptions.HostConfig.NetworkMode = 'cloudron'; // user defined bridge network

        // Do not inject for AdGuard. It ends up resolving the dashboard domain as the docker bridge IP
        if (manifest.id !== 'com.adguard.home.cloudronapp') containerOptions.HostConfig.ExtraHosts.push(`${dashboardFqdn}:172.18.0.1`);

        if (manifest.addons?.sendmail?.requiresValidCertificate) {
            const { fqdn:mailFqdn } = await mailServer.getLocation();
            containerOptions.HostConfig.ExtraHosts.push(`${mailFqdn}:${constants.MAIL_SERVICE_IPv4}`);
        }

        containerOptions.NetworkingConfig = {
            EndpointsConfig: {
                cloudron: {
                    IPAMConfig: {
                        IPv4Address: app.containerIp
                    },
                    Aliases: [ name ] // adds hostname entry with container name
                }
            }
        };
    } else {
        containerOptions.HostConfig.NetworkMode = `container:${app.containerId}`; // scheduler containers must have same IP as app for various addon auth
    }

    const capabilities = manifest.capabilities || [];
    // https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
    if (capabilities.includes('net_admin')) {
        containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
        // ipv6 for new interfaces is disabled in the container. this prevents the openvpn tun device having ipv6
        // See https://github.com/moby/moby/issues/20569 and https://github.com/moby/moby/issues/33099
        containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.disable_ipv6'] = '0';
        containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.forwarding'] = '1';
    }

    if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping

    if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker

    // pass through requested host devices, skipping ones that do not exist on this host
    containerOptions.HostConfig.Devices = Object.keys(app.devices).map((d) => {
        if (!safe.fs.existsSync(d)) {
            debug(`createSubcontainer: device ${d} does not exist. Skipping...`);
            return null;
        }
        return { PathOnHost: d, PathInContainer: d, CgroupPermissions: 'rwm' };
    }).filter(d => d);

    if (capabilities.includes('vaapi') && safe.fs.existsSync('/dev/dri')) {
        containerOptions.HostConfig.Devices.push({ PathOnHost: '/dev/dri', PathInContainer: '/dev/dri', CgroupPermissions: 'rwm' });
    }

    const mergedOptions = Object.assign({}, containerOptions, options);
    const [createError, container] = await safe(gConnection.createContainer(mergedOptions));
    if (createError && createError.statusCode === 409) throw new BoxError(BoxError.ALREADY_EXISTS, createError);
    if (createError) throw new BoxError(BoxError.DOCKER_ERROR, createError);
    return container;
}
// Creates the app's main container: named after the app id, with no custom cmd.
async function createContainer(app) {
    return await createSubcontainer(app, app.id /* name */, null /* cmd */, { } /* options */);
}
// public API of this module; see the individual functions for error semantics
export default {
    ping,
    info,
    df,
    buildImage,
    downloadImage,
    createContainer,
    startContainer,
    restartContainer,
    stopContainer,
    stopContainers,
    deleteContainer,
    deleteImage,
    deleteContainers,
    createSubcontainer,
    inspect,
    getContainerIp,
    getEvents,
    getStats,
    update,
    parseImageRef,
    createExec,
    startExec,
    getExec,
    resizeExec
};