'use strict';

exports = module.exports = {
    reboot,

    getInfo,
    getUbuntuVersion,
    getKernelVersion,

    getSwaps,
    checkDiskSpace,
    getMemory,
    getLogs,
    getBlockDevices,

    runSystemChecks,
    getProvider,
    getCpus,

    getFilesystems,
    getFilesystemUsage
};
|
|
|
|
const apps = require('./apps.js'),
|
|
assert = require('node:assert'),
|
|
{ AsyncTask } = require('./asynctask.js'),
|
|
backupSites = require('./backupsites.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
debug = require('debug')('box:system'),
|
|
df = require('./df.js'),
|
|
docker = require('./docker.js'),
|
|
eventlog = require('./eventlog.js'),
|
|
fs = require('node:fs'),
|
|
logs = require('./logs.js'),
|
|
notifications = require('./notifications.js'),
|
|
os = require('node:os'),
|
|
path = require('node:path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
shell = require('./shell.js')('system'),
|
|
volumes = require('./volumes.js');
|
|
|
|
// helper scripts run with elevated privileges via shell.sudo() (see du(), hdparm() and reboot())
const DU_CMD = path.join(__dirname, 'scripts/du.sh');
const HDPARM_CMD = path.join(__dirname, 'scripts/hdparm.sh');
const REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh');
|
|
|
|
// Computes the disk usage (in bytes) of 'file' by running the du helper script with sudo.
// options.abortSignal - optional AbortSignal to cancel the scan.
// Throws BoxError.FS_ERROR when the script fails.
async function du(file, options) {
    assert.strictEqual(typeof file, 'string');
    assert.strictEqual(typeof options, 'object');

    const sudoOptions = { encoding: 'utf8', abortSignal: options.abortSignal };
    const [error, output] = await safe(shell.sudo([ DU_CMD, file ], sudoOptions));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    return parseInt(output.trim(), 10);
}
|
|
|
|
// Measures the sequential read speed of a block device using the hdparm helper script.
// file - block device path (e.g. /dev/sda).
// options.abortSignal - optional AbortSignal to cancel the measurement.
// Returns the speed in MB/sec, or -1 when the output cannot be parsed.
// Throws BoxError.FS_ERROR when the script itself fails.
async function hdparm(file, options) {
    assert.strictEqual(typeof file, 'string');
    assert.strictEqual(typeof options, 'object');

    const [error, stdoutResult] = await safe(shell.sudo([ HDPARM_CMD, file ], { encoding: 'utf8', abortSignal: options.abortSignal }));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    // expected output has the timing on the third line: " Timing buffered disk reads: ... = 123.45 MB/sec"
    const lines = stdoutResult.split('\n');

    if (lines.length !== 4) return -1;

    const parts = lines[2].split('=');
    if (parts.length !== 2) return -1;

    // parseFloat reads the leading number and ignores the trailing ' MB/sec' suffix.
    // (the previous slice(0, 'MB/sec'.length) kept only the first 6 characters of the
    // value and silently truncated speeds with more digits, e.g. "123.45" -> "123.4")
    const speed = Number.parseFloat(parts[1]);

    return Number.isNaN(speed) ? -1 : speed;
}
|
|
|
|
// Lists active swap areas keyed by name (device or file path).
// Returns {} on any error (best-effort) or when no swap is configured.
// Each entry: { name, type ('partition'|'file'), size (bytes), used (bytes) }.
async function getSwaps() {
    const [error, stdout] = await safe(shell.spawn('swapon', ['--noheadings', '--raw', '--bytes', '--show=type,size,used,name'], { encoding: 'utf8' }));
    if (error) return {}; // best-effort: treat failure as "no swap"

    const output = stdout.trim();
    if (!output) return {}; // no swaps

    const swaps = {};
    for (const line of output.split('\n')) {
        const parts = line.split(' ', 4); // --raw separates columns with single spaces
        const name = parts[3];
        swaps[name] = {
            name,
            type: parts[0], // partition or file
            size: parseInt(parts[1], 10), // always pass the radix
            used: parseInt(parts[2], 10),
        };
    }

    return swaps;
}
|
|
|
|
// this gets information based on mounted filesystems, swaps and the filesystem contents
// Returns an array of { filesystem, type, size, used, available, capacity, mountpoint, contents }
// where contents lists the known space consumers ({ type, id, name, path }) on that filesystem.
// Throws BoxError.FS_ERROR when df fails.
async function getFilesystems() {
    const FS_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types

    const [dfError, dfEntries] = await safe(df.filesystems());
    if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`);

    const filesystems = {}; // by file system (device path)
    let rootDisk; // entry mounted at '/'; fallback when df.file() fails for a path below

    for (const dfEntry of dfEntries) {
        if (!FS_TYPES.includes(dfEntry.type)) continue;
        if (dfEntry.mountpoint === '/') rootDisk = dfEntry;
        filesystems[dfEntry.filesystem] = {
            filesystem: dfEntry.filesystem,
            type: dfEntry.type,
            size: dfEntry.size,
            used: dfEntry.used,
            available: dfEntry.available,
            capacity: dfEntry.capacity,
            mountpoint: dfEntry.mountpoint,
            contents: [] // filled below . { type, id, path }
        };
    }

    // cloudron's own data directories; these are expected to resolve, so errors are fatal
    const standardPaths = [
        { type: 'standard', id: 'platformdata', name: 'Platform data', path: paths.PLATFORM_DATA_DIR },
        { type: 'standard', id: 'boxdata', name: 'Box data', path: paths.BOX_DATA_DIR },
        { type: 'standard', id: 'maildata', name: 'Mail data', path: paths.MAIL_DATA_DIR },
    ];

    for (const stdPath of standardPaths) {
        const [dfError, diskInfo] = await safe(df.file(stdPath.path));
        if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${dfError.message}`);
        filesystems[diskInfo.filesystem].contents.push(stdPath);
    }

    // filesystem based backup locations
    const sites = await backupSites.list();
    for (const backupSite of sites) {
        if (backupSite.provider === 'filesystem') {
            const [, dfResult] = await safe(df.file(backupSite.config.backupDir));
            const filesystem = dfResult?.filesystem || rootDisk.filesystem;
            if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'cloudron-backup', id: backupSite.id, name: backupSite.name, path: backupSite.config.backupDir });
        }
    }

    // often the default backup dir is not cleaned up
    const siteForDefault = sites.find(s => s.provider === 'filesystem' && s.config.backupDir === paths.DEFAULT_BACKUP_DIR);
    if (!siteForDefault) {
        const [, dfResult] = await safe(df.file(paths.DEFAULT_BACKUP_DIR));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', name: 'Default backup', path: paths.DEFAULT_BACKUP_DIR });
    }

    // docker images and volumes; both entries share DockerRootDir, usage is split later by FilesystemUsageTask
    const [dockerError, dockerInfo] = await safe(docker.info());
    if (!dockerError) {
        const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (filesystems[filesystem]) {
            filesystems[filesystem].contents.push({ type: 'standard', id: 'docker', name: 'Docker', path: dockerInfo.DockerRootDir });
            filesystems[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', name: 'Docker volumes', path: dockerInfo.DockerRootDir });
        }
    }

    // user volumes
    for (const volume of await volumes.list()) {
        const [, dfResult] = await safe(df.file(volume.hostPath));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'volume', id: volume.id, name: volume.name, path: volume.hostPath });
    }

    // app data directories (apps using the localstorage addon)
    for (const app of await apps.list()) {
        if (!app.manifest.addons?.localstorage) continue;

        const dataDir = await apps.getStorageDir(app);
        if (dataDir === null) continue;
        const [, dfResult] = await safe(df.file(dataDir));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'app', id: app.id, name: app.label || app.fqdn, path: dataDir });
    }

    // swap files (swap partitions are not filesystem contents)
    const swaps = await getSwaps();
    for (const k in swaps) {
        const swap = swaps[k];
        if (swap.type !== 'file') continue;

        const [, dfResult] = await safe(df.file(swap.name));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        // guard like the loops above; previously a swap file on an untracked filesystem type crashed here
        if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'swap', id: swap.name, name: swap.name, path: swap.name });
    }

    return Object.values(filesystems);
}
|
|
|
|
// Pins a disk-space notification when any tracked filesystem is at >= 90% capacity;
// unpins it otherwise. Only filesystems holding cloudron-managed content are considered.
async function checkDiskSpace() {
    debug('checkDiskSpace: checking disk space');

    const filesystems = await getFilesystems();

    let markdownMessage = '';

    for (const filesystem of filesystems) {
        if (filesystem.contents.length === 0) continue; // ignore if nothing interesting here

        if (filesystem.capacity >= 0.90) { // >= 90%
            const prettyUsed = df.prettyBytes(filesystem.used);
            const prettyAvailable = df.prettyBytes(filesystem.available);
            const prettySize = df.prettyBytes(filesystem.size);
            // round to avoid float artifacts like "91.39999999999999%" in the notification
            const percent = Math.round(filesystem.capacity * 100);
            markdownMessage += `* ${filesystem.filesystem} (${filesystem.type}) mounted at ${filesystem.mountpoint} is at ${percent}% capacity. Used: ${prettyUsed} Available: ${prettyAvailable} Size: ${prettySize}\n`;
        }
    }

    debug(`checkDiskSpace: disk space checked. low disk space: ${markdownMessage || 'no'}`);

    if (markdownMessage) {
        const finalMessage = `One or more file systems are running low on space. Please increase the disk size at the earliest.\n\n${markdownMessage}`;
        await notifications.pin(notifications.TYPE_DISK_SPACE, 'Server is running out of disk space', finalMessage, {});
    } else {
        await notifications.unpin(notifications.TYPE_DISK_SPACE, {});
    }
}
|
|
|
|
// Total size in bytes of all active swap areas (0 when there is no swap).
async function getSwapSize() {
    const swaps = await getSwaps();

    let total = 0;
    for (const swap of Object.values(swaps)) total += swap.size;

    return total;
}
|
|
|
|
// Reports total physical memory and total swap, both in bytes.
async function getMemory() {
    const swap = await getSwapSize();

    return {
        memory: os.totalmem(),
        swap
    };
}
|
|
|
|
// Async task that computes per-content disk usage for a single filesystem record
// (as returned by getFilesystems()). Progress and partial results are streamed to
// the caller via emitProgress()/emitData().
class FilesystemUsageTask extends AsyncTask {
    #filesystem; // filesystem record: { filesystem, type, contents, mountpoint, used, ... }

    constructor(filesystem) {
        super(`FileSystemUsageTask(${filesystem.filesystem})`);
        this.#filesystem = filesystem;
    }

    // Emits { speed } first (MB/sec, -1 when unavailable), then one { content }
    // per entry with its computed .usage in bytes; finally an 'os' entry for '/'.
    async _run(abortSignal) {
        const { filesystem, type, contents, mountpoint, used } = this.#filesystem;

        let percent = 5;

        if (type === 'ext4' || type === 'xfs') { // hdparm only works with block devices
            this.emitProgress(percent, 'Calculating Disk Speed');
            const [speedError, speed] = await safe(hdparm(filesystem, { abortSignal }));
            if (speedError) debug(`hdparm error ${filesystem}: ${speedError.message}`);
            this.emitData({ speed: speedError ? -1 : speed });
        } else {
            this.emitData({ speed: -1 }); // network/fuse mounts: no meaningful speed measurement
        }

        let usage = 0, dockerDf; // usage accumulates bytes; dockerDf is lazily fetched once

        for (const content of contents) {
            // NOTE(review): this parses as (90/contents.length) + 1, so the total advance
            // is 90 + contents.length and percent can exceed 100 for many entries —
            // possibly 90/(contents.length+1) was intended; confirm before changing.
            percent += (90/contents.length+1);
            if (abortSignal.aborted) return;

            this.emitProgress(percent,`Checking du of ${content.id} ${content.path}`);
            if (content.id === 'docker' || content.id === 'docker-volumes') {
                // both docker entries share one `docker system df` call
                if (!dockerDf) dockerDf = await docker.df({ abortSignal });
                content.usage = content.id === 'docker' ? dockerDf.LayersSize : dockerDf.Volumes.map((v) => v.UsageData.Size).reduce((a,b) => a + b, 0);
            } else {
                // this inner 'usage' shadows the outer accumulator; only content.usage escapes the block
                const [error, usage] = await safe(du(content.path, { abortSignal }));
                if (error) debug(`du error ${content.path}: ${error.message}`); // can happen if app is installing etc
                content.usage = usage || 0;
            }
            usage += content.usage;
            this.emitData({ content });
        }

        // whatever used space remains unaccounted on the root filesystem is attributed to the OS
        if (mountpoint === '/') this.emitData({ content: { type: 'standard', id: 'os', name: 'Ubuntu', usage: used - usage }});
    }
}
|
|
|
|
// Creates (but does not start) a usage-calculation task for the filesystem whose
// device path is fsPath. Throws BoxError.BAD_FIELD when no such filesystem exists.
async function getFilesystemUsage(fsPath) {
    assert.strictEqual(typeof fsPath, 'string');

    const allFilesystems = await getFilesystems();
    const entry = allFilesystems.find((f) => f.filesystem === fsPath);
    if (!entry) throw new BoxError(BoxError.BAD_FIELD, 'No such filesystem');

    return new FilesystemUsageTask(entry);
}
|
|
|
|
// Reboots the server via the reboot helper script. Clears any pending
// reboot notification first; failure to reboot is only logged.
async function reboot() {
    await notifications.unpin(notifications.TYPE_REBOOT, {});

    const [rebootError] = await safe(shell.sudo([ REBOOT_CMD ], {}));
    if (rebootError) debug('reboot: could not reboot. %o', rebootError);
}
|
|
|
|
// Collects basic host information: DMI vendor/product strings, uptime in seconds,
// whether a reboot is pending, and the cloudron activation time (null if not activated).
async function getInfo() {
    // https://serverfault.com/questions/92932/how-does-ubuntu-keep-track-of-the-system-restart-required-flag-in-motd
    const rebootRequired = fs.existsSync('/var/run/reboot-required');

    // safe.fs.readFileSync returns null on error; fall back so split() below cannot throw
    const uptime = safe.fs.readFileSync('/proc/uptime', 'utf8') || '0';
    const uptimeSecs = parseInt(uptime.split(' ')[0], 10);

    // these files may not exist if kernel does not have access to dmi data
    const sysVendor = safe.fs.readFileSync('/sys/devices/virtual/dmi/id/sys_vendor', 'utf8') || '';
    const productName = safe.fs.readFileSync('/sys/devices/virtual/dmi/id/product_name', 'utf8') || '';
    const productFamily = safe.fs.readFileSync('/sys/devices/virtual/dmi/id/product_family', 'utf8') || '';

    const activationTime = (await eventlog.getActivationEvent())?.creationTime || null;

    return {
        sysVendor: sysVendor.trim(),
        productName: productName.trim() || productFamily.trim(), // some platforms only fill product_family
        uptimeSecs,
        rebootRequired,
        activationTime
    };
}
|
|
|
|
// Returns a LogStream of the box unit's log file.
// unit - must be 'box' (BoxError.BAD_FIELD otherwise).
// options - { lines, follow, format } where format defaults to 'json'.
// The caller must destroy() the returned stream to stop tailing.
async function getLogs(unit, options) {
    assert.strictEqual(typeof unit, 'string');
    assert(options && typeof options === 'object');

    if (unit !== 'box') throw new BoxError(BoxError.BAD_FIELD, `No such unit '${unit}'`);

    const boxLogFile = path.join(paths.LOG_DIR, 'box.log');
    const tailProcess = logs.tail([ boxLogFile ], { lines: options.lines, follow: options.follow });

    const logStream = new logs.LogStream({ format: options.format || 'json', source: unit });
    logStream.on('close', () => tailProcess.terminate()); // the caller has to call destroy() on logStream. destroy() of Transform emits 'close'

    tailProcess.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// this gets block devices as opposed to mounted filesystems. this is used for configuring backups and volumes in the frontend
// Returns [{ path, size, used, type, uuid, rota, mountpoints }] for ext4/xfs partitions.
// Throws BoxError.INTERNAL_ERROR when lsblk output cannot be parsed.
async function getBlockDevices() {
    const output = await shell.spawn('lsblk', ['--paths', '--bytes', '--json', '--list', '--fs', '--output', '+rota,fsused,fsavail'], { encoding: 'utf8' });
    const info = safe.JSON.parse(output);
    if (!info) throw new BoxError(BoxError.INTERNAL_ERROR, `failed to parse lsblk: ${safe.error.message}`);

    // despite the function and variable names, this is the partitions and not the block devices!
    const devices = info.blockdevices.filter(d => d.fstype === 'ext4' || d.fstype === 'xfs');

    const result = [];
    for (const device of devices) {
        const mountpoints = Array.isArray(device.mountpoints)
            ? (device.mountpoints[0] === null ? [] : device.mountpoints) // convert [ null ] to []
            : (device.mountpoint ? [ device.mountpoint ] : []); // old lsblk only exposed one .mountpoint

        result.push({
            path: device.name,
            size: device.fsavail || 0, // NOTE(review): fsavail is *available* bytes, not total size — confirm the frontend expects this
            used: device.fsused || 0,
            type: device.fstype, // when null, it is not formatted
            uuid: device.uuid,
            rota: device.rota, // false (ssd) true (hdd) . unforuntately, this is not set correctly when virtualized (like in DO)
            mountpoints
        });
    }

    return result;
}
|
|
|
|
// Pins a reboot notification when Ubuntu flags a pending restart; unpins it otherwise.
async function checkRebootRequired() {
    const { rebootRequired } = await getInfo();

    if (!rebootRequired) {
        await notifications.unpin(notifications.TYPE_REBOOT, {});
        return;
    }

    await notifications.pin(notifications.TYPE_REBOOT, 'Reboot Required', 'To finish ubuntu security updates, a reboot is necessary.', {});
}
|
|
|
|
// Returns the running kernel version, e.g. "Linux 5.15.0-91-generic".
// Throws BoxError.FS_ERROR when /proc/version cannot be read or parsed.
async function getKernelVersion() {
    const data = safe.fs.readFileSync('/proc/version', 'utf-8');
    if (data === null) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    // guard the match: previously an unexpected format crashed with a TypeError on [1]
    const match = data.match(/^Linux version (\S+)/);
    if (!match) throw new BoxError(BoxError.FS_ERROR, 'Could not parse /proc/version');

    return `Linux ${match[1]}`;
}
|
|
|
|
// Returns the distro description from /etc/lsb-release, e.g. "Ubuntu 22.04.3 LTS".
// Throws BoxError.FS_ERROR when the file cannot be read or parsed.
async function getUbuntuVersion() {
    const release = safe.fs.readFileSync('/etc/lsb-release', 'utf-8');
    if (release === null) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    // guard the match: previously a missing DISTRIB_DESCRIPTION line crashed with a TypeError on [1]
    const match = release.match(/DISTRIB_DESCRIPTION="(.*)"/);
    if (!match) throw new BoxError(BoxError.FS_ERROR, 'Could not parse /etc/lsb-release');

    return match[1];
}
|
|
|
|
// https://wiki.ubuntu.com/Releases
// Pins an upgrade notification when the host still runs Ubuntu 20.04 (EOL).
async function checkUbuntuVersion() {
    // use safe.fs like getUbuntuVersion does: the raw readFileSync threw when
    // /etc/lsb-release was missing and the error was silently swallowed upstream
    const release = safe.fs.readFileSync('/etc/lsb-release', 'utf-8');
    if (release === null) return; // best-effort check
    if (!release.includes('20.04')) return;

    await notifications.pin(notifications.TYPE_UPDATE_UBUNTU, 'Ubuntu upgrade required', 'Ubuntu 20.04 is reaching end of life and will not receive security and maintenance updates. Please follow https://docs.cloudron.io/guides/upgrade-ubuntu-22/ to upgrade to Ubuntu 22.04 at the earliest.', {});
}
|
|
|
|
// Runs all periodic system checks; checks run concurrently and one failing
// check does not prevent the others from completing.
async function runSystemChecks() {
    debug('runSystemChecks: checking status');

    const results = await Promise.allSettled([
        checkRebootRequired(),
        checkUbuntuVersion()
    ]);

    // surface failures instead of swallowing them silently
    for (const result of results) {
        if (result.status === 'rejected') debug(`runSystemChecks: check failed: ${result.reason?.message}`);
    }
}
|
|
|
|
// Reads the infrastructure provider name from the provider marker file.
// Falls back to 'generic' when the file is missing or empty.
function getProvider() {
    const contents = safe.fs.readFileSync(paths.PROVIDER_FILE, 'utf8');
    if (!contents) return 'generic';
    return contents.trim();
}
|
|
|
|
// Returns per-core CPU information as reported by the OS.
async function getCpus() {
    const cpus = os.cpus();
    return cpus;
}