// File: cloudron-box/src/system.js (snapshot 2023-03-26 17:20:58 +02:00)
// 243 lines, 8.5 KiB, JavaScript
'use strict';

// Disk, swap and memory introspection for the box: enumerating disks and what
// Cloudron data lives on them, checking free space, and caching du/hdparm results.
exports = module.exports = {
    getDisks,
    getSwaps,
    checkDiskSpace,
    getMemory,
    getMemoryAllocation,
    getDiskUsage,
    updateDiskUsage
};
const apps = require('./apps.js'),
assert = require('assert'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:disks'),
df = require('./df.js'),
docker = require('./docker.js'),
notifications = require('./notifications.js'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
shell = require('./shell.js'),
volumes = require('./volumes.js');
// helper scripts run via sudo (see shell.promises.sudo below): du.sh measures a
// path's disk usage, hdparm.sh times buffered reads of a block device
const DU_CMD = path.join(__dirname, 'scripts/du.sh');
const HDPARM_CMD = path.join(__dirname, 'scripts/hdparm.sh');
/**
 * Measures the disk usage of `file` by running the du.sh helper via sudo.
 *
 * @param {string} file - path to measure
 * @returns {Promise<number>} usage in bytes (as printed by the script)
 * @throws {BoxError} FS_ERROR when the helper script fails
 */
async function du(file) {
    assert.strictEqual(typeof file, 'string');

    const [duError, output] = await safe(shell.promises.sudo('system', [ DU_CMD, file ], {}));
    if (duError) throw new BoxError(BoxError.FS_ERROR, duError);

    return parseInt(output.trim(), 10);
}
/**
 * Measures the buffered read speed of a block device using the hdparm.sh helper.
 *
 * Expected output (4 lines after splitting on '\n', incl. trailing empty line):
 *   <blank>
 *   /dev/sda:
 *    Timing buffered disk reads: 366 MB in 3.00 seconds = 121.72 MB/sec
 *
 * @param {string} file - block device path (e.g. /dev/sda)
 * @returns {Promise<number>} speed in MB/sec, or -1 when the output cannot be parsed
 * @throws {BoxError} FS_ERROR when the helper script fails
 */
async function hdparm(file) {
    assert.strictEqual(typeof file, 'string');

    const [error, stdoutResult] = await safe(shell.promises.sudo('system', [ HDPARM_CMD, file ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    const lines = stdoutResult.split('\n');
    if (lines.length !== 4) return -1;

    const parts = lines[2].split('=');
    if (parts.length !== 2) return -1;

    // parseFloat skips leading whitespace and stops at ' MB/sec', so the whole
    // value is kept. The previous slice(0, 'MB/sec'.length) only kept the first
    // 6 characters of the value (' 121.72 MB/sec' became 121.7), truncating it.
    const speed = parseFloat(parts[1]);
    return Number.isNaN(speed) ? -1 : speed;
}
/**
 * Lists the active swap areas by parsing `swapon` output.
 *
 * @returns {Promise<object>} map of swap name (device or file path) to
 *     { name, type, size, used } with size/used in bytes
 */
async function getSwaps() {
    const stdout = safe.child_process.execSync('swapon --noheadings --raw --bytes --show=type,size,used,name', { encoding: 'utf8' });
    if (!stdout) return {}; // no swap configured, or swapon failed

    const swaps = {};
    for (const line of stdout.trim().split('\n')) {
        // --raw prints single-space separated columns in --show order
        const [type, size, used, name] = line.split(' ', 4);
        swaps[name] = {
            name,
            type, // 'partition' or 'file'
            size: parseInt(size, 10), // bytes, thanks to --bytes
            used: parseInt(used, 10),
        };
    }

    return swaps;
}
/**
 * Enumerates tracked disks and maps which Cloudron-managed data (standard
 * directories, backups, docker, volumes, app localstorage, swap files) lives
 * on each of them.
 *
 * @returns {Promise<object>} map of filesystem device to { filesystem, type,
 *     size, used, available, capacity, mountpoint, contents }
 * @throws {BoxError} FS_ERROR when df fails
 */
async function getDisks() {
    const [dfError, dfEntries] = await safe(df.disks());
    if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`);

    const disks = {}; // by file system
    let rootDisk; // NOTE(review): stays undefined if '/' is not of a tracked type; the fallbacks below assume it is set

    const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types

    for (const disk of dfEntries) {
        if (!DISK_TYPES.includes(disk.type)) continue;
        if (disk.mountpoint === '/') rootDisk = disk;

        disks[disk.filesystem] = {
            filesystem: disk.filesystem,
            type: disk.type,
            size: disk.size,
            used: disk.used,
            available: disk.available,
            capacity: disk.capacity,
            mountpoint: disk.mountpoint,
            contents: [] // filled below
        };
    }

    const standardPaths = [
        { type: 'standard', id: 'platformdata', path: paths.PLATFORM_DATA_DIR },
        { type: 'standard', id: 'boxdata', path: paths.BOX_DATA_DIR },
        { type: 'standard', id: 'maildata', path: paths.MAIL_DATA_DIR },
    ];

    for (const stdPath of standardPaths) {
        const [stdError, diskInfo] = await safe(df.file(stdPath.path));
        if (stdError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${stdError.message}`);
        // guard like the sections below: the std path may live on an untracked fs type
        if (disks[diskInfo.filesystem]) disks[diskInfo.filesystem].contents.push(stdPath);
    }

    const backupConfig = await settings.getBackupConfig();
    if (backupConfig.provider === 'filesystem') {
        const [, dfResult] = await safe(df.file(backupConfig.backupFolder));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
    }

    const [dockerError, dockerInfo] = await safe(docker.info());
    if (!dockerError) {
        const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
    }

    for (const volume of await volumes.list()) {
        const [, dfResult] = await safe(df.file(volume.hostPath));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
    }

    for (const app of await apps.list()) {
        if (!app.manifest.addons?.localstorage) continue;
        const dataDir = await apps.getStorageDir(app);
        const [, dfResult] = await safe(df.file(dataDir));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
    }

    const swaps = await getSwaps();
    for (const k in swaps) {
        const swap = swaps[k];
        if (swap.type !== 'file') continue; // only swap *files* occupy space on a tracked disk

        const [, dfResult] = await safe(df.file(swap.name));
        const filesystem = dfResult?.filesystem || rootDisk.filesystem;
        // guard like the other sections; the original pushed unguarded and crashed
        // with a TypeError when a swap file was on an untracked filesystem type
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'swap', id: swap.name, path: swap.name });
    }

    return disks;
}
/**
 * Checks free space on every tracked disk that holds Cloudron data and raises
 * (or clears) the ALERT_DISK_SPACE notification accordingly.
 */
async function checkDiskSpace() {
    debug('checkDiskSpace: checking disk space');

    const disks = await getDisks();

    const LOW_SPACE_BYTES = 1.25 * 1024 * 1024 * 1024; // 1.25 GiB

    let markdownMessage = '';
    for (const disk of Object.values(disks)) {
        if (disk.contents.length === 0) continue; // ignore if nothing interesting here
        if (disk.available > LOW_SPACE_BYTES) continue;
        markdownMessage += `* ${disk.filesystem} is at ${disk.capacity*100}% capacity.\n`;
    }

    debug(`checkDiskSpace: disk space checked. out of space: ${markdownMessage || 'no'}`);

    if (markdownMessage) {
        const finalMessage = `One or more file systems are running out of space. Please increase the disk size at the earliest.\n\n${markdownMessage}`;
        await notifications.alert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space', finalMessage, { persist: true });
    } else {
        await notifications.clearAlert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space');
    }
}
// Total size, in bytes, of all active swap areas.
async function getSwapSize() {
    const swaps = await getSwaps();

    let total = 0;
    for (const name of Object.keys(swaps)) {
        total += swaps[name].size;
    }
    return total;
}
/**
 * Returns total physical memory and total swap, both in bytes.
 *
 * @returns {Promise<{memory: number, swap: number}>}
 */
async function getMemory() {
    const swap = await getSwapSize();
    return { memory: os.totalmem(), swap };
}
/**
 * Computes the RAM portion of a memory limit, rounded to the nearest MB.
 * The RAM/swap ratio is read from SWAP_RATIO_FILE when present; otherwise it
 * is derived from this server's RAM/(RAM+swap) proportion.
 *
 * @param {number} limit - total memory limit in bytes
 * @returns {Promise<number>} RAM allocation in bytes, a multiple of 1 MB
 */
async function getMemoryAllocation(limit) {
    // note: parseFloat takes a single argument; the original passed a stray ', 10' radix
    let ratio = parseFloat(safe.fs.readFileSync(paths.SWAP_RATIO_FILE, 'utf8'));
    if (!ratio) { // NaN (file missing or unparseable) or an explicit 0 both fall back to the computed ratio
        const pc = os.totalmem() / (os.totalmem() + await getSwapSize());
        ratio = Math.round(pc * 10) / 10; // a simple ratio, one decimal place
    }
    return Math.round(Math.round(limit * ratio) / 1048576) * 1048576; // nearest MB
}
// Returns the cached disk usage report written by updateDiskUsage(), or null
// when the cache file is missing or cannot be parsed.
async function getDiskUsage() {
    const contents = safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8');
    return safe.JSON.parse(contents);
}
/**
 * Recomputes disk speeds (hdparm) and per-content disk usage (du) for all
 * tracked disks, reporting progress along the way, and caches the result in
 * DISK_USAGE_FILE.
 *
 * @param {function} progressCallback - called with { percent?, message }
 * @returns {Promise<object>} the disks map augmented with .speed and per-content .usage
 * @throws {BoxError} FS_ERROR when the cache file cannot be written
 */
async function updateDiskUsage(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    const disks = await getDisks();
    const filesystems = Object.keys(disks);
    const now = Date.now();

    // starting at 0 makes the last report exactly 100; the original started at 1
    // and incremented before reporting, so the final progress came out as 101
    let percent = 0;
    for (const filesystem of filesystems) {
        const disk = disks[filesystem];

        const [speedError, speed] = await safe(hdparm(filesystem));
        if (speedError) progressCallback({ message: `hdparm error: ${speedError.message}`});
        disk.speed = speed; // undefined when hdparm failed

        percent += (100/filesystems.length);
        progressCallback({ percent, message: `Checking contents of ${filesystem}`});

        for (const content of disk.contents) {
            progressCallback({ message: `Checking du of ${JSON.stringify(content)}`});

            if (content.id === 'docker') {
                content.usage = (await docker.df()).LayersSize; // docker reports its own layer usage
            } else {
                const [error, usage] = await safe(du(content.path));
                if (error) progressCallback({ message: `du error: ${error.message}`}); // can happen if app is installing etc
                content.usage = usage || 0;
            }

            progressCallback({ message: `du of ${JSON.stringify(content)}: ${content.usage}`});
        }
    }

    if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, disks }), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`);

    return disks;
}