'use strict';

exports = module.exports = {
    getDisks,
    checkDiskSpace,
    getMemory,
    getMemoryAllocation,
    getDiskUsage,
    updateDiskUsage
};

const apps = require('./apps.js'),
    assert = require('assert'),
    BoxError = require('./boxerror.js'),
    debug = require('debug')('box:disks'),
    df = require('./df.js'),
    docker = require('./docker.js'),
    notifications = require('./notifications.js'),
    os = require('os'),
    path = require('path'),
    paths = require('./paths.js'),
    safe = require('safetydance'),
    settings = require('./settings.js'),
    shell = require('./shell.js'),
    volumes = require('./volumes.js');

const DU_CMD = path.join(__dirname, 'scripts/du.sh');

// Runs scripts/du.sh on `file` via sudo and returns the reported usage in bytes.
// Throws BoxError.FS_ERROR if the script fails (e.g. path vanished mid-scan).
async function du(file) {
    assert.strictEqual(typeof file, 'string');

    const [error, stdoutResult] = await safe(shell.promises.sudo('system', [ DU_CMD, file ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    return parseInt(stdoutResult.trim(), 10);
}

// Returns a map of filesystem -> { filesystem, type, size, used, available, capacity,
// mountpoint, contents } where `contents` lists the Cloudron-relevant paths (standard
// data dirs, backup folder, docker root, volumes, app localstorage dirs) living on
// that filesystem. Only ext4/xfs filesystems are tracked.
async function getDisks() {
    const [dfError, dfEntries] = await safe(df.disks());
    if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`);

    const disks = {}; // by file system
    let rootDisk; // NOTE(review): stays undefined if '/' is not ext4/xfs; the fallbacks below would then crash — confirm this cannot happen in practice
    for (const disk of dfEntries) {
        if (disk.type !== 'ext4' && disk.type !== 'xfs') continue;
        if (disk.mountpoint === '/') rootDisk = disk;

        disks[disk.filesystem] = {
            filesystem: disk.filesystem,
            type: disk.type,
            size: disk.size,
            used: disk.used,
            available: disk.available,
            capacity: disk.capacity,
            mountpoint: disk.mountpoint,
            contents: [] // filled below
        };
    }

    const standardPaths = [
        { type: 'standard', id: 'platformdata', path: paths.PLATFORM_DATA_DIR },
        { type: 'standard', id: 'boxdata', path: paths.BOX_DATA_DIR },
        { type: 'standard', id: 'maildata', path: paths.MAIL_DATA_DIR },
    ];

    for (const stdPath of standardPaths) {
        const [dfError, diskInfo] = await safe(df.file(stdPath.path));
        if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${dfError.message}`);
        // NOTE(review): if a data dir sits on a filesystem type skipped above, this entry
        // is undefined and .push throws a TypeError — verify against supported setups
        disks[diskInfo.filesystem].contents.push(stdPath);
    }

    // filesystem backups live on a local disk; attribute them to it (root disk if df fails)
    const backupConfig = await settings.getBackupConfig();
    if (backupConfig.provider === 'filesystem') {
        const [, dfResult] = await safe(df.file(backupConfig.backupFolder));
        disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
    }

    // docker image/layer storage. best-effort: skipped if the docker daemon is unreachable
    const [dockerError, dockerInfo] = await safe(docker.info());
    if (!dockerError) {
        const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
        disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
    }

    for (const volume of await volumes.list()) {
        const [, dfResult] = await safe(df.file(volume.hostPath));
        disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
    }

    // only apps using the localstorage addon keep data on the host filesystem
    for (const app of await apps.list()) {
        if (!app.manifest.addons?.localstorage) continue;
        const dataDir = await apps.getStorageDir(app);
        const [, dfResult] = await safe(df.file(dataDir));
        disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
    }

    return disks;
}

// Checks every tracked filesystem holding Cloudron data and raises (or clears, when
// the message is empty) the ALERT_DISK_SPACE notification.
async function checkDiskSpace() {
    debug('checkDiskSpace: checking disk space');

    const disks = await getDisks();

    let markdownMessage = '';
    for (const filesystem of Object.keys(disks)) {
        const disk = disks[filesystem];
        if (disk.contents.length === 0) continue; // ignore if nothing interesting here

        if (disk.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.25G
            markdownMessage += `* ${disk.filesystem} is at ${disk.capacity*100}% capacity.\n`;
        }
    }

    debug(`checkDiskSpace: disk space checked. out of space: ${markdownMessage || 'no'}`);

    if (markdownMessage) markdownMessage = `One or more file systems are running out of space. 
Please increase the disk size at the earliest.\n\n${markdownMessage}`;

    // an empty markdownMessage clears any existing alert
    await notifications.alert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space', markdownMessage);
}

// Returns the total swap size in bytes by summing all swap devices reported by swapon.
// Returns 0 when swapon produces no output (no swap configured or command failed).
function getSwapSize() {
    const stdout = safe.child_process.execSync('swapon --noheadings --raw --bytes --show=SIZE', { encoding: 'utf8' });
    const swap = !stdout ? 0 : stdout.trim().split('\n').map(x => parseInt(x, 10) || 0).reduce((acc, cur) => acc + cur, 0);
    return swap;
}

// Returns total physical memory and swap, both in bytes.
async function getMemory() {
    return { memory: os.totalmem(), swap: getSwapSize() };
}

// Given a total memory limit in bytes, returns the RAM portion (the rest is swap),
// rounded to the nearest MB. The RAM:total ratio comes from SWAP_RATIO_FILE when set,
// otherwise it is derived from the host's actual RAM/swap proportions.
function getMemoryAllocation(limit) {
    let ratio = parseFloat(safe.fs.readFileSync(paths.SWAP_RATIO_FILE, 'utf8')); // parseFloat takes no radix
    if (!ratio) { // covers NaN (missing/unreadable file) and an explicit 0
        const pc = os.totalmem() / (os.totalmem() + getSwapSize());
        ratio = Math.round(pc * 10) / 10; // a simple ratio
    }
    return Math.round(Math.round(limit * ratio) / 1048576) * 1048576; // nearest MB
}

// Returns the cached disk usage report written by updateDiskUsage(), or null if the
// cache file is missing or unparseable.
async function getDiskUsage() {
    return safe.JSON.parse(safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8'));
}

// Recomputes the usage of every tracked content entry (via du, or docker.df() for the
// docker root), reports progress through progressCallback({ percent?, message }), and
// persists the result to DISK_USAGE_FILE. Returns the disks map with `usage` filled in.
async function updateDiskUsage(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    const disks = await getDisks();
    const filesystems = Object.keys(disks);
    const now = Date.now();

    let percent = 1;
    for (const filesystem of filesystems) {
        const disk = disks[filesystem];
        percent += (100/filesystems.length);
        progressCallback({ percent, message: `Checking contents of ${filesystem}`});
        for (const content of disk.contents) {
            progressCallback({ message: `Checking du of ${JSON.stringify(content)}`});
            if (content.id === 'docker') {
                // du on the docker root would double-count layers; ask the daemon instead
                content.usage = (await docker.df()).LayersSize;
            } else {
                const [error, usage] = await safe(du(content.path));
                if (error) progressCallback({ message: `du error: ${error.message}`}); // can happen if app is installing etc
                content.usage = usage || 0;
            }
            progressCallback({ message: `du of ${JSON.stringify(content)}: ${content.usage}`});
        }
    }

    if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, disks }), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`);

    return disks;
}