// NOTE(review): removed page-scrape artifacts ("388 lines / 14 KiB / JavaScript") that preceded the source
'use strict';
|
|
|
|
// Public interface of this module. All exported functions are async except getProvider.
exports = module.exports = {
    // power management
    reboot,

    // host information
    getInfo,
    getUbuntuVersion,

    // disks, swap and usage reporting
    getDisks,
    getSwaps,
    checkDiskSpace,
    getMemory,
    getMemoryAllocation,
    getDiskUsage,
    updateDiskUsage,
    startUpdateDiskUsage,

    // logs and block devices
    getLogs,
    getBlockDevices,

    // periodic health checks and misc
    runSystemChecks,
    getProvider,
    getCpus,
};
|
|
|
|
const apps = require('./apps.js'),
|
|
assert = require('assert'),
|
|
backups = require('./backups.js'),
|
|
BoxError = require('./boxerror.js'),
|
|
debug = require('debug')('box:disks'),
|
|
df = require('./df.js'),
|
|
docker = require('./docker.js'),
|
|
eventlog = require('./eventlog.js'),
|
|
fs = require('fs'),
|
|
logs = require('./logs.js'),
|
|
notifications = require('./notifications.js'),
|
|
os = require('os'),
|
|
path = require('path'),
|
|
paths = require('./paths.js'),
|
|
safe = require('safetydance'),
|
|
shell = require('./shell.js'),
|
|
tasks = require('./tasks.js'),
|
|
volumes = require('./volumes.js');
|
|
|
|
// Helper scripts executed as root via sudo (see scripts/ directory).
const DU_CMD = path.join(__dirname, 'scripts/du.sh'); // disk usage of a path
const HDPARM_CMD = path.join(__dirname, 'scripts/hdparm.sh'); // disk read speed measurement
const REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh'); // reboots the host
|
|
|
|
// Returns the disk usage of `file` in bytes by running the du helper script as root.
// Throws a BoxError of type FS_ERROR when the script fails.
async function du(file) {
    assert.strictEqual(typeof file, 'string');

    const [duError, output] = await safe(shell.promises.sudo('system', [ DU_CMD, file ], {}));
    if (duError) throw new BoxError(BoxError.FS_ERROR, duError);

    return parseInt(output.trim(), 10);
}
|
|
|
|
// Measures the cached read speed (MB/sec) of the disk backing `file` using the
// hdparm helper script. Returns -1 when the script output cannot be parsed.
// Throws a BoxError of type FS_ERROR when the script itself fails.
async function hdparm(file) {
    assert.strictEqual(typeof file, 'string');

    const [error, stdoutResult] = await safe(shell.promises.sudo('system', [ HDPARM_CMD, file ], {}));
    if (error) throw new BoxError(BoxError.FS_ERROR, error);

    const lines = stdoutResult.split('\n');

    // expected output is exactly 4 lines with the timing result as "... = <speed> MB/sec" on line 3
    if (lines.length !== 4) return -1; // was `!=`; use strict comparison
    if (lines[2].split('=').length !== 2) return -1;

    // take the leading numeric portion of the value; assumes "<num> MB/sec" — TODO confirm against hdparm.sh output
    const speed = lines[2].split('=')[1].slice(0, 'MB/sec'.length).trim();

    return Number(speed);
}
|
|
|
|
// Returns the active swap areas keyed by device/file name, as reported by swapon(8).
// Each entry is { name, type ('partition'|'file'), size, used } with sizes in bytes.
// Returns {} when there is no swap or swapon fails.
async function getSwaps() {
    const stdout = safe.child_process.execSync('swapon --noheadings --raw --bytes --show=type,size,used,name', { encoding: 'utf8' });
    if (!stdout) return {};

    const swaps = {};
    for (const line of stdout.trim().split('\n')) {
        const parts = line.split(' ', 4); // --raw separates the columns with single spaces
        const name = parts[3];
        swaps[name] = {
            name,
            type: parts[0], // partition or file
            size: parseInt(parts[1], 10), // always pass the radix
            used: parseInt(parts[2], 10),
        };
    }

    return swaps;
}
|
|
|
|
// Returns the mounted disks keyed by filesystem, each annotated with the
// "interesting" contents (platform data, backups, docker, volumes, app local
// storage, swap files) that live on it. Throws a BoxError of type FS_ERROR
// when df fails.
async function getDisks() {
    const [dfError, dfEntries] = await safe(df.disks());
    if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`);

    const disks = {}; // by file system
    let rootDisk;

    const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types

    for (const disk of dfEntries) {
        if (!DISK_TYPES.includes(disk.type)) continue;
        if (disk.mountpoint === '/') rootDisk = disk;
        disks[disk.filesystem] = {
            filesystem: disk.filesystem,
            type: disk.type,
            size: disk.size,
            used: disk.used,
            available: disk.available,
            capacity: disk.capacity,
            mountpoint: disk.mountpoint,
            contents: [] // filled below
        };
    }

    const standardPaths = [
        { type: 'standard', id: 'platformdata', path: paths.PLATFORM_DATA_DIR },
        { type: 'standard', id: 'boxdata', path: paths.BOX_DATA_DIR },
        { type: 'standard', id: 'maildata', path: paths.MAIL_DATA_DIR },
    ];

    for (const stdPath of standardPaths) {
        const [stdPathError, diskInfo] = await safe(df.file(stdPath.path));
        if (stdPathError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${stdPathError.message}`);
        // guard like the other sections below; the path may live on an untracked filesystem type
        if (disks[diskInfo.filesystem]) disks[diskInfo.filesystem].contents.push(stdPath);
    }

    const backupConfig = await backups.getConfig();
    if (backupConfig.provider === 'filesystem') {
        const [, dfResult] = await safe(df.file(backupConfig.backupFolder));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem; // fall back to root disk when df fails
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
    }

    // often the default backup dir is not cleaned up
    if (backupConfig.provider !== 'filesystem' || backupConfig.backupFolder !== paths.DEFAULT_BACKUP_DIR) {
        const [, dfResult] = await safe(df.file(paths.DEFAULT_BACKUP_DIR));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', path: paths.DEFAULT_BACKUP_DIR });
    }

    const [dockerError, dockerInfo] = await safe(docker.info());
    if (!dockerError) {
        const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem;
        if (disks[filesystem]) {
            disks[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
            disks[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', path: dockerInfo.DockerRootDir });
        }
    }

    for (const volume of await volumes.list()) {
        const [, dfResult] = await safe(df.file(volume.hostPath));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
    }

    for (const app of await apps.list()) {
        if (!app.manifest.addons?.localstorage) continue; // only apps with local storage occupy disk space here

        const dataDir = await apps.getStorageDir(app);
        const [, dfResult] = await safe(df.file(dataDir));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem;
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
    }

    const swaps = await getSwaps();
    for (const k in swaps) {
        const swap = swaps[k];
        if (swap.type !== 'file') continue; // swap partitions are not "contents" of a tracked disk

        const [, dfResult] = await safe(df.file(swap.name));
        const filesystem = dfResult?.filesystem || rootDisk?.filesystem;
        // guard like the sections above; the original pushed unconditionally and
        // crashed with a TypeError when the swap file lives on an untracked filesystem
        if (disks[filesystem]) disks[filesystem].contents.push({ type: 'swap', id: swap.name, path: swap.name });
    }

    return disks;
}
|
|
|
|
// Checks every tracked disk that holds Cloudron data and raises (or clears)
// the ALERT_DISK_SPACE notification when free space runs low.
async function checkDiskSpace() {
    debug('checkDiskSpace: checking disk space');

    const disks = await getDisks();

    let markdownMessage = '';

    for (const filesystem of Object.keys(disks)) {
        const disk = disks[filesystem];
        if (disk.contents.length === 0) continue; // ignore if nothing interesting here

        if (disk.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.25 GiB threshold (previous comment incorrectly said 1.5G)
            markdownMessage += `* ${disk.filesystem} is at ${disk.capacity*100}% capacity.\n`; // assumes capacity is a 0..1 fraction — TODO confirm against df.js
        }
    }

    debug(`checkDiskSpace: disk space checked. out of space: ${markdownMessage || 'no'}`);

    if (markdownMessage) {
        const finalMessage = `One or more file systems are running out of space. Please increase the disk size at the earliest.\n\n${markdownMessage}`;
        await notifications.alert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space', finalMessage, { persist: true });
    } else {
        await notifications.clearAlert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space');
    }
}
|
|
|
|
// Returns the total size (bytes) of all active swap areas.
async function getSwapSize() {
    const swaps = await getSwaps();

    let total = 0;
    for (const name of Object.keys(swaps)) total += swaps[name].size;

    return total;
}
|
|
|
|
// Returns the host's total physical memory and total swap, both in bytes.
async function getMemory() {
    const memory = os.totalmem();
    const swap = await getSwapSize();

    return { memory, swap };
}
|
|
|
|
// Splits a memory `limit` (bytes) into the RAM portion according to the
// configured swap ratio. The ratio is read from SWAP_RATIO_FILE; when the file
// is missing or unparsable, it is derived from the system's RAM:(RAM+swap)
// proportion. The result is rounded to the nearest MB.
async function getMemoryAllocation(limit) {
    // parseFloat takes a single argument (the original passed a bogus radix of 10)
    let ratio = parseFloat(safe.fs.readFileSync(paths.SWAP_RATIO_FILE, 'utf8'));

    if (!ratio) { // covers NaN (missing/garbled file) and an explicit 0
        const pc = os.totalmem() / (os.totalmem() + await getSwapSize());
        ratio = Math.round(pc * 10) / 10; // a simple ratio
    }

    return Math.round(Math.round(limit * ratio) / 1048576) * 1048576; // nearest MB
}
|
|
|
|
// Returns the cached disk usage report written by updateDiskUsage(), or null
// when the cache file is missing or unparsable.
async function getDiskUsage() {
    const cached = safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8');
    return safe.JSON.parse(cached);
}
|
|
|
|
// Recomputes disk speed and per-content disk usage for all disks and persists
// the result to DISK_USAGE_FILE. `progressCallback({ percent?, message })` is
// invoked with progress updates. Returns the disks object. Throws a BoxError
// of type FS_ERROR when the cache file cannot be written.
async function updateDiskUsage(progressCallback) {
    assert.strictEqual(typeof progressCallback, 'function');

    const disks = await getDisks();
    const filesystems = Object.keys(disks);
    const now = Date.now();

    // docker usage is identical for every filesystem; fetch it once instead of
    // once per disk (the original awaited docker.df() inside the loop)
    const dockerDf = filesystems.length !== 0 ? await docker.df() : null;

    let percent = 1;

    for (const filesystem of filesystems) {
        const disk = disks[filesystem];

        if (disk.type === 'ext4' || disk.type === 'xfs') {
            const [speedError, speed] = await safe(hdparm(filesystem));
            if (speedError) progressCallback({ message: `hdparm error: ${speedError.message}`});
            disk.speed = speedError ? -1 : speed;
        } else {
            disk.speed = -1; // speed test only makes sense for local ext4/xfs disks
        }

        percent += (100/filesystems.length);
        progressCallback({ percent, message: `Checking contents of ${filesystem}`});

        for (const content of disk.contents) {
            progressCallback({ message: `Checking du of ${JSON.stringify(content)}`});
            if (content.id === 'docker') {
                content.usage = dockerDf.LayersSize;
            } else if (content.id === 'docker-volumes') {
                content.usage = dockerDf.Volumes.map((v) => v.UsageData.Size).reduce((a,b) => a + b, 0);
            } else {
                const [error, usage] = await safe(du(content.path));
                if (error) progressCallback({ message: `du error: ${error.message}`}); // can happen if app is installing etc
                content.usage = usage || 0;
            }
            progressCallback({ message: `du of ${JSON.stringify(content)}: ${content.usage}`});
        }
    }

    if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, disks }), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`);

    return disks;
}
|
|
|
|
// Clears the pending-reboot notification and reboots the host via the reboot
// helper script. A failure to reboot is logged but not propagated.
async function reboot() {
    await notifications.clearAlert(notifications.ALERT_REBOOT, 'Reboot Required');

    const [rebootError] = await safe(shell.promises.sudo('reboot', [ REBOOT_CMD ], {}));
    if (rebootError) debug('reboot: could not reboot. %o', rebootError);
}
|
|
|
|
// Returns basic host information: hardware vendor/product (from DMI), uptime in
// seconds, whether a reboot is pending and the Cloudron activation time (or null).
async function getInfo() {
    // https://serverfault.com/questions/92932/how-does-ubuntu-keep-track-of-the-system-restart-required-flag-in-motd
    const rebootRequired = fs.existsSync('/var/run/reboot-required');
    const uptime = safe.fs.readFileSync('/proc/uptime', 'utf8') || ''; // guard: safe reader returns null on error (original then threw TypeError)
    const uptimeSecs = parseInt(uptime.split(' ')[0], 10);

    // the DMI files do not exist on all platforms (e.g. some VMs/ARM boards); fall back to ''
    const sysVendor = safe.fs.readFileSync('/sys/devices/virtual/dmi/id/sys_vendor', 'utf8') || '';
    const productName = safe.fs.readFileSync('/sys/devices/virtual/dmi/id/product_name', 'utf8') || '';

    const activationTime = (await eventlog.getActivationEvent())?.creationTime || null;

    return {
        sysVendor: sysVendor.trim(),
        productName: productName.trim(),
        uptimeSecs,
        rebootRequired,
        activationTime
    };
}
|
|
|
|
// Creates and starts the background task that refreshes the disk usage cache.
// Returns the task id so the caller can track progress.
async function startUpdateDiskUsage() {
    const taskId = await tasks.add(tasks.TASK_UPDATE_DISK_USAGE, []);

    tasks.startTask(taskId, {}); // fire-and-forget; progress is tracked via the task id

    return taskId;
}
|
|
|
|
// Returns a LogStream for the logs of the given unit. Supported options are
// lines, follow and format. Throws a BoxError of type BAD_FIELD for unknown units.
async function getLogs(unit, options) {
    assert.strictEqual(typeof unit, 'string');
    assert(options && typeof options === 'object');

    debug(`Getting logs for ${unit}`);

    if (unit !== 'box') throw new BoxError(BoxError.BAD_FIELD, `No such unit '${unit}'`);

    const logFile = path.join(paths.LOG_DIR, 'box.log'); // box.log is at the top

    const tailProcess = logs.tail([logFile], { lines: options.lines, follow: options.follow });

    const logStream = new logs.LogStream({ format: options.format || 'json', source: unit });
    logStream.close = tailProcess.kill.bind(tailProcess, 'SIGKILL'); // hook for caller. closing stream kills the child process

    tailProcess.stdout.pipe(logStream);

    return logStream;
}
|
|
|
|
// Lists ext4/xfs block devices via lsblk(8). Returns an array of
// { path, size, type, uuid, mountpoint }. Note: `size` carries lsblk's fsavail
// (available space) — kept as-is for interface compatibility. Throws a BoxError
// of type INTERNAL_ERROR when lsblk fails or its output cannot be parsed.
async function getBlockDevices() {
    const info = safe.JSON.parse(safe.child_process.execSync('lsblk --paths --json --list --fs', { encoding: 'utf8' }));
    if (!info) throw new BoxError(BoxError.INTERNAL_ERROR, safe.error.message);

    const devices = info.blockdevices.filter(d => d.fstype === 'ext4' || d.fstype === 'xfs');

    debug(`getBlockDevices: Found ${devices.length} devices. ${devices.map(d => d.name).join(', ')}`);

    return devices.map(function (d) {
        // newer lsblk exposes an array of mountpoints; older versions expose a single
        // .mountpoint. we only support one mountpoint, so take the last entry — without
        // mutating lsblk's parsed output (the original used .pop())
        const mountpoint = d.mountpoints ? d.mountpoints[d.mountpoints.length - 1] : d.mountpoint;
        return {
            path: d.name, // --paths makes lsblk report the full device path in .name
            size: d.fsavail || 0,
            type: d.fstype,
            uuid: d.uuid,
            mountpoint
        };
    });
}
|
|
|
|
// Raises or clears the ALERT_REBOOT notification depending on whether ubuntu
// flagged a pending reboot.
async function checkRebootRequired() {
    const { rebootRequired } = await getInfo();

    if (!rebootRequired) {
        await notifications.clearAlert(notifications.ALERT_REBOOT, 'Reboot Required');
        return;
    }

    await notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', 'To finish ubuntu security updates, a reboot is necessary.', { persist: true });
}
|
|
|
|
// Returns the human-readable distribution description from /etc/lsb-release
// (the DISTRIB_DESCRIPTION value). Throws a BoxError of type FS_ERROR when the
// file cannot be read or does not contain a quoted DISTRIB_DESCRIPTION line.
async function getUbuntuVersion() {
    const release = safe.fs.readFileSync('/etc/lsb-release', 'utf-8');
    if (release === null) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

    // guard: the original indexed the match result unconditionally and crashed
    // with a TypeError when the line was missing or unquoted
    const match = release.match(/DISTRIB_DESCRIPTION="(.*)"/);
    if (!match) throw new BoxError(BoxError.FS_ERROR, 'Could not determine DISTRIB_DESCRIPTION from /etc/lsb-release');

    return match[1];
}
|
|
|
|
// Raises the ALERT_UPDATE_UBUNTU notification when the host still runs the
// end-of-life Ubuntu 16.04.
async function checkUbuntuVersion() {
    // use the error-safe reader like the rest of this file; the original used raw
    // fs.readFileSync and threw when /etc/lsb-release was missing
    const release = safe.fs.readFileSync('/etc/lsb-release', 'utf-8') || '';
    if (!release.includes('16.04')) return;

    await notifications.alert(notifications.ALERT_UPDATE_UBUNTU, 'Ubuntu upgrade required', 'Ubuntu 16.04 has reached end of life and will not receive security and maintenance updates. Please follow https://docs.cloudron.io/guides/upgrade-ubuntu-18/ to upgrade to Ubuntu 18 at the earliest.', { persist: true });
}
|
|
|
|
// Runs all periodic system health checks. Individual check failures do not
// abort the others (allSettled collects every outcome).
async function runSystemChecks() {
    debug('runSystemChecks: checking status');

    await Promise.allSettled([
        checkRebootRequired(),
        checkUbuntuVersion()
    ]);
}
|
|
|
|
// Returns the infrastructure provider this Cloudron runs on, falling back to
// 'generic' when the provider file is missing or empty.
function getProvider() {
    const contents = safe.fs.readFileSync(paths.PROVIDER_FILE, 'utf8');
    if (!contents) return 'generic';
    return contents.trim();
}
|
|
|
|
// Returns per-core CPU information as reported by the operating system.
async function getCpus() {
    const cpus = os.cpus();
    return cpus;
}
|