diff --git a/dashboard/public/views/system.js b/dashboard/public/views/system.js index 552e598fc..ea5f31331 100644 --- a/dashboard/public/views/system.js +++ b/dashboard/public/views/system.js @@ -74,7 +74,7 @@ angular.module('Application').controller('SystemController', ['$scope', '$locati $scope.disks.ts = result.usage.ts; // [ { filesystem, type, size, used, available, capacity, mountpoint }] - $scope.disks.disks = Object.keys(result.usage.disks).map(function (k) { return result.usage.disks[k]; }); + $scope.disks.disks = Object.keys(result.usage.filesystems).map(function (k) { return result.usage.filesystems[k]; }); // convert object to array... $scope.disks.disks.forEach(function (disk) { var usageOther = disk.used; diff --git a/src/df.js b/src/df.js index 79c3c3ca9..a98475c8e 100644 --- a/src/df.js +++ b/src/df.js @@ -1,7 +1,7 @@ 'use strict'; exports = module.exports = { - disks, + filesystems, file, prettyBytes }; @@ -36,10 +36,10 @@ function parseLine(line) { }; } -async function disks() { +async function filesystems() { const [error, output] = await safe(shell.spawn('df', ['-B1', '--output=source,fstype,size,used,avail,pcent,target'], { encoding: 'utf8', timeout: 5000 })); if (error) { - debug(`disks: df command failed. error: ${error}\n stdout: ${error.stdout}\n stderr: ${error.stderr}`); + debug(`filesystems: df command failed. 
error: ${error}\n stdout: ${error.stdout}\n stderr: ${error.stderr}`); throw new BoxError(BoxError.FS_ERROR, `Error running df: ${error.message}`); } diff --git a/src/system.js b/src/system.js index 4c3412541..516bcb923 100644 --- a/src/system.js +++ b/src/system.js @@ -17,7 +17,7 @@ exports = module.exports = { getCpus, // exported for testing - _getDisks: getDisks + _getFilesystems: getFilesystems }; const apps = require('./apps.js'), @@ -90,11 +90,11 @@ async function getSwaps() { } // this gets information based on mounted filesystems -async function getDisks() { - const [dfError, dfEntries] = await safe(df.disks()); +async function getFilesystems() { + const [dfError, dfEntries] = await safe(df.filesystems()); if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`); - const disks = {}; // by file system + const filesystems = {}; // by file system let rootDisk; const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types @@ -102,7 +102,7 @@ async function getDisks() { for (const disk of dfEntries) { if (!DISK_TYPES.includes(disk.type)) continue; if (disk.mountpoint === '/') rootDisk = disk; - disks[disk.filesystem] = { + filesystems[disk.filesystem] = { filesystem: disk.filesystem, type: disk.type, size: disk.size, @@ -123,37 +123,37 @@ async function getDisks() { for (const stdPath of standardPaths) { const [dfError, diskInfo] = await safe(df.file(stdPath.path)); if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${dfError.message}`); - disks[diskInfo.filesystem].contents.push(stdPath); + filesystems[diskInfo.filesystem].contents.push(stdPath); } const backupConfig = await backups.getConfig(); if (backupConfig.provider === 'filesystem') { const [, dfResult] = await safe(df.file(backupConfig.backupFolder)); const filesystem = dfResult?.filesystem || rootDisk.filesystem; - if (disks[filesystem]) disks[filesystem].contents.push({ type: 
'standard', id: 'cloudron-backup', path: backupConfig.backupFolder }); + if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder }); } // often the default backup dir is not cleaned up if (backupConfig.provider !== 'filesystem' || backupConfig.backupFolder !== paths.DEFAULT_BACKUP_DIR) { const [, dfResult] = await safe(df.file(paths.DEFAULT_BACKUP_DIR)); const filesystem = dfResult?.filesystem || rootDisk.filesystem; - if (disks[filesystem]) disks[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', path: paths.DEFAULT_BACKUP_DIR }); + if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', path: paths.DEFAULT_BACKUP_DIR }); } const [dockerError, dockerInfo] = await safe(docker.info()); if (!dockerError) { const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir)); const filesystem = dfResult?.filesystem || rootDisk.filesystem; - if (disks[filesystem]) { - disks[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir }); - disks[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', path: dockerInfo.DockerRootDir }); + if (filesystems[filesystem]) { + filesystems[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir }); + filesystems[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', path: dockerInfo.DockerRootDir }); } } for (const volume of await volumes.list()) { const [, dfResult] = await safe(df.file(volume.hostPath)); const filesystem = dfResult?.filesystem || rootDisk.filesystem; - if (disks[filesystem]) disks[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath }); + if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath }); } for (const app of await 
apps.list()) { @@ -163,7 +163,7 @@ async function getDisks() { if (dataDir === null) continue; const [, dfResult] = await safe(df.file(dataDir)); const filesystem = dfResult?.filesystem || rootDisk.filesystem; - if (disks[filesystem]) disks[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir }); + if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir }); } const swaps = await getSwaps(); @@ -172,25 +172,25 @@ async function getDisks() { if (swap.type !== 'file') continue; const [, dfResult] = await safe(df.file(swap.name)); - disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'swap', id: swap.name, path: swap.name }); + filesystems[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'swap', id: swap.name, path: swap.name }); } - return disks; + return filesystems; } async function checkDiskSpace() { debug('checkDiskSpace: checking disk space'); - const disks = await getDisks(); + const filesystems = await getFilesystems(); let markdownMessage = ''; - for (const filesystem of Object.keys(disks)) { - const disk = disks[filesystem]; - if (disk.contents.length === 0) continue; // ignore if nothing interesting here + for (const fsPath in filesystems) { + const filesystem = filesystems[fsPath]; + if (filesystem.contents.length === 0) continue; // ignore if nothing interesting here - if (disk.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.5G - markdownMessage += `* ${disk.filesystem} is at ${disk.capacity*100}% capacity.\n`; + if (filesystem.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.25G + markdownMessage += `* ${filesystem.filesystem} is at ${filesystem.capacity*100}% capacity.\n`; } } @@ -217,27 +217,31 @@ async function getMemory() { } async function getDiskUsage() { - return safe.JSON.parse(safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8')); + const cache = safe.JSON.parse(safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8')); + if (cache?.disks) { +
cache.filesystems = cache.disks; // legacy cache file had "disks" + delete cache.disks; + } + return cache; } async function updateDiskUsage(progressCallback) { assert.strictEqual(typeof progressCallback, 'function'); - const disks = await getDisks(); - const filesystems = Object.keys(disks); + const filesystems = await getFilesystems(); const now = Date.now(); let percent = 1; - for (const filesystem of filesystems) { - const disk = disks[filesystem]; + for (const fsPath in filesystems) { + const filesystem = filesystems[fsPath]; - if (disk.type === 'ext4' || disk.type === 'xfs') { + if (filesystem.type === 'ext4' || filesystem.type === 'xfs') { - const [speedError, speed] = await safe(hdparm(filesystem)); + const [speedError, speed] = await safe(hdparm(fsPath)); if (speedError) progressCallback({ message: `hdparm error: ${speedError.message}`}); - disk.speed = speedError ? -1 : speed; + filesystem.speed = speedError ? -1 : speed; } else { - disk.speed = -1; + filesystem.speed = -1; } - percent += (100/filesystems.length); + percent += (100/Object.keys(filesystems).length); @@ -246,7 +250,7 @@ async function updateDiskUsage(progressCallback) { const dockerDf = await docker.df(); - for (const content of disk.contents) { + for (const content of filesystem.contents) { progressCallback({ message: `Checking du of ${JSON.stringify(content)}`}); if (content.id === 'docker') { content.usage = dockerDf.LayersSize; @@ -261,9 +265,9 @@ async function updateDiskUsage(progressCallback) { } } - if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, disks }, null, 4), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`); + if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, filesystems }, null, 4), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`); - return disks; + return filesystems; } async function reboot() { diff --git a/src/test/df-test.js b/src/test/df-test.js index 606261d8c..517e22b23 100644 --- a/src/test/df-test.js +++
b/src/test/df-test.js @@ -16,11 +16,11 @@ describe('System', function () { before(setup); after(cleanup); - it('can get disks', async function () { + it('can get filesystems', async function () { // does not work on archlinux 8! if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return; - const disks = await df.disks(); + const disks = await df.filesystems(); expect(disks).to.be.ok(); expect(disks.some(d => d.mountpoint === '/')).to.be.ok(); }); diff --git a/src/test/system-test.js b/src/test/system-test.js index b3ccda69a..6dcbee153 100644 --- a/src/test/system-test.js +++ b/src/test/system-test.js @@ -16,11 +16,11 @@ describe('System', function () { before(setup); after(cleanup); - it('can get disks', async function () { + it('can get filesystems', async function () { // does not work on archlinux 8! if (require('child_process').execSync('uname -a').toString().indexOf('-arch') !== -1) return; - const disks = await system._getDisks(); + const disks = await system._getFilesystems(); expect(disks).to.be.ok(); expect(Object.keys(disks).some(fs => disks[fs].mountpoint === '/')).to.be.ok(); });