rename disks to filesystems

This commit is contained in:
Girish Ramakrishnan
2024-11-30 11:46:28 +01:00
parent f219abf082
commit 56f6519b3e
5 changed files with 44 additions and 40 deletions

View File

@@ -17,7 +17,7 @@ exports = module.exports = {
getCpus,
// exported for testing
_getDisks: getDisks
_getFilesystems: getFilesystems
};
const apps = require('./apps.js'),
@@ -90,11 +90,11 @@ async function getSwaps() {
}
// this gets information based on mounted filesystems
async function getDisks() {
const [dfError, dfEntries] = await safe(df.disks());
async function getFilesystems() {
const [dfError, dfEntries] = await safe(df.filesystems());
if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${dfError.message}`);
const disks = {}; // by file system
const filesystems = {}; // by file system
let rootDisk;
const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types
@@ -102,7 +102,7 @@ async function getDisks() {
for (const disk of dfEntries) {
if (!DISK_TYPES.includes(disk.type)) continue;
if (disk.mountpoint === '/') rootDisk = disk;
disks[disk.filesystem] = {
filesystems[disk.filesystem] = {
filesystem: disk.filesystem,
type: disk.type,
size: disk.size,
@@ -123,37 +123,37 @@ async function getDisks() {
for (const stdPath of standardPaths) {
const [dfError, diskInfo] = await safe(df.file(stdPath.path));
if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting std path: ${dfError.message}`);
disks[diskInfo.filesystem].contents.push(stdPath);
filesystems[diskInfo.filesystem].contents.push(stdPath);
}
const backupConfig = await backups.getConfig();
if (backupConfig.provider === 'filesystem') {
const [, dfResult] = await safe(df.file(backupConfig.backupFolder));
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
}
// often the default backup dir is not cleaned up
if (backupConfig.provider !== 'filesystem' || backupConfig.backupFolder !== paths.DEFAULT_BACKUP_DIR) {
const [, dfResult] = await safe(df.file(paths.DEFAULT_BACKUP_DIR));
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', path: paths.DEFAULT_BACKUP_DIR });
if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'cloudron-backup-default', id: 'cloudron-backup-default', path: paths.DEFAULT_BACKUP_DIR });
}
const [dockerError, dockerInfo] = await safe(docker.info());
if (!dockerError) {
const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) {
disks[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
disks[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', path: dockerInfo.DockerRootDir });
if (filesystems[filesystem]) {
filesystems[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
filesystems[filesystem].contents.push({ type: 'standard', id: 'docker-volumes', path: dockerInfo.DockerRootDir });
}
}
for (const volume of await volumes.list()) {
const [, dfResult] = await safe(df.file(volume.hostPath));
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
}
for (const app of await apps.list()) {
@@ -163,7 +163,7 @@ async function getDisks() {
if (dataDir === null) continue;
const [, dfResult] = await safe(df.file(dataDir));
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
if (filesystems[filesystem]) filesystems[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
}
const swaps = await getSwaps();
@@ -172,25 +172,25 @@ async function getDisks() {
if (swap.type !== 'file') continue;
const [, dfResult] = await safe(df.file(swap.name));
disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'swap', id: swap.name, path: swap.name });
filesystems[dfResult?.filesystem || rootDisk.filesystem]?.contents.push({ type: 'swap', id: swap.name, path: swap.name }); // guard like the other call sites: swap may sit on an untracked filesystem type
}
return disks;
return filesystems;
}
async function checkDiskSpace() {
debug('checkDiskSpace: checking disk space');
const disks = await getDisks();
const filesystems = await getFilesystems();
let markdownMessage = '';
for (const filesystem of Object.keys(disks)) {
const disk = disks[filesystem];
if (disk.contents.length === 0) continue; // ignore if nothing interesting here
for (const fsPath in filesystems) {
const filesystem = filesystems[fsPath];
if (filesystem.contents.length === 0) continue; // ignore if nothing interesting here
if (disk.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.5G
markdownMessage += `* ${disk.filesystem} is at ${disk.capacity*100}% capacity.\n`;
if (filesystem.available <= (1.25 * 1024 * 1024 * 1024)) { // 1.25G
markdownMessage += `* ${filesystem.filesystem} is at ${filesystem.capacity*100}% capacity.\n`;
}
}
@@ -217,27 +217,31 @@ async function getMemory() {
}
async function getDiskUsage() {
return safe.JSON.parse(safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8'));
const cache = safe.JSON.parse(safe.fs.readFileSync(paths.DISK_USAGE_FILE, 'utf8'));
if (cache?.disks) {
cache.filesystems = cache.disks; // legacy cache file had "disks"
delete cache.disks;
}
return cache;
}
async function updateDiskUsage(progressCallback) {
assert.strictEqual(typeof progressCallback, 'function');
const disks = await getDisks();
const filesystems = Object.keys(disks);
const filesystems = await getFilesystems();
const now = Date.now();
let percent = 1;
for (const filesystem of filesystems) {
const disk = disks[filesystem];
for (const fsPath in filesystems) {
const filesystem = filesystems[fsPath];
if (disk.type === 'ext4' || disk.type === 'xfs') {
if (filesystem.type === 'ext4' || filesystem.type === 'xfs') {
const [speedError, speed] = await safe(hdparm(filesystem));
if (speedError) progressCallback({ message: `hdparm error: ${speedError.message}`});
disk.speed = speedError ? -1 : speed;
filesystem.speed = speedError ? -1 : speed;
} else {
disk.speed = -1;
filesystem.speed = -1;
}
percent += (100/Object.keys(filesystems).length); // filesystems is now an object keyed by device, not an array — .length would be undefined (NaN)
@@ -246,7 +250,7 @@ async function updateDiskUsage(progressCallback) {
const dockerDf = await docker.df();
for (const content of disk.contents) {
for (const content of filesystem.contents) {
progressCallback({ message: `Checking du of ${JSON.stringify(content)}`});
if (content.id === 'docker') {
content.usage = dockerDf.LayersSize;
@@ -261,9 +265,9 @@ async function updateDiskUsage(progressCallback) {
}
}
if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, disks }, null, 4), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`);
if (!safe.fs.writeFileSync(paths.DISK_USAGE_FILE, JSON.stringify({ ts: now, filesystems }, null, 4), 'utf8')) throw new BoxError(BoxError.FS_ERROR, `Could not write du cache file: ${safe.error.message}`);
return disks;
return filesystems;
}
async function reboot() {