Compare commits

...

29 Commits
v7.5.0 ... 7.3

Author SHA1 Message Date
Girish Ramakrishnan
06cbea11ac cname fix again
e4d9dbb558 left out this line by mistake

(cherry picked from commit 2b260c873f)
2023-02-28 11:07:42 +01:00
Girish Ramakrishnan
7df1399f17 typo 2023-02-01 21:52:27 +01:00
Girish Ramakrishnan
ce8f6c4c6b ubuntu 18: systemd kill ends up killing the script itself
This is because KillMode=control-group by default

(cherry picked from commit c07c8b5bb8)
2023-02-01 18:51:16 +01:00
Girish Ramakrishnan
0832ebf052 ubuntu 18: ExecReload does not work
(cherry picked from commit 7bbc7c2306)
2023-02-01 17:28:27 +01:00
Girish Ramakrishnan
7be176a3b5 reverseproxy: LE backdates certs by an hour
https://community.letsencrypt.org/t/valid-from-date-on-cert-off-by-1-hour/103239
(cherry picked from commit 54add73d2a)
2023-02-01 13:07:40 +01:00
Girish Ramakrishnan
4d9612889b print subject and fix notBefore parsing
(cherry picked from commit 3f70edf3ec)
2023-02-01 12:39:22 +01:00
Girish Ramakrishnan
29faa722ac typo
(cherry picked from commit c63e0036cb)
2023-02-01 12:39:16 +01:00
Girish Ramakrishnan
2442abf10b reverseproxy: force renewal only renews if not issued in last 5 mins
otherwise, this leads to repeated renewals in checkCerts

(cherry picked from commit 3b9486596d)
2023-02-01 12:05:11 +01:00
Girish Ramakrishnan
dcbec79b77 reverseproxy: get dates
(cherry picked from commit eddfd20f24)
2023-02-01 12:05:05 +01:00
Girish Ramakrishnan
f874acbeb9 reverseproxy: add option to force renewal for e2e
(cherry picked from commit 690df0e5c4)
2023-02-01 12:04:58 +01:00
Girish Ramakrishnan
3e01faeca3 7.3.6 changes 2023-01-31 18:06:25 +01:00
Girish Ramakrishnan
f7d7e36f10 reverseproxy: rebuild in 7.3 only 2023-01-31 18:02:13 +01:00
Girish Ramakrishnan
972f453535 reverseproxy: fix issue where renewed certs are not written to disk
(cherry picked from commit ce9e78d23b)
2023-01-31 17:59:30 +01:00
Girish Ramakrishnan
d441b9d926 backup cleaner: do not delete mail snapshot
(cherry picked from commit 02b6aa93cb)
2023-01-31 11:51:32 +01:00
Johannes Zellner
e0c996840d Use correct error object to avoid crash
(cherry picked from commit 6f84fd3f71)
2023-01-31 11:33:30 +01:00
Girish Ramakrishnan
3c5987cdad more 7.3.5 changes 2023-01-25 10:22:37 +01:00
Girish Ramakrishnan
e0673d78b9 dns: resolve cname records using unbound
cname record can be external and the original NS may not respond to
recursive queries

(cherry picked from commit e4d9dbb558)
2023-01-25 09:58:24 +01:00
Girish Ramakrishnan
08136a5347 More 7.3.4 changes 2023-01-17 11:18:31 +01:00
Girish Ramakrishnan
98b5c77177 s3: add listing check
This is needed for situations like in cloudflare where the endpoint can
be mistakenly configured with the bucket name like https://xx.r2.cloudflarestorage.com/cloudron-backups .
The upload and del calls work but list and copy does not.

(cherry picked from commit 093fc98ae5)
2023-01-17 11:18:05 +01:00
Girish Ramakrishnan
ea441d0b4b s3: throw any copy errors
(cherry picked from commit 40bcfdba76)
2023-01-17 11:18:00 +01:00
Girish Ramakrishnan
d8b5b49ffd Update manifest format for runtimeDirs change 2023-01-16 12:27:58 +01:00
Girish Ramakrishnan
13fd595e8b contabo: network can be real slow
(cherry picked from commit 68f4f1ba85)
2022-12-24 16:34:18 +01:00
Girish Ramakrishnan
5f11e430bd unbound: disable controller interface explicitly
https://github.com/NLnetLabs/unbound/issues/806
(cherry picked from commit ae30fe25d7)
2022-12-24 16:34:09 +01:00
Girish Ramakrishnan
cfa9f901c1 Fix crash in RBL check
(cherry picked from commit d5793bc7c0)
2022-12-09 00:00:46 +01:00
Girish Ramakrishnan
ce2c9d9ac5 reverseproxy: fix typo in regexp matching
(cherry picked from commit d7d43c73fe)
2022-12-08 10:06:48 +01:00
Girish Ramakrishnan
8d5039da35 7.3.5 changes
(cherry picked from commit a198d1ea8d)
2022-12-08 10:06:43 +01:00
Girish Ramakrishnan
c264ff32c2 du: fix crash when filesystem is cifs/nfs/sshfs
(cherry picked from commit 67cde5a62c)
2022-12-08 08:54:41 +01:00
Johannes Zellner
5e30bea155 Start with a default to not fail if no swap is present
(cherry picked from commit d126f056fc)
2022-12-08 08:54:33 +01:00
Johannes Zellner
2c63a89199 Disallow jupyter hub on demo
(cherry picked from commit db5e0b8fdf)
2022-12-08 08:54:27 +01:00
17 changed files with 121 additions and 56 deletions

17
CHANGES
View File

@@ -2573,4 +2573,21 @@
[7.3.4]
* Display platform update status in the UI
* Fix image pruning
* cloudflare: fix issue where incorrect URL configuration is accepted
[7.3.5]
* du: fix crash when filesystem is cifs/nfs/sshfs
* Start with a default to not fail if no swap is present
* Fix bug in cert cleanup logic causing it to repeatedly clean up
* Fix crash in RBL check
* unbound: disable controller interface explicitly
* Fix issue where cert renewal logs were not displayed
* Fix loading of mailboxes
[7.3.6]
* aws: add melbourne region
* Fix display of box backups
* mail usage: fix issue caused by deleted mailboxes
* reverseproxy: fix issue where renewed certs are not written to disk
* support: fix crash when opening tickets with 0 length files

View File

@@ -17,7 +17,7 @@
"aws-sdk": "^2.1248.0",
"basic-auth": "^2.0.1",
"body-parser": "^1.20.1",
"cloudron-manifestformat": "^5.19.0",
"cloudron-manifestformat": "^5.19.1",
"connect": "^3.7.0",
"connect-lastmile": "^2.1.1",
"connect-timeout": "^1.9.0",

View File

@@ -236,8 +236,8 @@ while true; do
sleep 10
done
ip4=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
ip6=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
ip4=$(curl -s --fail --connect-timeout 10 --max-time 10 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
ip6=$(curl -s --fail --connect-timeout 10 --max-time 10 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
url4=""
url6=""

View File

@@ -179,8 +179,8 @@ systemctl disable systemd-resolved || true
# on vultr, ufw is enabled by default. we have our own firewall
ufw disable || true
# we need unbound to work as this is required for installer.sh to do any DNS requests
echo -e "server:\n\tinterface: 127.0.0.1\n" > /etc/unbound/unbound.conf.d/cloudron-network.conf
# we need unbound to work as this is required for installer.sh to do any DNS requests. control-enable is for https://github.com/NLnetLabs/unbound/issues/806
echo -e "server:\n\tinterface: 127.0.0.1\n\nremote-control:\n\tcontrol-enable: no\n" > /etc/unbound/unbound.conf.d/cloudron-network.conf
systemctl restart unbound
# Ubuntu 22 has private home directories by default (https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/)

View File

@@ -225,6 +225,9 @@ fi
rm -f /etc/cloudron/cloudron.conf
# 7.3 branch only: we had a bug in 7.3 that renewed certs were not written to disk. this will rebuild nginx/certs in the cron job
touch "${PLATFORM_DATA_DIR}/nginx/rebuild-needed"
log "Changing ownership"
# note, change ownership after db migrate. this allow db migrate to move files around as root and then we can fix it up here
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change

View File

@@ -14,3 +14,8 @@ server:
# enable below for logging to journalctl -u unbound
# verbosity: 5
# log-queries: yes
# https://github.com/NLnetLabs/unbound/issues/806
remote-control:
control-enable: no

View File

@@ -242,11 +242,11 @@ async function cleanupSnapshots(backupConfig) {
const info = safe.JSON.parse(contents);
if (!info) return;
delete info.box;
const progressCallback = (progress) => { debug(`cleanupSnapshots: ${progress.message}`); };
for (const appId of Object.keys(info)) {
if (appId === 'box' || appId === 'mail') continue;
const app = await apps.get(appId);
if (app) continue; // app is still installed

View File

@@ -229,7 +229,11 @@ async function copy(backupConfig, srcRemotePath, destRemotePath, progressCallbac
const newFilePath = backupFormat.api(format).getBackupFilePath(backupConfig, destRemotePath);
const startTime = new Date();
await safe(storage.api(provider).copy(backupConfig, oldFilePath, newFilePath, progressCallback));
const [copyError] = await safe(storage.api(provider).copy(backupConfig, oldFilePath, newFilePath, progressCallback));
if (copyError) {
debug(`copy: copied to ${destRemotePath} errored. error: ${copyError.message}`);
throw copyError;
}
debug(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - startTime)/1000} seconds`);
}

View File

@@ -315,7 +315,7 @@ async function setupDnsAndCert(subdomain, domain, auditSource, progressCallback)
if (ipv6) await dns.waitForDnsRecord(subdomain, domain, 'AAAA', ipv6, { interval: 30000, times: 50000 });
progressCallback({ percent: 60, message: `Getting certificate of ${dashboardFqdn}` });
const location = { subdomain, domain, fqdn: dashboardFqdn, type: apps.LOCATION_TYPE_DASHBOARD, certificate: null };
await reverseProxy.ensureCertificate(location, auditSource);
await reverseProxy.ensureCertificate(location, {}, auditSource);
}
async function syncDnsRecords(options) {

View File

@@ -40,6 +40,7 @@ exports = module.exports = {
DEMO_USERNAME: 'cloudron',
DEMO_BLACKLISTED_APPS: [
'org.jupyter.cloudronapp',
'com.github.cloudtorrent',
'net.alltubedownload.cloudronapp',
'com.adguard.home.cloudronapp',

View File

@@ -7,7 +7,8 @@ const assert = require('assert'),
debug = require('debug')('box:dns/waitfordns'),
dig = require('../dig.js'),
promiseRetry = require('../promise-retry.js'),
safe = require('safetydance');
safe = require('safetydance'),
_ = require('underscore');
async function resolveIp(hostname, type, options) {
assert.strictEqual(typeof hostname, 'string');
@@ -20,13 +21,13 @@ async function resolveIp(hostname, type, options) {
if (!error && results.length !== 0) return results;
// try CNAME record at authoritative server
debug(`resolveIp: Checking if ${hostname} has CNAME record at ${options.server}`);
debug(`resolveIp: No A record. Checking if ${hostname} has CNAME record at ${options.server}`);
const cnameResults = await dig.resolve(hostname, 'CNAME', options);
if (cnameResults.length === 0) return cnameResults;
// recurse lookup the CNAME record
debug(`resolveIp: Resolving ${hostname}'s CNAME record ${cnameResults[0]}`);
return await dig.resolve(cnameResults[0], type, options);
debug(`resolveIp: CNAME record found. Resolving ${hostname}'s CNAME record ${cnameResults[0]} using unbound`);
return await dig.resolve(cnameResults[0], type, _.omit(options, 'server'));
}
async function isChangeSynced(hostname, type, value, nameserver) {

View File

@@ -539,7 +539,7 @@ async function checkRblStatus(domain) {
const [error2, txtRecords] = await safe(dig.resolve(flippedIp + '.' + rblServer.dns, 'TXT', DNS_OPTIONS));
result.txtRecords = error2 || !txtRecords ? 'No txt record' : txtRecords.map(x => x.join(''));
debug(`checkRblStatus: ${domain} (error: ${error2.message}) (txtRecords: ${JSON.stringify(txtRecords)})`);
debug(`checkRblStatus: ${domain} (error: ${error2?.message || null}) (txtRecords: ${JSON.stringify(txtRecords)})`);
blacklistedServers.push(result);
}
@@ -590,7 +590,7 @@ async function getStatus(domain) {
for (let i = 0; i < checks.length; i++) {
const response = responses[i], check = checks[i];
if (response.status !== 'fulfilled') {
debug(`check ${check.what} was rejected. This is not expected`);
debug(`check ${check.what} was rejected. This is not expected. reason: ${response.reason}`);
continue;
}

View File

@@ -64,19 +64,23 @@ function nginxLocation(s) {
return `~ ^(?!(${re.slice(1)}))`; // negative regex assertion - https://stackoverflow.com/questions/16302897/nginx-location-not-equal-to-regex
}
function getExpiryDateSync(cert) {
function getCertificateDatesSync(cert) {
assert.strictEqual(typeof cert, 'string');
const result = safe.child_process.spawnSync('/usr/bin/openssl', [ 'x509', '-enddate', '-noout' ], { input: cert });
if (!result) return null; // some error
const result = safe.child_process.spawnSync('/usr/bin/openssl', [ 'x509', '-startdate', '-enddate', '-subject', '-noout' ], { input: cert, encoding: 'utf8' });
if (!result) return { startDate: null, endDate: null } ; // some error
const notAfter = result.stdout.toString('utf8').trim().split('=')[1];
const lines = result.stdout.trim().split('\n');
const notBefore = lines[0].split('=')[1];
const notBeforeDate = new Date(notBefore);
const notAfter = lines[1].split('=')[1];
const notAfterDate = new Date(notAfter);
const daysLeft = (notAfterDate - new Date())/(24 * 60 * 60 * 1000);
debug(`expiryDate: notAfter=${notAfter} daysLeft=${daysLeft}`);
debug(`expiryDate: ${lines[2]} notBefore=${notBefore} notAfter=${notAfter} daysLeft=${daysLeft}`);
return notAfterDate;
return { startDate: notBeforeDate, endDate: notAfterDate };
}
async function isOcspEnabled(certFilePath) {
@@ -249,12 +253,21 @@ function getAcmeCertificateNameSync(fqdn, domainObject) {
}
}
function needsRenewalSync(cert) {
function needsRenewalSync(cert, options) {
assert.strictEqual(typeof cert, 'string');
assert.strictEqual(typeof options, 'object');
const notAfter = getExpiryDateSync(cert);
const isExpiring = (notAfter - new Date()) <= (30 * 24 * 60 * 60 * 1000); // expiring in a month
debug(`needsRenewal: ${isExpiring}`);
const { startDate, endDate } = getCertificateDatesSync(cert);
const now = new Date();
let isExpiring;
if (options.forceRenewal) {
isExpiring = (now - startDate) > (65 * 60 * 1000); // was renewed 5 minutes ago. LE backdates issue date by 1 hour for clock skew
} else {
isExpiring = (endDate - now) <= (30 * 24 * 60 * 60 * 1000); // expiring in a month
}
debug(`needsRenewal: ${isExpiring}. force: ${!!options.forceRenewal}`);
return isExpiring;
}
@@ -378,8 +391,9 @@ async function writeCertificate(location) {
return { certFilePath, keyFilePath };
}
async function ensureCertificate(location, auditSource) {
async function ensureCertificate(location, options, auditSource) {
assert.strictEqual(typeof location, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof auditSource, 'object');
const domainObject = await domains.get(location.domain);
@@ -402,7 +416,7 @@ async function ensureCertificate(location, auditSource) {
const cert = await blobs.getString(`${blobs.CERT_PREFIX}-${certName}.cert`);
if (key && cert) {
if (providerMatchesSync(domainObject, cert) && !needsRenewalSync(cert)) {
if (providerMatchesSync(domainObject, cert) && !needsRenewalSync(cert, options)) {
debug(`ensureCertificate: ${fqdn} acme cert exists and is up to date`);
return;
}
@@ -547,7 +561,7 @@ async function configureApp(app, auditSource) {
const locations = getAppLocationsSync(app);
for (const location of locations) {
await ensureCertificate(location, auditSource);
await ensureCertificate(location, {}, auditSource);
}
await writeAppConfigs(app);
@@ -562,19 +576,6 @@ async function unconfigureApp(app) {
await reload();
}
async function ensureCertificates(locations, auditSource, progressCallback) {
assert(Array.isArray(locations));
assert.strictEqual(typeof auditSource, 'object');
assert.strictEqual(typeof progressCallback, 'function');
let percent = 1;
for (const location of locations) {
percent += Math.round(100/locations.length);
progressCallback({ percent, message: `Ensuring certs of ${location.fqdn}` });
await ensureCertificate(location, auditSource);
}
}
async function cleanupCerts(locations, auditSource, progressCallback) {
assert(Array.isArray(locations));
assert.strictEqual(typeof auditSource, 'object');
@@ -592,14 +593,14 @@ async function cleanupCerts(locations, auditSource, progressCallback) {
const certIds = await blobs.listCertIds();
const removedCertNames = [];
for (const certId of certIds) {
const certName = certId.match(new RegExp(`${blobs.CERT_PREFIX}-(.*).cert`))[0];
const certName = certId.match(new RegExp(`${blobs.CERT_PREFIX}-(.*).cert`))[1];
if (certNamesInUse.has(certName)) continue;
const cert = await blobs.getString(certId);
const notAfter = getExpiryDateSync(cert);
if (!notAfter) continue; // some error
const { endDate } = getCertificateDatesSync(cert);
if (!endDate) continue; // some error
if (now - notAfter >= (60 * 60 * 24 * 30 * 6 * 1000)) { // expired 6 months ago and not in use
if (now - endDate >= (60 * 60 * 24 * 30 * 6 * 1000)) { // expired 6 months ago and not in use
progressCallback({ message: `deleting certs of ${certName}` });
// it is safe to delete the certs of stopped apps because their nginx configs are removed
@@ -634,7 +635,12 @@ async function checkCerts(options, auditSource, progressCallback) {
locations = locations.concat(getAppLocationsSync(app));
}
await ensureCertificates(locations, auditSource, progressCallback);
let percent = 1;
for (const location of locations) {
percent += Math.round(100/locations.length);
progressCallback({ percent, message: `Ensuring certs of ${location.fqdn}` });
await ensureCertificate(location, options, auditSource);
}
if (options.rebuild || fs.existsSync(paths.REVERSE_PROXY_REBUILD_FILE)) {
progressCallback( { message: 'Rebuilding app configs' });
@@ -645,6 +651,12 @@ async function checkCerts(options, auditSource, progressCallback) {
await notifyCertChange(); // this allows user to "rebuild" using UI just in case we crashed and went out of sync
safe.fs.unlinkSync(paths.REVERSE_PROXY_REBUILD_FILE);
} else {
// sync all locations and not just the ones that changed. this helps with 0 length certs when disk is full and also
// if renewal task crashed midway.
for (const location of locations) {
await writeCertificate(location);
}
await reload();
await notifyCertChange(); // propagate any cert changes to services
}

View File

@@ -48,7 +48,7 @@ async function createTicket(req, res, next) {
if (supportConfig.email !== constants.SUPPORT_EMAIL) return next(new HttpError(503, 'Sending to non-cloudron email not implemented yet'));
const [ticketError, result] = await safe(appstore.createTicket(_.extend({ }, req.body, { email: req.user.email, displayName: req.user.displayName }), AuditSource.fromRequest(req)));
if (ticketError) return next(new HttpError(503, `Error contacting cloudron.io: ${error.message}. Please email ${constants.SUPPORT_EMAIL}`));
if (ticketError) return next(new HttpError(503, `Error contacting cloudron.io: ${ticketError.message}. Please email ${constants.SUPPORT_EMAIL}`));
next(new HttpSuccess(201, result));
}

View File

@@ -35,7 +35,13 @@ elif [[ "${service}" == "docker" ]]; then
elif [[ "${service}" == "collectd" ]]; then
systemctl restart --no-block collectd
elif [[ "${service}" == "box" ]]; then
systemctl reload --no-block box
readonly ubuntu_version=$(lsb_release -rs)
if [[ "${ubuntu_version}" == "18.04" ]]; then
pid=$(systemctl show box -p MainPID | sed 's/MainPID=//g')
kill -HUP $pid
else
systemctl reload --no-block box
fi
else
echo "Unknown service ${service}"
exit 1

View File

@@ -454,6 +454,7 @@ async function copy(apiConfig, oldFilePath, newFilePath, progressCallback) {
}));
progressCallback({ message: `Copied ${total} files with error: ${copyError}` });
if (copyError) throw copyError;
}
async function remove(apiConfig, filename) {
@@ -560,7 +561,7 @@ async function testConfig(apiConfig) {
const putParams = {
Bucket: apiConfig.bucket,
Key: path.join(apiConfig.prefix, 'cloudron-testfile'),
Key: path.join(apiConfig.prefix, 'snapshot/cloudron-testfile'),
Body: 'testcontent'
};
@@ -568,9 +569,18 @@ async function testConfig(apiConfig) {
const [putError] = await safe(s3.putObject(putParams).promise());
if (putError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. Message: ${putError.message} HTTP Code: ${putError.code}`);
const listParams = {
Bucket: apiConfig.bucket,
Prefix: path.join(apiConfig.prefix, 'snapshot'),
MaxKeys: 1
};
const [listError] = await safe(s3.listObjects(listParams).promise());
if (listError) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects. Message: ${listError.message} HTTP Code: ${listError.code}`);
const delParams = {
Bucket: apiConfig.bucket,
Key: path.join(apiConfig.prefix, 'cloudron-testfile')
Key: path.join(apiConfig.prefix, 'snapshot/cloudron-testfile')
};
const [delError] = await safe(s3.deleteObject(delParams).promise());

View File

@@ -62,8 +62,10 @@ async function getDisks() {
const disks = {}; // by file system
let rootDisk;
const DISK_TYPES = [ 'ext4', 'xfs', 'cifs', 'nfs', 'fuse.sshfs' ]; // we don't show size of contents in untracked disk types
for (const disk of dfEntries) {
if (disk.type !== 'ext4' && disk.type !== 'xfs') continue;
if (!DISK_TYPES.includes(disk.type)) continue;
if (disk.mountpoint === '/') rootDisk = disk;
disks[disk.filesystem] = {
filesystem: disk.filesystem,
@@ -92,18 +94,21 @@ async function getDisks() {
const backupConfig = await settings.getBackupConfig();
if (backupConfig.provider === 'filesystem') {
const [, dfResult] = await safe(df.file(backupConfig.backupFolder));
disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'cloudron-backup', path: backupConfig.backupFolder });
}
const [dockerError, dockerInfo] = await safe(docker.info());
if (!dockerError) {
const [, dfResult] = await safe(df.file(dockerInfo.DockerRootDir));
disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'standard', id: 'docker', path: dockerInfo.DockerRootDir });
}
for (const volume of await volumes.list()) {
const [, dfResult] = await safe(df.file(volume.hostPath));
disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'volume', id: volume.id, path: volume.hostPath });
}
for (const app of await apps.list()) {
@@ -111,7 +116,8 @@ async function getDisks() {
const dataDir = await apps.getStorageDir(app);
const [, dfResult] = await safe(df.file(dataDir));
disks[dfResult?.filesystem || rootDisk.filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
const filesystem = dfResult?.filesystem || rootDisk.filesystem;
if (disks[filesystem]) disks[filesystem].contents.push({ type: 'app', id: app.id, path: dataDir });
}
const swaps = await getSwaps();
@@ -151,7 +157,7 @@ async function checkDiskSpace() {
async function getSwapSize() {
const swaps = await getSwaps();
return Object.keys(swaps).map(n => swaps[n].size).reduce((acc, cur) => acc + cur);
return Object.keys(swaps).map(n => swaps[n].size).reduce((acc, cur) => acc + cur, 0);
}
async function getMemory() {