diff --git a/src/acme2.js b/src/acme2.js index 786197539..eb75518c8 100644 --- a/src/acme2.js +++ b/src/acme2.js @@ -74,7 +74,7 @@ function b64(str) { async function getModulus(pem) { assert.strictEqual(typeof pem, 'string'); - const stdout = await shell.promises.exec('getModulus', 'openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' }); + const stdout = await shell.promises.exec('getModulus', 'openssl rsa -modulus -noout', { input: pem }); const match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m); if (!match) throw new BoxError(BoxError.OPENSSL_ERROR, 'Could not get modulus'); return Buffer.from(match[1], 'hex'); @@ -153,7 +153,7 @@ Acme2.prototype.updateContact = async function (registrationUri) { }; async function generateAccountKey() { - const acmeAccountKey = await shell.promises.exec('generateAccountKey', 'openssl genrsa 4096', { encoding: 'utf8' }); + const acmeAccountKey = await shell.promises.exec('generateAccountKey', 'openssl genrsa 4096', {}); return acmeAccountKey; } @@ -316,7 +316,7 @@ Acme2.prototype.ensureKey = async function () { debug(`ensureKey: generating new key for ${this.cn}`); // same as prime256v1. openssl ecparam -list_curves. 
we used to use secp384r1 but it doesn't seem to be accepted by few mail servers - const newKey = await shell.promises.exec('ensureKey', 'openssl ecparam -genkey -name secp256r1', { encoding: 'utf8' }); + const newKey = await shell.promises.exec('ensureKey', 'openssl ecparam -genkey -name secp256r1', {}); return newKey; }; @@ -344,7 +344,7 @@ Acme2.prototype.createCsr = async function (key) { if (!safe.fs.writeFileSync(opensslConfigFile, conf)) throw new BoxError(BoxError.FS_ERROR, `Failed to write openssl config: ${safe.error.message}`); // while we pass the CN anyways, subjectAltName takes precedence - const csrPem = await shell.promises.exec('createCsr', `openssl req -new -key ${keyFilePath} -outform PEM -subj /CN=${this.cn} -config ${opensslConfigFile}`, { encoding: 'utf8' }); + const csrPem = await shell.promises.exec('createCsr', `openssl req -new -key ${keyFilePath} -outform PEM -subj /CN=${this.cn} -config ${opensslConfigFile}`, {}); await safe(fs.promises.rm(tmpdir, { recursive: true, force: true })); debug(`createCsr: csr file created for ${this.cn}`); return csrPem; // inspect with openssl req -text -noout -in hostname.csr -inform pem diff --git a/src/apps.js b/src/apps.js index 263068617..ead364f37 100644 --- a/src/apps.js +++ b/src/apps.js @@ -2832,7 +2832,7 @@ async function uploadFile(app, sourceFilePath, destFilePath) { // the built-in bash printf understands "%q" but not /usr/bin/printf. // ' gets replaced with '\'' . 
the first closes the quote and last one starts a new one - const escapedDestFilePath = await shell.promises.exec('uploadFile', `printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { shell: '/bin/bash', encoding: 'utf8' }); + const escapedDestFilePath = await shell.promises.exec('uploadFile', `printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { shell: '/bin/bash' }); debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`); const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false }); diff --git a/src/backupformat/rsync.js b/src/backupformat/rsync.js index 58b988b31..9f4f946cb 100644 --- a/src/backupformat/rsync.js +++ b/src/backupformat/rsync.js @@ -110,14 +110,13 @@ async function saveFsMetadata(dataLayout, metadataFile) { // we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer for (let lp of dataLayout.localPaths()) { - const emptyDirs = await shell.promises.exec('saveFsMetadata', `find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 80 }); + const emptyDirs = await shell.promises.exec('saveFsMetadata', `find ${lp} -type d -empty`, { maxBuffer: 1024 * 1024 * 80 }); metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed))); - const execFiles = await shell.promises.exec('saveFsMetadata', `find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 80 }); + const execFiles = await shell.promises.exec('saveFsMetadata', `find ${lp} -type f -executable`, { maxBuffer: 1024 * 1024 * 80 }); metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef))); - const symlinks = await shell.promises.exec('safeFsMetadata', `find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 }); - console.log(symlinks); + const symlinks = await shell.promises.exec('saveFsMetadata', `find ${lp} -type l`, { maxBuffer: 1024 * 1024 * 
30 }); metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => { const target = safe.fs.readlinkSync(sl); return { path: dataLayout.toRemotePath(sl), target }; diff --git a/src/backuptask.js b/src/backuptask.js index 6ac8b782e..c3ead0d4c 100644 --- a/src/backuptask.js +++ b/src/backuptask.js @@ -60,7 +60,7 @@ async function checkPreconditions(backupConfig, dataLayout) { let used = 0; for (const localPath of dataLayout.localPaths()) { debug(`checkPreconditions: getting disk usage of ${localPath}`); - const result = await shell.promises.exec('checkPreconditions', `du -Dsb --exclude='*.lock' --exclude='dovecot.list.index.log.*' "${localPath}"`, { encoding: 'utf8' }); + const result = await shell.promises.exec('checkPreconditions', `du -Dsb --exclude='*.lock' --exclude='dovecot.list.index.log.*' "${localPath}"`, {}); used += parseInt(result, 10); } diff --git a/src/database.js b/src/database.js index 756733eb0..3de506174 100644 --- a/src/database.js +++ b/src/database.js @@ -144,7 +144,7 @@ async function exportToFile(file) { assert.strictEqual(typeof file, 'string'); // latest mysqldump enables column stats by default which is not present in 5.7 util - const mysqlDumpHelp = await shell.promises.exec('exportToFile', '/usr/bin/mysqldump --help', { encoding: 'utf8' }); + const mysqlDumpHelp = await shell.promises.exec('exportToFile', '/usr/bin/mysqldump --help', {}); const hasColStats = mysqlDumpHelp.includes('column-statistics'); const colStats = hasColStats ? 
'--column-statistics=0' : ''; diff --git a/src/docker.js b/src/docker.js index 6654ee720..6155ddf93 100644 --- a/src/docker.js +++ b/src/docker.js @@ -247,7 +247,7 @@ async function getAddressesForPort53() { const addresses = []; for (const phy of physicalDevices) { - const [error, output] = await safe(shell.promises.exec('getAddressesForPort53', `ip -f inet -j addr show dev ${phy.name} scope global`, { encoding: 'utf8' })); + const [error, output] = await safe(shell.promises.exec('getAddressesForPort53', `ip -f inet -j addr show dev ${phy.name} scope global`, {})); if (error) continue; const inet = safe.JSON.parse(output) || []; for (const r of inet) { diff --git a/src/mailserver.js b/src/mailserver.js index 0b737185c..f9a6d4ca1 100644 --- a/src/mailserver.js +++ b/src/mailserver.js @@ -161,8 +161,8 @@ async function configureMail(mailFqdn, mailDomain, serviceConfig) { // if the 'yellowtent' user of OS and the 'cloudron' user of mail container don't match, the keys become inaccessible by mail code if (!safe.fs.chmodSync(mailKeyFilePath, 0o644)) throw new BoxError(BoxError.FS_ERROR, `Could not chmod key file: ${safe.error.message}`); - await shell.promises.exec('stopMail', 'docker stop mail || true', {}); - await shell.promises.exec('removeMail', 'docker rm -f mail || true', {}); + await safe(shell.promises.exec('stopMail', 'docker stop mail', {})); // ignore error + await safe(shell.promises.exec('removeMail', 'docker rm -f mail', {})); // ignore error const allowInbound = await createMailConfig(mailFqdn); diff --git a/src/mounts.js b/src/mounts.js index 0d1b4ddc9..ba190853a 100644 --- a/src/mounts.js +++ b/src/mounts.js @@ -23,7 +23,7 @@ const assert = require('assert'), const ADD_MOUNT_CMD = path.join(__dirname, 'scripts/addmount.sh'); const RM_MOUNT_CMD = path.join(__dirname, 'scripts/rmmount.sh'); const REMOUNT_MOUNT_CMD = path.join(__dirname, 'scripts/remountmount.sh'); -const SYSTEMD_MOUNT_EJS = fs.readFileSync(path.join(__dirname, 'systemd-mount.ejs'), { 
encoding: 'utf8' }); +const SYSTEMD_MOUNT_EJS = fs.readFileSync(path.join(__dirname, 'systemd-mount.ejs'), { encoding: 'utf8' }); // fs.readFileSync (unlike shell.exec) returns a Buffer without encoding // https://man7.org/linux/man-pages/man8/mount.8.html function validateMountOptions(type, options) { @@ -79,7 +79,7 @@ async function renderMountFile(mount) { let options, what, type; switch (mountType) { case 'cifs': { - const out = shell.promises.exec('renderMountFile', `systemd-escape -p '${hostPath}'`, { encoding: 'utf8' }); // this ensures uniqueness of creds file + const out = await shell.promises.exec('renderMountFile', `systemd-escape -p '${hostPath}'`, {}); // this ensures uniqueness of creds file const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`); if (!safe.fs.writeFileSync(credentialsFilePath, `username=${mountOptions.username}\npassword=${mountOptions.password}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not write credentials file: ${safe.error.message}`); @@ -138,7 +138,7 @@ async function removeMount(mount) { const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`); safe.fs.unlinkSync(keyFilePath); } else if (mountType === 'cifs') { - const out = await shell.promises.exec('removeMount', `systemd-escape -p '${hostPath}'`, { encoding: 'utf8' }); + const out = await shell.promises.exec('removeMount', `systemd-escape -p '${hostPath}'`, {}); const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`); safe.fs.unlinkSync(credentialsFilePath); } @@ -159,7 +159,7 @@ async function getStatus(mountType, hostPath) { let message; if (state !== 'active') { // find why it failed - const logsJson = await shell.promises.exec('getStatus', `journalctl -u $(systemd-escape -p --suffix=mount ${hostPath}) -n 10 --no-pager -o json`, { encoding: 'utf8' }); + const logsJson = await shell.promises.exec('getStatus', `journalctl -u $(systemd-escape -p --suffix=mount ${hostPath}) -n 10 --no-pager -o json`, {}); if (logsJson) { const lines = 
logsJson.trim().split('\n').map(l => JSON.parse(l)); // array of json diff --git a/src/platform.js b/src/platform.js index d27fc1838..952fce14e 100644 --- a/src/platform.js +++ b/src/platform.js @@ -49,7 +49,7 @@ async function pruneInfraImages() { // cannot blindly remove all unused images since redis image may not be used const imageNames = Object.keys(infra.images).map(addon => infra.images[addon]); - const [error, output] = await safe(shell.promises.exec('pruneInfraImages', 'docker images --digests --format "{{.ID}} {{.Repository}} {{.Tag}} {{.Digest}}"', { encoding: 'utf8' })); + const [error, output] = await safe(shell.promises.exec('pruneInfraImages', 'docker images --digests --format "{{.ID}} {{.Repository}} {{.Tag}} {{.Digest}}"', {})); if (error) { debug(`Failed to list images ${error.message}`); throw error; @@ -69,7 +69,7 @@ async function pruneInfraImages() { const imageIdToPrune = tag === '' ? `${repo}@${digest}` : `${repo}:${tag}`; // untagged, use digest console.log(`pruneInfraImages: removing unused image of ${imageName}: ${imageIdToPrune}`); - const [error] = await safe(shell.promises.exec('pruneInfraImages', `docker rmi '${imageIdToPrune}'`, { encoding: 'utf8' })); + const [error] = await safe(shell.promises.exec('pruneInfraImages', `docker rmi '${imageIdToPrune}'`, {})); if (error) console.log(`Error removing image ${imageIdToPrune}: ${error.mesage}`); } } @@ -78,7 +78,7 @@ async function pruneInfraImages() { async function createDockerNetwork() { debug('createDockerNetwork: recreating docker network'); - await shell.promises.exec('createDockerNetwork', 'docker network rm cloudron || true', {}); + await safe(shell.promises.exec('createDockerNetwork', 'docker network rm cloudron', {})); // ignore error // the --ipv6 option will work even in ipv6 is disabled. 
fd00 is IPv6 ULA await shell.promises.exec('createDockerNetwork', `docker network create --subnet=${constants.DOCKER_IPv4_SUBNET} --ip-range=${constants.DOCKER_IPv4_RANGE} --gateway ${constants.DOCKER_IPv4_GATEWAY} --ipv6 --subnet=fd00:c107:d509::/64 cloudron`, {}); } @@ -86,8 +86,8 @@ async function createDockerNetwork() { async function removeAllContainers() { debug('removeAllContainers: removing all containers for infra upgrade'); - await shell.promises.exec('removeAllContainers', 'docker ps -qa --filter \'label=isCloudronManaged\' | xargs --no-run-if-empty docker stop', {}); - await shell.promises.exec('removeAllContainers', 'docker ps -qa --filter \'label=isCloudronManaged\' | xargs --no-run-if-empty docker rm -f', {}); + await shell.promises.exec('removeAllContainers', 'docker ps -qa --filter \'label=isCloudronManaged\' | xargs --no-run-if-empty docker stop', { shell: '/bin/bash' }); + await shell.promises.exec('removeAllContainers', 'docker ps -qa --filter \'label=isCloudronManaged\' | xargs --no-run-if-empty docker rm -f', { shell: '/bin/bash' }); } async function markApps(existingInfra, restoreOptions) { diff --git a/src/reverseproxy.js b/src/reverseproxy.js index 92ff45703..69f289007 100644 --- a/src/reverseproxy.js +++ b/src/reverseproxy.js @@ -75,7 +75,7 @@ function nginxLocation(s) { async function getCertificateDates(cert) { assert.strictEqual(typeof cert, 'string'); - const [error, result] = await safe(shell.promises.exec('getCertificateDates', 'openssl x509 -startdate -enddate -subject -noout', { input: cert, encoding: 'utf8' })); + const [error, result] = await safe(shell.promises.exec('getCertificateDates', 'openssl x509 -startdate -enddate -subject -noout', { input: cert })); if (error) return { startDate: null, endDate: null } ; // some error const lines = result.trim().split('\n'); @@ -103,7 +103,7 @@ async function isOcspEnabled(certFilePath) { // We used to check for the must-staple in the cert using openssl x509 -text -noout -in 
${certFilePath} | grep -q status_request // however, we cannot set the must-staple because first request to nginx fails because of it's OCSP caching behavior - const [error, result] = await safe(shell.promises.exec('isOscpEnabled', `openssl x509 -in ${certFilePath} -noout -ocsp_uri`, { encoding: 'utf8' })); + const [error, result] = await safe(shell.promises.exec('isOcspEnabled', `openssl x509 -in ${certFilePath} -noout -ocsp_uri`, {})); return !error && result.length > 0; // no error and has uri } @@ -112,7 +112,7 @@ async function providerMatches(domainObject, cert) { assert.strictEqual(typeof domainObject, 'object'); assert.strictEqual(typeof cert, 'string'); - const [error, subjectAndIssuer] = await safe(shell.promises.exec('providerMatches', 'openssl x509 -noout -subject -issuer', { encoding: 'utf8', input: cert })); + const [error, subjectAndIssuer] = await safe(shell.promises.exec('providerMatches', 'openssl x509 -noout -subject -issuer', { input: cert })); if (error) return false; // something bad happenned const subject = subjectAndIssuer.match(/^subject=(.*)$/m)[1]; @@ -153,21 +153,21 @@ async function validateCertificate(subdomain, domain, certificate) { // -checkhost checks for SAN or CN exclusively. SAN takes precedence and if present, ignores the CN. const fqdn = dns.fqdn(subdomain, domain); - const [checkHostError, checkHostOutput] = await safe(shell.promises.exec('validateCertificate', `openssl x509 -noout -checkhost "${fqdn}"`, { encoding: 'utf8', input: cert })); + const [checkHostError, checkHostOutput] = await safe(shell.promises.exec('validateCertificate', `openssl x509 -noout -checkhost "${fqdn}"`, { input: cert })); console.log(checkHostError, checkHostOutput); if (checkHostError) throw new BoxError(BoxError.BAD_FIELD, 'Could not validate certificate'); if (checkHostOutput.indexOf('does match certificate') === -1) throw new BoxError(BoxError.BAD_FIELD, `Certificate is not valid for this domain. 
Expecting ${fqdn}`); // check if public key in the cert and private key matches. pkey below works for RSA and ECDSA keys - const [pubKeyError1, pubKeyFromCert] = await safe(shell.promises.exec('validateCertificate', 'openssl x509 -noout -pubkey', { encoding: 'utf8', input: cert })); + const [pubKeyError1, pubKeyFromCert] = await safe(shell.promises.exec('validateCertificate', 'openssl x509 -noout -pubkey', { input: cert })); if (pubKeyError1) throw new BoxError(BoxError.BAD_FIELD, 'Could not get public key from cert'); - const [pubKeyError2, pubKeyFromKey] = await safe(shell.promises.exec('validateCertificate', 'openssl pkey -pubout', { encoding: 'utf8', input: key })); + const [pubKeyError2, pubKeyFromKey] = await safe(shell.promises.exec('validateCertificate', 'openssl pkey -pubout', { input: key })); if (pubKeyError2) throw new BoxError(BoxError.BAD_FIELD, 'Could not get public key from private key'); if (pubKeyFromCert !== pubKeyFromKey) throw new BoxError(BoxError.BAD_FIELD, 'Public key does not match the certificate.'); // check expiration - const [error] = await safe(shell.promises.exec('validateCertificate', 'openssl x509 -checkend 0', { encoding: 'utf8', input: cert })); + const [error] = await safe(shell.promises.exec('validateCertificate', 'openssl x509 -checkend 0', { input: cert })); if (error) throw new BoxError(BoxError.BAD_FIELD, 'Certificate has expired'); return null; diff --git a/src/services.js b/src/services.js index d02664700..20f2a07ac 100644 --- a/src/services.js +++ b/src/services.js @@ -512,7 +512,7 @@ async function rebuildService(id, auditSource) { await mailServer.start({ version: 'none' }); break; case 'redis': { - await shell.promises.exec('removeRedis', `docker rm -f redis-${instance} || true`, {}); + await safe(shell.promises.exec('removeRedis', `docker rm -f redis-${instance}`, {})); // ignore error const app = await apps.get(instance); if (app) await setupRedis(app, app.manifest.addons.redis); // starts the container break; @@ 
-952,8 +952,8 @@ async function startTurn(existingInfra) { --label isCloudronManaged=true \ ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; - await shell.promises.exec('stopTurn', 'docker stop turn || true', {}); - await shell.promises.exec('removeTurn', 'docker rm -f turn || true', {}); + await safe(shell.promises.exec('stopTurn', 'docker stop turn', {})); // ignore error + await safe(shell.promises.exec('removeTurn', 'docker rm -f turn', {})); // ignore error await shell.promises.exec('startTurn', runCmd, {}); } @@ -1159,8 +1159,8 @@ async function startMysql(existingInfra) { --cap-add SYS_NICE \ ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; - await shell.promises.exec('stopMysql', 'docker stop mysql || true', {}); - await shell.promises.exec('removeMysql', 'docker rm -f mysql || true', {}); + await safe(shell.promises.exec('stopMysql', 'docker stop mysql', {})); // ignore error + await safe(shell.promises.exec('removeMysql', 'docker rm -f mysql', {})); // ignore error await shell.promises.exec('startMysql', runCmd, {}); if (!serviceConfig.recoveryMode) { @@ -1378,8 +1378,8 @@ async function startPostgresql(existingInfra) { --label isCloudronManaged=true \ ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; - await shell.promises.exec('stopPostgresql', 'docker stop postgresql || true', {}); - await shell.promises.exec('removePostgresql', 'docker rm -f postgresql || true', {}); + await safe(shell.promises.exec('stopPostgresql', 'docker stop postgresql', {})); // ignore error + await safe(shell.promises.exec('removePostgresql', 'docker rm -f postgresql', {})); // ignore error await shell.promises.exec('startPostgresql', runCmd, {}); if (!serviceConfig.recoveryMode) { @@ -1521,8 +1521,8 @@ async function startMongodb(existingInfra) { --label isCloudronManaged=true \ ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; - await shell.promises.exec('stopMongodb', 'docker stop mongodb || true', {}); - await shell.promises.exec('removeMongodb', 'docker rm -f mongodb || true', 
{}); + await safe(shell.promises.exec('stopMongodb', 'docker stop mongodb', {})); // ignore error + await safe(shell.promises.exec('removeMongodb', 'docker rm -f mongodb', {})); // ignore error await shell.promises.exec('startMongodb', runCmd, {}); if (!serviceConfig.recoveryMode) { @@ -1669,8 +1669,8 @@ async function startGraphite(existingInfra) { --label isCloudronManaged=true \ ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; - await shell.promises.exec('stopGraphite', 'docker stop graphite || true', {}); - await shell.promises.exec('removeGraphite', 'docker rm -f graphite || true', {}); + await safe(shell.promises.exec('stopGraphite', 'docker stop graphite', {})); // ignore error + await safe(shell.promises.exec('removeGraphite', 'docker rm -f graphite', {})); // ignore error if (upgrading) await shell.promises.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}); await shell.promises.exec('startGraphite', runCmd, {}); @@ -1714,8 +1714,8 @@ async function startRedis(existingInfra) { if (upgrading) await backupRedis(app, {}); - await shell.promises.exec('stopRedis', `docker stop ${redisName} || true`, {}); // redis will backup as part of signal handling - await shell.promises.exec('removeRedis', `docker rm -f ${redisName} || true`, {}); + await safe(shell.promises.exec('stopRedis', `docker stop ${redisName}`, {})); // redis will backup as part of signal handling + await safe(shell.promises.exec('removeRedis', `docker rm -f ${redisName}`, {})); // ignore error await setupRedis(app, app.manifest.addons.redis); // starts the container } diff --git a/src/sftp.js b/src/sftp.js index 2dc3bc75d..8521ff242 100644 --- a/src/sftp.js +++ b/src/sftp.js @@ -123,8 +123,8 @@ async function start(existingInfra) { ${readOnly} -v /tmp -v /run "${image}" ${cmd}`; // ignore error if container not found (and fail later) so that this code works across restarts - await shell.promises.exec('stopSftp', 'docker stop sftp || true', {}); - await shell.promises.exec('removeSftp', 
'docker rm -f sftp || true', {}); + await safe(shell.promises.exec('stopSftp', 'docker stop sftp', {})); // ignore error + await safe(shell.promises.exec('removeSftp', 'docker rm -f sftp', {})); // ignore error await shell.promises.exec('startSftp', runCmd, {}); } diff --git a/src/shell.js b/src/shell.js index 0bf762a88..552b7f2ba 100644 --- a/src/shell.js +++ b/src/shell.js @@ -19,7 +19,7 @@ exports = module.exports = { const SUDO = '/usr/bin/sudo'; -// default encoding utf8, shell, handles input. full command +// default encoding utf8, shell, handles input, full command function exec(tag, cmd, options, callback) { assert.strictEqual(typeof tag, 'string'); assert.strictEqual(typeof cmd, 'string'); @@ -28,8 +28,10 @@ function exec(tag, cmd, options, callback) { debug(`${tag} exec: ${cmd}`); + const execOptions = Object.assign({ encoding: 'utf8', shell: false }, options); + // https://github.com/nodejs/node/issues/25231 - const cp = child_process.exec(cmd, options, function (error, stdout, stderr) { + const cp = child_process.exec(cmd, execOptions, function (error, stdout, stderr) { let e = null; if (error) { e = new BoxError(BoxError.SHELL_ERROR, `${tag} errored with code ${error.code} message ${error.message}`); @@ -48,7 +50,7 @@ function exec(tag, cmd, options, callback) { } } -// no shell, utf8 encoding, separate args +// default encoding utf8, no shell, separate args function execFile(tag, file, args, options, callback) { assert.strictEqual(typeof tag, 'string'); assert.strictEqual(typeof file, 'string'); @@ -58,8 +60,10 @@ function execFile(tag, file, args, options, callback) { debug(`${tag} exec: ${file}`); + const execOptions = Object.assign({ encoding: 'utf8', shell: false }, options); + // https://github.com/nodejs/node/issues/25231 - const cp = child_process.execFile(file, args, options, function (error, stdout, stderr) { + const cp = child_process.execFile(file, args, execOptions, function (error, stdout, stderr) { let e = null; if (error) { e = new 
BoxError(BoxError.SHELL_ERROR, `${tag} errored with code ${error.code} message ${error.message}`); diff --git a/src/system.js b/src/system.js index 639a12626..5a402ff7d 100644 --- a/src/system.js +++ b/src/system.js @@ -68,7 +68,7 @@ async function hdparm(file) { } async function getSwaps() { - const [error, stdout] = await safe(shell.promises.exec('getSwaps', 'swapon --noheadings --raw --bytes --show=type,size,used,name', { encoding: 'utf8' })); + const [error, stdout] = await safe(shell.promises.exec('getSwaps', 'swapon --noheadings --raw --bytes --show=type,size,used,name', {})); if (error) return {}; const swaps = {}; @@ -329,7 +329,7 @@ async function getLogs(unit, options) { } async function getBlockDevices() { - const result = await shell.promises.exec('getBlockDevices', 'lsblk --paths --json --list --fs', { encoding: 'utf8' }); + const result = await shell.promises.exec('getBlockDevices', 'lsblk --paths --json --list --fs', {}); const info = safe.JSON.parse(result); if (!info) throw new BoxError(BoxError.INTERNAL_ERROR, `failed to parse lsblk: ${safe.error.message}`);