Compare commits
364 Commits
SHA1 (author and date columns were not preserved):

54af286fcd, 7b5df02a0e, 4f0e0706b2, 1f74febdb0, 49bf333355, c4af06dd66, f5f9a8e520, ae376774e4, ff8c2184f6, a7b056a84c,
131d456329, d4bba93dbf, e332ad96e4, c455325875, 88e9f751ea, 8677e86ace, cde22cd0a3, 6d7f7fbc9a, 858c85ee85, 15d473d506,
70d3040135, 56c567ac86, 1f5831b79e, 6382216dc5, 81b59eae36, bc3cb6acb5, fa768ad305, 5184e017c9, d2ea6b2002, 3fcc3ea1aa,
15877f45b8, 0a514323a9, 1c07ec219c, 82142f3f31, 554dec640a, d176ff2582, bd7ee437a8, 0250661402, 9cef08aa6a, bead9589a1,
c5b631c0e5, 4e75694ac6, 2a93c703ef, 3c92971665, 563391c2f1, d4555886f4, a584fad278, e21f39bc0b, 84ca85b315, d1bdb80c72,
d20f8d5e75, b2de6624fd, 1591541c7f, 6124323d52, b23189b45c, 1c18c16e38, d07b1c7280, 20d722f076, bb3be9f380, edd284fe0b,
b5cc7d90a9, 251c1f9757, 03cd9bcc7c, fc8572c2af, a913660aeb, 9c82765512, ace96bd228, 02d95810a6, 0fcb202364, 88eb809c6e,
1534eaf6f7, a2a60ff426, afc70ac332, d5e5b64df2, 4a18ecc0ef, f355403412, 985320d355, 26c9d8bc88, 2b81163179, 6715efca50,
612b1d6030, b71254a0c3, c0e5f60592, 64243425ce, 9ad7fda3cd, c0eedc97ac, 5b4a1e0ec1, 5b31486dc9, 116cde19f9, 14fc089f05,
885d60f7cc, d33fd7b886, ba067a959c, a246cb7e73, f0abd7edc8, 127470ae59, efac46e40e, 6ab237034d, 2af29fd844, 1549f6a4d0,
5d16aca8f4, 2facc6774b, e800c7d282, a58228952a, 3511856a7c, 006a53dc7a, 45c73798b9, c704884b10, b54113ade3, ac00225a75,
f43fd21929, 741c21b368, 5a26fe7361, 1185dc7f79, e1ac2b7b00, e2c6672a5c, 5c50534e21, 55e2139c69, 34ff3462e9, 104bdaf76b,
c9f7b9a8a6, 2e5d89be6b, bcf474aab6, dea74f05ab, 69e0f2f727, 080f701f33, 94a196bfa0, 3a63158763, d9c47efe1f, e818e5f7d5,
cac0933334, b74f01bb9e, 1f2d596a4a, ce06b2e150, 9bd9b72e5d, a32166bc9d, f382b8f1f5, fbc7fcf04b, 11d7dfa071, 923a9f6560,
25f44f58e3, d55a6a5eec, f854d86986, 6a7379e64c, a955457ee7, 67801020ed, 037f4195da, 8cf0922401, 6311c78bcd, 544ca6e1f4,
6de198eaad, 6c67f13d90, 7598cf2baf, 7dba294961, 4bee30dd83, 7952a67ed2, 50b2eabfde, 591067ee22, 88f78c01ba, dddc5a1994,
8fc8128957, c76b211ce0, 0c13504928, 26ab7f2767, f78dabbf7e, 39c5c44ac3, 2dea7f8fe9, 85af0d96d2, 176e917f51, 534c8f9c3f,
5ee9feb0d2, 723453dd1c, 45c9ddeacf, 5b075e3918, c9916c4107, c7956872cb, 3adf8b5176, 40eae601da, 3eead2fdbe, 9fcd6f9c0a,
17910584ca, d9a02faf7a, d366f3107d, 2596afa7b3, aa1e8dc930, f3c66056b5, 93bacd00da, b5c2a0ff44, 6bd478b8b0, c5c62ff294,
7ed8678d50, e19e5423f0, 622ba01c7a, 935da3ed15, ce054820a6, a7668624b4, 01b36bb37e, 5d1aaf6bc6, 7ceb307110, 6371b7c20d,
7ec648164e, 6e98f5f36c, a098c6da34, 94e70aca33, ea01586b52, 8ceb80dc44, 2280b7eaf5, 1c1d247a24, 90a6ad8cf5, 80d91e5540,
26cf084e1c, 8ef730ad9c, 7123ec433c, c67d9fd082, dd8f710605, e097b79f65, 765f6d1b12, 7cf80ebf69, cc328f3a6e, 045c3917c9,
ac2186ccf6, a57fe36643, 1e711f7928, eafccde6cb, 6b85e11a22, a74de3811b, 070a425c85, 32153ed47d, 454f9c4a79, 3d28833c35,
be458020dd, 9b6733fd88, 1b34a3e599, 67d29dbad8, 28b0043541, 78824b059e, c63709312d, 11cf24075b, 5d440d55c3, 4c3b81d29c,
032218c0fd, 0cd48bd239, f5a2e8545b, 4306e20a8e, 635dd5f10d, 7f89dfd261, e878e71b20, 64a2493ca2, 26f9635a38, 5f2492558d,
c83c151e10, 801dddc269, 9a886111ad, bdc9a0cbe3, 555f914537, 43f86674b4, f7ed044a40, 72408f2542, 0abc6c8844, d46de32ffb,
185d5d66ad, 01ce251596, 05d7a7f496, 685bda35b9, 8d8cdd38a9, d54c03f0a0, 11f7be2065, a39e0ab934, b51082f7e4, 9ec76c69ec,
b0a09a8a00, 5870f949a3, 87cb90c9b6, 21b900258a, de9f3c10f4, 47e45808a3, 0280c2baba, 2f8f5fcb7d, 709d4041b2, b4b999bd74,
ea3fd27123, 452a4d9a75, 54934c41a7, a05e564ae6, 57ac94bab6, 6839ff4cf6, 993dda9121, 70695b1b0f, d47b39d90b, 574d3b120f,
3d1f2bf716, bac5edc188, 7700c56d3e, 9f395f64da, 73d029ba4b, a292393a43, 37a4e8d5c5, 81728f4202, 2d2ddd1c49, bc49f64a0c,
52fc031516, cae528158c, 566a03cd59, ad2221350f, 656dca7c66, 638fe2e6c8, 3295d2b727, c4689a8385, d09d6c21fa, 7ec1594428,
529f6fb2cd, 724f5643bc, 74e849e2a1, bfb233eca1, 5b27eb9c54, faf91d4d00, dbb803ff5e, 0dea2d283b, cbc44da102, 3f633c9779,
6933ccefe2, 54aeff1419, 14f9d7fe25, 144e98abab, e0e0c049c8, ef0f9c5298, d13905377c, 6f1023e0cd, eeddc233dd, f48690ee11,
3b0bdd9807, 6dc5c4f13b, 9bb5096f1c, af42008fd3, d6875d4949, 4396bd3ea7, db03053e05, 193dff8c30, 59582d081a, ef684d32a2,
fc2a326332, e66a804012, 5afa7345a5, c100be4131, d326d05ad6, eb0662b245, b92641d1b8, 7912d521ca, 71dac64c4c, aab6f222b3,
1cb1be321c, 2434e81383, 62142c42ea, 0ae30e6447, 1a87856655, a3e097d541, 9a6694286a, a662a60332, 69f3b4e987, 481586d7b7,
34c3a2b42d, c4a9295d3e, 993ff50681, ba5c2f623c
@@ -1,5 +1,6 @@
node_modules/
coverage/
.nyc_output/
webadmin/dist/
installer/src/certs/server.key
@@ -2429,3 +2429,116 @@
* firewall: add retry for xtables lock
* redis: fix issue where protected mode was enabled with no password

[7.1.2]
* Fix crash in cloudron-firewall when ports are whitelisted
* eventlog: add event for certificate cleanup
* eventlog: log event for mailbox alias update
* backups: fix incorrect mountpoint check with managed mounts

[7.1.3]
* Fix security issue where an admin can impersonate an owner
* block list: can upload up to 2MB
* dns: fix issue where link local address was picked up for ipv6
* setup: ufw may not be installed
* mysql: fix default collation of databases

[7.1.4]
* wildcard dns: fix handling of ENODATA
* cloudflare: fix error handling
* openvpn: ipv6 support
* dyndns: fix issue where eventlog was getting filled with empty entries
* mandatory 2fa: Fix typo in 2FA check

[7.2.0]
* mail: hide log button for non-superadmins
* firewall: do not add duplicate ldap redirect rules
* ldap: respond to RootDSE
* Check if CNAME record exists and remove it if overwrite is set
* cifs: use credentials file for better password support
* installer: rework script to fix DNS resolution issues
* backup cleaner: do not clean if not mounted
* restore: fix sftp private key perms
* support: add a separate system user named cloudron-support
* sshfs: fix bug where sshfs mounts were generated without unbound dependency
* cloudron-setup: add --setup-token
* notifications: add installation event
* backups: set label of backup and control its retention
* wasabi: add new regions (London, Frankfurt, Paris, Toronto)
* docker: update to 20.10.14
* Ensure LDAP usernames are always treated as lowercase
* Add a way to make LDAP users local
* proxyAuth: set X-Remote-User (rfc3875)
* GoDaddy: there is now a delete API
* nginx: use ubuntu packages for ubuntu 20.04 and 22.04
* Ubuntu 22.04 LTS support
* Add Hetzner DNS
* cron: add support for extensions (@reboot, @weekly etc)
* Add profile backgroundImage api
* exec: rework API to get exit code
* Add update available filter

[7.2.1]
* Refactor backup code to use async/await
* mongodb: fix bug where a small timeout prevented import of large backups
* Add update available filter
* exec: rework API to get exit code
* Add profile backgroundImage api
* cron: add support for extensions (@reboot, @weekly etc)

[7.2.2]
* Update cloudron-manifestformat for new scheduler patterns
* collectd: FQDNLookup causes collectd install to fail

[7.2.3]
* appstore: allow re-registration on server side delete
* transfer ownership route is not used anymore
* graphite: fix issue where disk names with '.' do not render
* dark mode fixes
* sendmail: mail from display name
* Use volumes for app data instead of raw path
* initial xfs support

[7.2.4]
* volumes: Ensure long volume names do not overflow the table
* Move all appstore filters to the left
* app data: allow sameness of old and new dir

[7.2.5]
* Fix storage volume migration
* Fix issue where only 25 group members were returned
* Fix eventlog display

[7.3.0]
* Proxied apps
* Applinks - app bookmarks in dashboard
* backups: optional encryption of backup file names
* eventlog: add event for impersonated user login
* ldap & user directory: Remove virtual user and admin groups
* Randomize certificate generation cronjob to lighten load on Let's Encrypt servers
* mail: catch all address can be any domain
* mail: accept only STARTTLS servers for relay
* graphs: cgroup v2 support
* mail: fix issue where signature was appended to text attachments
* redis: restart button will now rebuild if the container is missing
* backups: allow space in label name
* mail: fix crash when solr is enabled on Ubuntu 22 (cgroup v2 detection fix)
* mail: fix issue where certificate renewal did not restart the mail container properly
* notification: Fix crash when backupId is null
* IPv6: initial support for ipv6 only server
* User directory: Cloudron connector uses 2FA auth
* port bindings: add read only flag
* mail: add storage quota support
* mail: allow aliases to have wildcard
* proxyAuth: add supportsBearerAuth flag
* backups: Fix precondition check which was not erroring if mount is missing
* mail: add queue management API and UI
* graphs: show app disk usage graphs
* UI: fix issue where mailbox display name was not init correctly
* wasabi: add singapore and sydney regions
* filemanager: add split view
* nginx: fix zero length certs when out of disk space
* read only API tokens

[7.3.1]
* Add Cloudflare R2
@@ -9,7 +9,7 @@ const fs = require('fs'),
     safe = require('safetydance'),
     server = require('./src/server.js'),
     settings = require('./src/settings.js'),
-    userdirectory = require('./src/userdirectory.js');
+    directoryServer = require('./src/directoryserver.js');

 let logFd;

@@ -38,16 +38,15 @@ async function startServers() {
     await proxyAuth.start();
     await ldap.start();

-    const conf = await settings.getUserDirectoryConfig();
-    if (conf.enabled) await userdirectory.start();
+    const conf = await settings.getDirectoryServerConfig();
+    if (conf.enabled) await directoryServer.start();
 }

 async function main() {
     const [error] = await safe(startServers());
     if (error) return exitSync({ error: new Error(`Error starting server: ${JSON.stringify(error)}`), code: 1 });

-    // require those here so that logging handler is already setup
-    require('supererror');
+    // require this here so that logging handler is already setup
     const debug = require('debug')('box:box');

     process.on('SIGINT', async function () {
@@ -55,7 +54,7 @@ async function main() {

         await proxyAuth.stop();
         await server.stop();
-        await userdirectory.stop();
+        await directoryServer.stop();
         await ldap.stop();
         setTimeout(process.exit.bind(process), 3000);
     });

@@ -65,7 +64,7 @@ async function main() {

         await proxyAuth.stop();
         await server.stop();
-        await userdirectory.stop();
+        await directoryServer.stop();
         await ldap.stop();
         setTimeout(process.exit.bind(process), 3000);
     });
@@ -0,0 +1,9 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('DELETE FROM settings WHERE name=?', [ 'license_key' ], callback);
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,9 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'appstore_api_token', 'cloudron_token' ], callback);
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,37 @@
'use strict';

const superagent = require('superagent');

exports.up = function(db, callback) {
    db.all('SELECT value FROM settings WHERE name="api_server_origin"', function (error, results) {
        if (error || results.length === 0) return callback(error);
        const apiServerOrigin = results[0].value;

        db.all('SELECT value FROM settings WHERE name="appstore_api_token"', function (error, results) {
            if (error || results.length === 0) return callback(error);
            const apiToken = results[0].value;

            console.log(`Getting appstore web token from ${apiServerOrigin}`);

            superagent.post(`${apiServerOrigin}/api/v1/user_token`)
                .send({})
                .query({ accessToken: apiToken })
                .timeout(30 * 1000).end(function (error, response) {
                    if (error && !error.response) {
                        console.log('Network error getting web token', error);
                        return callback();
                    }
                    if (response.statusCode !== 201 || !response.body.accessToken) {
                        console.log(`Bad status getting web token: ${response.status} ${response.text}`);
                        return callback();
                    }

                    db.runSql('INSERT settings (name, value) VALUES(?, ?)', [ 'appstore_web_token', response.body.accessToken ], callback);
                });
        });
    });
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,16 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('ALTER TABLE backups ADD COLUMN label VARCHAR(128) DEFAULT ""', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE backups DROP COLUMN label', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};
@@ -0,0 +1,51 @@
'use strict';

const async = require('async'),
    hat = require('../src/hat.js');

exports.up = function(db, callback) {
    db.all('SELECT * from backups', function (error, allBackups) {
        if (error) return callback(error);

        console.log(`Fixing up ${allBackups.length} backup entries`);
        const idMap = {};
        allBackups.forEach(b => {
            b.remotePath = b.id;
            b.id = `${b.type}_${b.identifier}_v${b.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
            idMap[b.remotePath] = b.id;
        });

        db.runSql('ALTER TABLE backups ADD COLUMN remotePath VARCHAR(256)', function (error) {
            if (error) return callback(error);

            db.runSql('ALTER TABLE backups CHANGE COLUMN dependsOn dependsOnJson TEXT', function (error) {
                if (error) return callback(error);

                async.eachSeries(allBackups, function (backup, iteratorDone) {
                    const dependsOnPaths = backup.dependsOn ? backup.dependsOn.split(',') : []; // previously, it was paths
                    let dependsOnIds = [];
                    dependsOnPaths.forEach(p => { if (idMap[p]) dependsOnIds.push(idMap[p]); });

                    db.runSql('UPDATE backups SET id = ?, remotePath = ?, dependsOnJson = ? WHERE id = ?', [ backup.id, backup.remotePath, JSON.stringify(dependsOnIds), backup.remotePath ], iteratorDone);
                }, function (error) {
                    if (error) return callback(error);

                    db.runSql('ALTER TABLE backups MODIFY COLUMN remotePath VARCHAR(256) NOT NULL UNIQUE', callback);
                });
            });
        });
    });
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE backups DROP COLUMN remotePath', function (error) {
        if (error) console.error(error);

        db.runSql('ALTER TABLE backups RENAME COLUMN dependsOnJson to dependsOn', function (error) {
            if (error) return callback(error);

            callback(error);
        });
    });
};
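A sketch of the data shape this migration produces, with hypothetical ids and paths (the app id, version, and timestamp-style path below are illustrative, not taken from the source):

```js
// Before: the backup id doubled as the remote path, and dependsOn held
// comma-separated paths.
const before = {
    id: '2022-04-01-120000-000/app_org.wordpress.cloudronapp_v1.2.3',
    dependsOn: '2022-03-01-120000-000/app_org.wordpress.cloudronapp_v1.2.2'
};

// After: id becomes `${type}_${identifier}_v${packageVersion}_${hat(256)}`,
// the old path moves to remotePath, and dependsOnJson stores the remapped
// ids as a JSON array.
const after = {
    id: 'app_org.wordpress.cloudronapp_v1.2.3_8f3a...', // random hat() suffix
    remotePath: '2022-04-01-120000-000/app_org.wordpress.cloudronapp_v1.2.3',
    dependsOnJson: '["app_org.wordpress.cloudronapp_v1.2.2_c41b..."]'
};
```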
@@ -0,0 +1,22 @@
'use strict';

const async = require('async');

exports.up = function(db, callback) {
    db.all('SELECT * FROM apps', function (error, apps) {
        if (error) return callback(error);

        async.eachSeries(apps, function (app, iteratorDone) {
            const manifest = JSON.parse(app.manifestJson);
            const hasSso = !!manifest.addons['proxyAuth'] || !!manifest.addons['ldap'];
            if (hasSso || !app.sso) return iteratorDone();

            console.log(`Unsetting sso flag of ${app.id}`);
            db.runSql('UPDATE apps SET sso=? WHERE id=?', [ 0, app.id ], iteratorDone);
        }, callback);
    });
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,20 @@
'use strict';

exports.up = function(db, callback) {
    db.all('SELECT * FROM settings WHERE name = ?', [ 'api_server_origin' ], function (error, result) {
        if (error || result.length === 0) return callback(error);

        let consoleOrigin;
        switch (result[0].value) {
        case 'https://api.dev.cloudron.io': consoleOrigin = 'https://console.dev.cloudron.io'; break;
        case 'https://api.staging.cloudron.io': consoleOrigin = 'https://console.staging.cloudron.io'; break;
        default: consoleOrigin = 'https://console.cloudron.io'; break;
        }

        db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'console_server_origin', consoleOrigin ], callback);
    });
};

exports.down = function(db, callback) {
    callback();
};
@@ -0,0 +1,9 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('ALTER TABLE users ADD COLUMN backgroundImage MEDIUMBLOB', callback);
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE users DROP COLUMN backgroundImage', callback);
};
@@ -0,0 +1,12 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('ALTER TABLE apps ADD COLUMN mailboxDisplayName VARCHAR(128) DEFAULT "" NOT NULL', [], callback);
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE apps DROP COLUMN mailboxDisplayName', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};
@@ -0,0 +1,53 @@
'use strict';

const path = require('path'),
    safe = require('safetydance'),
    uuid = require('uuid');

function getMountPoint(dataDir) {
    const output = safe.child_process.execSync(`df --output=target "${dataDir}" | tail -1`, { encoding: 'utf8' });
    if (!output) return dataDir;
    const mountPoint = output.trim();
    if (mountPoint === '/') return dataDir;
    return mountPoint;
}

exports.up = async function(db) {
    // use safe() here because this migration failed midway in 7.2.4
    await safe(db.runSql('ALTER TABLE apps ADD storageVolumeId VARCHAR(128), ADD FOREIGN KEY(storageVolumeId) REFERENCES volumes(id)'));
    await safe(db.runSql('ALTER TABLE apps ADD storageVolumePrefix VARCHAR(128)'));
    await safe(db.runSql('ALTER TABLE apps ADD CONSTRAINT apps_storageVolume UNIQUE (storageVolumeId, storageVolumePrefix)'));

    const apps = await db.runSql('SELECT * FROM apps WHERE dataDir IS NOT NULL');

    for (const app of apps) {
        const allVolumes = await db.runSql('SELECT * FROM volumes');

        console.log(`data-dir (${app.id}): migrating data dir ${app.dataDir}`);

        const mountPoint = getMountPoint(app.dataDir);
        const prefix = path.relative(mountPoint, app.dataDir);

        console.log(`data-dir (${app.id}): migrating to mountpoint ${mountPoint} and prefix ${prefix}`);

        const volume = allVolumes.find(v => v.hostPath === mountPoint);
        if (volume) {
            console.log(`data-dir (${app.id}): using existing volume ${volume.id}`);
            await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ volume.id, prefix, app.id ]);
            continue;
        }

        const id = uuid.v4().replace(/-/g, ''); // to make systemd mount file names more readable
        const name = `appdata-${id}`;
        const type = app.dataDir === mountPoint ? 'filesystem' : 'mountpoint';

        console.log(`data-dir (${app.id}): creating new volume ${id}`);
        await db.runSql('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, mountPoint, type, JSON.stringify({}) ]);
        await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ id, prefix, app.id ]);
    }

    await db.runSql('ALTER TABLE apps DROP COLUMN dataDir');
};

exports.down = async function(/*db*/) {
};
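To see how `getMountPoint` and the prefix computation combine, a minimal sketch with hypothetical paths (`/mnt/data` standing in for a dedicated data disk):

```js
const path = require('path');

// Case 1: the dataDir lives on its own mount, so df reports /mnt/data.
// mountPoint !== dataDir, hence mountType 'mountpoint' and a non-empty prefix.
const prefix1 = path.relative('/mnt/data', '/mnt/data/myapp'); // 'myapp'

// Case 2: the dataDir is on the root filesystem; df reports '/', so
// getMountPoint falls back to the dataDir itself. mountPoint === dataDir,
// hence mountType 'filesystem' and an empty prefix.
const prefix2 = path.relative('/var/lib/myapp', '/var/lib/myapp'); // ''
```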
@@ -0,0 +1,9 @@
'use strict';

exports.up = async function (db) {
    await db.runSql('ALTER TABLE apps ADD COLUMN upstreamUri VARCHAR(256) DEFAULT ""');
};

exports.down = async function (db) {
    await db.runSql('ALTER TABLE apps DROP COLUMN upstreamUri');
};
@@ -0,0 +1,15 @@
'use strict';

exports.up = async function(db) {
    const result = await db.runSql('SELECT * FROM settings WHERE name=?', [ 'backup_config' ]);
    if (!result.length) return;

    const backupConfig = JSON.parse(result[0].value);

    if (backupConfig.encryption && backupConfig.format === 'rsync') backupConfig.encryptedFilenames = true;

    await db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(backupConfig), 'backup_config', ]);
};

exports.down = async function(/* db */) {
};
@@ -0,0 +1,22 @@
'use strict';

exports.up = async function (db) {
    var cmd = 'CREATE TABLE applinks(' +
        'id VARCHAR(128) NOT NULL UNIQUE,' +
        'accessRestrictionJson TEXT,' +
        'creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
        'updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
        'ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,' +
        'label VARCHAR(128),' +
        'tagsJson VARCHAR(2048),' +
        'icon MEDIUMBLOB,' +
        'upstreamUri VARCHAR(256) DEFAULT "",' +
        'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';

    await db.runSql(cmd);
};

exports.down = async function (db) {
    await db.runSql('DROP TABLE applinks');
};
@@ -0,0 +1,17 @@
'use strict';

const async = require('async');

exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN storageQuota BIGINT DEFAULT 0'),
        db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN messagesQuota BIGINT DEFAULT 0'),
    ], callback);
};

exports.down = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE mailboxes DROP COLUMN storageQuota'),
        db.runSql.bind(db, 'ALTER TABLE mailboxes DROP COLUMN messagesQuota')
    ], callback);
};
@@ -0,0 +1,18 @@
'use strict';

const safe = require('safetydance');

exports.up = async function (db) {
    const mailDomains = await db.runSql('SELECT * FROM mail', []);

    for (const mailDomain of mailDomains) {
        let catchAll = safe.JSON.parse(mailDomain.catchAllJson) || [];
        if (catchAll.length === 0) continue;

        catchAll = catchAll.map(a => `${a}@${mailDomain.domain}`);
        await db.runSql('UPDATE mail SET catchAllJson = ? WHERE domain = ?', [ JSON.stringify(catchAll), mailDomain.domain ]);
    }
};

exports.down = async function( /* db */) {
};
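The transformation this migration applies, sketched with a hypothetical domain (`example.com` and the local parts are illustrative):

```js
// Catch-all entries used to store only the local part; storing the full
// address is what lets a catch-all target live on any domain (see the
// 7.3.0 changelog entry above).
const domain = 'example.com';
const before = [ 'info', 'sales' ];
const after = before.map(a => `${a}@${domain}`); // [ 'info@example.com', 'sales@example.com' ]
```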
@@ -0,0 +1,13 @@
'use strict';

exports.up = async function (db) {
    await db.runSql('ALTER TABLE tokens DROP COLUMN scope');
    await db.runSql('ALTER TABLE tokens ADD COLUMN scopeJson TEXT');

    await db.runSql('UPDATE tokens SET scopeJson = ?', [ JSON.stringify({'*':'rw'})]);
};

exports.down = async function (db) {
    await db.runSql('ALTER TABLE tokens ADD COLUMN scope VARCHAR(512) NOT NULL DEFAULT ""');
    await db.runSql('ALTER TABLE tokens DROP COLUMN scopeJson');
};
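The new `scopeJson` column stores a map of scope to access level, and this migration grandfathers every existing token to full access. A sketch of the two shapes (the read-only value is an assumption inferred from the "read only API tokens" changelog entry, not shown in the source):

```js
const fullAccess = { '*': 'rw' }; // what this migration writes for all existing tokens
const readOnly = { '*': 'r' };    // hypothetical shape of a read-only API token
```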
+28 -5
@@ -33,6 +33,7 @@ CREATE TABLE IF NOT EXISTS users(
     resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
     active BOOLEAN DEFAULT 1,
     avatar MEDIUMBLOB NOT NULL,
+    backgroundImage MEDIUMBLOB,
     loginLocationsJson MEDIUMTEXT, // { locations: [{ ip, userAgent, city, country, ts }] }

     INDEX creationTime_index (creationTime),
@@ -57,7 +58,7 @@ CREATE TABLE IF NOT EXISTS tokens(
     accessToken VARCHAR(128) NOT NULL UNIQUE,
     identifier VARCHAR(128) NOT NULL, // resourceId: app id or user id
     clientId VARCHAR(128),
-    scope VARCHAR(512) NOT NULL,
+    scopeJson TEXT,
     expires BIGINT NOT NULL, // FIXME: make this a timestamp
     lastUsedTime TIMESTAMP NULL,
     PRIMARY KEY(accessToken));
@@ -85,13 +86,15 @@ CREATE TABLE IF NOT EXISTS apps(
     enableAutomaticUpdate BOOLEAN DEFAULT 1,
     enableMailbox BOOLEAN DEFAULT 1, // whether sendmail addon is enabled
     mailboxName VARCHAR(128), // mailbox of this app
-    mailboxDomain VARCHAR(128), // mailbox domain of this apps
+    mailboxDomain VARCHAR(128), // mailbox domain of this app
+    mailboxDisplayName VARCHAR(128), // mailbox display name
     enableInbox BOOLEAN DEFAULT 0, // whether recvmail addon is enabled
     inboxName VARCHAR(128), // mailbox of this app
-    inboxDomain VARCHAR(128), // mailbox domain of this apps
+    inboxDomain VARCHAR(128), // mailbox domain of this app
     label VARCHAR(128), // display name
     tagsJson VARCHAR(2048), // array of tags
-    dataDir VARCHAR(256) UNIQUE,
+    storageVolumeId VARCHAR(128),
+    storageVolumePrefix VARCHAR(128),
     taskId INTEGER, // current task
     errorJson TEXT,
     servicesConfigJson TEXT, // app services configuration
@@ -99,9 +102,12 @@ CREATE TABLE IF NOT EXISTS apps(
     appStoreIcon MEDIUMBLOB,
     icon MEDIUMBLOB,
     crontab TEXT,
+    upstreamUri VARCHAR(256) DEFAULT "",

     FOREIGN KEY(mailboxDomain) REFERENCES domains(domain),
     FOREIGN KEY(taskId) REFERENCES tasks(id),
+    FOREIGN KEY(storageVolumeId) REFERENCES volumes(id),
+    UNIQUE (storageVolumeId, storageVolumePrefix),
     PRIMARY KEY(id));

 CREATE TABLE IF NOT EXISTS appPortBindings(
@@ -133,12 +139,14 @@ CREATE TABLE IF NOT EXISTS appEnvVars(

 CREATE TABLE IF NOT EXISTS backups(
     id VARCHAR(128) NOT NULL,
+    remotePath VARCHAR(256) NOT NULL UNIQUE,
+    label VARCHAR(128) DEFAULT "",
     creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
     packageVersion VARCHAR(128) NOT NULL, /* app version or box version */
     encryptionVersion INTEGER, /* when null, unencrypted backup */
     type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
     identifier VARCHAR(128) NOT NULL, /* 'box' or the app id */
-    dependsOn TEXT, /* comma separate list of objects this backup depends on */
+    dependsOnJson TEXT, /* comma separate list of objects this backup depends on */
     state VARCHAR(16) NOT NULL,
     manifestJson TEXT, /* to validate if the app can be installed in this version of box */
     format VARCHAR(16) DEFAULT "tgz",
@@ -209,6 +217,8 @@ CREATE TABLE IF NOT EXISTS mailboxes(
     domain VARCHAR(128),
     active BOOLEAN DEFAULT 1,
     enablePop3 BOOLEAN DEFAULT 0,
+    storageQuota BIGINT DEFAULT 0,
+    messagesQuota BIGINT DEFAULT 0,

     FOREIGN KEY(domain) REFERENCES mail(domain),
     FOREIGN KEY(aliasDomain) REFERENCES mail(domain),
@@ -290,4 +300,17 @@ CREATE TABLE IF NOT EXISTS blobs(
     value MEDIUMBLOB,
     PRIMARY KEY(id));

+CREATE TABLE IF NOT EXISTS appLinks(
+    id VARCHAR(128) NOT NULL UNIQUE,
+    accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
+    creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app was installed
+    updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the last app update was done
+    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, // when this db record was updated (useful for UI caching)
+    label VARCHAR(128), // display name
+    tagsJson VARCHAR(2048), // array of tags
+    icon MEDIUMBLOB,
+    upstreamUri VARCHAR(256) DEFAULT "",
+
+    PRIMARY KEY(id));

 CHARACTER SET utf8 COLLATE utf8_bin;
Generated file: +3390 -685 (diff suppressed because it is too large)
+18 -25
@@ -12,13 +12,13 @@
     },
     "dependencies": {
         "@google-cloud/dns": "^2.2.4",
-        "@google-cloud/storage": "^5.16.1",
+        "@google-cloud/storage": "^5.19.2",
         "@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
-        "async": "^3.2.2",
-        "aws-sdk": "^2.1053.0",
+        "async": "^3.2.3",
+        "aws-sdk": "^2.1115.0",
         "basic-auth": "^2.0.1",
-        "body-parser": "^1.19.1",
-        "cloudron-manifestformat": "^5.15.0",
+        "body-parser": "^1.20.0",
+        "cloudron-manifestformat": "^5.18.0",
         "connect": "^3.7.0",
         "connect-lastmile": "^2.1.1",
         "connect-timeout": "^1.9.0",
@@ -27,39 +27,32 @@
         "cron": "^1.8.2",
         "db-migrate": "^0.11.13",
         "db-migrate-mysql": "^2.2.0",
-        "debug": "^4.3.3",
-        "delay": "^5.0.0",
+        "debug": "^4.3.4",
         "dockerode": "^3.3.1",
         "ejs": "^3.1.6",
         "ejs-cli": "^2.2.3",
-        "express": "^4.17.2",
+        "express": "^4.17.3",
         "ipaddr.js": "^2.0.1",
         "js-yaml": "^4.1.0",
+        "jsdom": "^20.0.0",
         "json": "^11.0.0",
         "jsonwebtoken": "^8.5.1",
-        "ldapjs": "^2.3.1",
+        "ldapjs": "^2.3.2",
         "lodash": "^4.17.21",
         "lodash.chunk": "^4.2.0",
-        "moment": "^2.29.1",
+        "moment": "^2.29.2",
         "moment-timezone": "^0.5.34",
         "morgan": "^1.10.0",
-        "multiparty": "^4.2.2",
+        "multiparty": "^4.2.3",
         "mysql": "^2.18.1",
-        "nodemailer": "^6.7.2",
-        "nodemailer-smtp-transport": "^2.7.4",
-        "once": "^1.4.0",
-        "pretty-bytes": "^5.6.0",
+        "nodemailer": "^6.7.3",
         "progress-stream": "^2.0.0",
         "proxy-middleware": "^0.15.0",
         "qrcode": "^1.5.0",
         "readdirp": "^3.6.0",
         "s3-block-read-stream": "^0.5.0",
         "safetydance": "^2.2.0",
-        "semver": "^7.3.5",
+        "semver": "^7.3.7",
         "speakeasy": "^2.0.0",
         "split": "^1.0.1",
-        "superagent": "^7.0.1",
-        "supererror": "^0.7.2",
+        "superagent": "^7.1.1",
         "tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
         "tar-stream": "^2.2.0",
         "tldjs": "^2.3.1",
@@ -67,18 +60,18 @@
         "underscore": "^1.13.2",
         "uuid": "^8.3.2",
         "validator": "^13.7.0",
-        "ws": "^8.4.0",
+        "ws": "^8.5.0",
         "xml2js": "^0.4.23"
     },
     "devDependencies": {
         "expect.js": "*",
         "hock": "^1.4.1",
         "js2xmlparser": "^4.0.2",
-        "mocha": "^9.1.3",
+        "mocha": "^9.2.2",
         "mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
-        "nock": "^13.2.1",
+        "nock": "^13.2.4",
         "node-sass": "^7.0.1",
-        "recursive-readdir": "^2.2.2"
+        "recursive-readdir": "^2.2.2",
+        "nyc": "^15.1.0"
     },
     "scripts": {
         "test": "./runTests",
@@ -79,10 +79,15 @@ echo "=> Run database migrations"
 cd "${source_dir}"
 BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up

-echo "=> Run tests with mocha"
 TESTS=${DEFAULT_TESTS}
 if [[ $# -gt 0 ]]; then
     TESTS="$*"
 fi

-BOX_ENV=test ./node_modules/mocha/bin/_mocha --bail --no-timeouts --exit -R spec ${TESTS}
+if [[ -z ${COVERAGE+x} ]]; then
+    echo "=> Run tests with mocha"
+    BOX_ENV=test ./node_modules/.bin/mocha --bail --no-timeouts --exit -R spec ${TESTS}
+else
+    echo "=> Run tests with mocha and coverage"
+    BOX_ENV=test ./node_modules/.bin/nyc --reporter=html ./node_modules/.bin/mocha --no-timeouts --exit -R spec ${TESTS}
+fi
+69 -36
@@ -11,7 +11,7 @@ trap exitHandler EXIT
 # change this to a hash when we make a upgrade release
 readonly LOG_FILE="/var/log/cloudron-setup.log"
 readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
-readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
+readonly MINIMUM_MEMORY="960" # this is mostly reported for 1GB main memory (DO 992, EC2 967, Linode 989, Serverdiscounter.com 974)

 readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"

@@ -26,8 +26,8 @@ readonly GREEN='\033[32m'
 readonly DONE='\033[m'

 # verify the system has minimum requirements met
-if [[ "${rootfs_type}" != "ext4" ]]; then
-    echo "Error: Cloudron requires '/' to be ext4" # see #364
+if [[ "${rootfs_type}" != "ext4" && "${rootfs_type}" != "xfs" ]]; then
+    echo "Error: Cloudron requires '/' to be ext4 or xfs" # see #364
     exit 1
 fi

@@ -46,23 +46,30 @@ if [[ "$(uname -m)" != "x86_64" ]]; then
     exit 1
 fi

+if cvirt=$(systemd-detect-virt --container); then
+    echo "Error: Cloudron does not support ${cvirt}, only runs on bare metal or with full hardware virtualization"
+    exit 1
+fi
+
 # do not use is-active in case box service is down and user attempts to re-install
 if systemctl cat box.service >/dev/null 2>&1; then
     echo "Error: Cloudron is already installed. To reinstall, start afresh"
     exit 1
 fi

-initBaseImage="true"
 provider="generic"
 requestedVersion=""
 installServerOrigin="https://api.cloudron.io"
 apiServerOrigin="https://api.cloudron.io"
 webServerOrigin="https://cloudron.io"
+consoleServerOrigin="https://console.cloudron.io"
 sourceTarballUrl=""
 rebootServer="true"
-setupToken=""
+setupToken="" # this is a OTP for securing an installation (https://forum.cloudron.io/topic/6389/add-password-for-initial-configuration)
+appstoreSetupToken=""
+redo="false"

-args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,generate-setup-token" -n "$0" -- "$@")
+args=$(getopt -o "" -l "help,provider:,version:,env:,skip-reboot,generate-setup-token,setup-token:,redo" -n "$0" -- "$@")
 eval set -- "${args}"

 while true; do
@@ -74,17 +81,20 @@ while true; do
         if [[ "$2" == "dev" ]]; then
             apiServerOrigin="https://api.dev.cloudron.io"
             webServerOrigin="https://dev.cloudron.io"
+            consoleServerOrigin="https://console.dev.cloudron.io"
             installServerOrigin="https://api.dev.cloudron.io"
         elif [[ "$2" == "staging" ]]; then
             apiServerOrigin="https://api.staging.cloudron.io"
             webServerOrigin="https://staging.cloudron.io"
+            consoleServerOrigin="https://console.staging.cloudron.io"
             installServerOrigin="https://api.staging.cloudron.io"
         elif [[ "$2" == "unstable" ]]; then
             installServerOrigin="https://api.dev.cloudron.io"
         fi
         shift 2;;
-    --skip-baseimage-init) initBaseImage="false"; shift;;
     --skip-reboot) rebootServer="false"; shift;;
+    --redo) redo="true"; shift;;
+    --setup-token) appstoreSetupToken="$2"; shift 2;;
     --generate-setup-token) setupToken="$(openssl rand -hex 10)"; shift;;
     --) break;;
     *) echo "Unknown option $1"; exit 1;;
@@ -99,14 +109,16 @@ fi

 # Only --help works with mismatched ubuntu
 ubuntu_version=$(lsb_release -rs)
-if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" ]]; then
-    echo "Cloudron requires Ubuntu 16.04, 18.04 or 20.04" > /dev/stderr
+if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" && "${ubuntu_version}" != "22.04" ]]; then
+    echo "Cloudron requires Ubuntu 18.04, 20.04, 22.04" > /dev/stderr
     exit 1
 fi

 if which nginx >/dev/null || which docker >/dev/null || which node > /dev/null; then
-    echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
-    exit 1
+    if [[ "${redo}" == "false" ]]; then
+        echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
+        exit 1
+    fi
 fi

 # Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
@@ -143,17 +155,15 @@ echo ""
 echo " Join us at https://forum.cloudron.io for any questions."
 echo ""

-if [[ "${initBaseImage}" == "true" ]]; then
-    echo "=> Updating apt and installing script dependencies"
-    if ! apt-get update &>> "${LOG_FILE}"; then
-        echo "Could not update package repositories. See ${LOG_FILE}"
-        exit 1
-    fi
+echo "=> Updating apt and installing script dependencies"
+if ! apt-get update &>> "${LOG_FILE}"; then
+    echo "Could not update package repositories. See ${LOG_FILE}"
+    exit 1
+fi

-    if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
-        echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
-        exit 1
-    fi
+if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
+    echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
+    exit 1
+fi

 echo "=> Checking version"
@@ -181,15 +191,13 @@ if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
     exit 1
 fi

-if [[ "${initBaseImage}" == "true" ]]; then
-    echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
-    # initializeBaseUbuntuImage.sh args (provider, infraversion path) are only to support installation of pre 5.3 Cloudrons
-    if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "generic" "../src" &>> "${LOG_FILE}"; then
-        echo "Init script failed. See ${LOG_FILE} for details"
-        exit 1
-    fi
-    echo ""
+echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
+init_ubuntu_script=$(test -f "${box_src_tmp_dir}/scripts/init-ubuntu.sh" && echo "${box_src_tmp_dir}/scripts/init-ubuntu.sh" || echo "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh")
+if ! /bin/bash "${init_ubuntu_script}" &>> "${LOG_FILE}"; then
+    echo "Init script failed. See ${LOG_FILE} for details"
+    exit 1
+fi
+echo ""

 # The provider flag is still used for marketplace images
 echo "=> Installing Cloudron version ${version} (this takes some time) ..."
@@ -204,6 +212,20 @@ fi

 mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('api_server_origin', '${apiServerOrigin}');" 2>/dev/null
 mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('web_server_origin', '${webServerOrigin}');" 2>/dev/null
+mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('console_server_origin', '${consoleServerOrigin}');" 2>/dev/null
+
+if [[ -n "${appstoreSetupToken}" ]]; then
+    if ! setupResponse=$(curl -sX POST -H "Content-type: application/json" --data "{\"setupToken\": \"${appstoreSetupToken}\"}" "${apiServerOrigin}/api/v1/cloudron_setup_done"); then
+        echo "Could not complete setup. See ${LOG_FILE} for details"
+        exit 1
+    fi
+
+    cloudronId=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronId"])')
+    mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('cloudron_id', '${cloudronId}');" 2>/dev/null
+
+    appstoreApiToken=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronToken"])')
+    mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('appstore_api_token', '${appstoreApiToken}');" 2>/dev/null
+fi

 echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
 while true; do
@@ -214,20 +236,31 @@ while true; do
     sleep 10
 done

-if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
-    ip='<IP>'
-fi
+ip4=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
+ip6=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)

+url4=""
+url6=""
+fallbackUrl=""
 if [[ -z "${setupToken}" ]]; then
-    url="https://${ip}"
+    [[ -n "${ip4}" ]] && url4="https://${ip4}"
+    [[ -n "${ip6}" ]] && url6="https://[${ip6}]"
+    [[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>"
 else
-    url="https://${ip}/?setupToken=${setupToken}"
+    [[ -n "${ip4}" ]] && url4="https://${ip4}/?setupToken=${setupToken}"
+    [[ -n "${ip6}" ]] && url6="https://[${ip6}]/?setupToken=${setupToken}"
+    [[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>?setupToken=${setupToken}"
 fi
-echo -e "\n\n${GREEN}After reboot, visit ${url} and accept the self-signed certificate to finish setup.${DONE}\n"
+echo -e "\n\n${GREEN}After reboot, visit one of the following URLs and accept the self-signed certificate to finish setup.${DONE}\n"
+[[ -n "${url4}" ]] && echo -e " * ${GREEN}${url4}${DONE}"
+[[ -n "${url6}" ]] && echo -e " * ${GREEN}${url6}${DONE}"
+[[ -n "${fallbackUrl}" ]] && echo -e " * ${GREEN}${fallbackUrl}${DONE}"

 if [[ "${rebootServer}" == "true" ]]; then
     systemctl stop box mysql # sometimes mysql ends up having corrupt privilege tables

-    read -p "The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
+    # https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#ANSI_002dC-Quoting
+    read -p $'\n'"The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
     yn=${yn:-y}
     case $yn in
         [Yy]* ) exitHandler; systemctl reboot;;
+26 -32
@@ -8,7 +8,7 @@ set -eu -o pipefail
 PASTEBIN="https://paste.cloudron.io"
 OUT="/tmp/cloudron-support.log"
 LINE="\n========================================================\n"
-CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQVilclYAIu+ioDp/sgzzFz6YU0hPcRYY7ze/LiF/lC7uQqK062O54BFXTvQ3ehtFZCx3bNckjlT2e6gB8Qq07OM66De4/S/g+HJW4TReY2ppSPMVNag0TNGxDzVH8pPHOysAm33LqT2b6L/wEXwC6zWFXhOhHjcMqXvi8Ejaj20H1HVVcf/j8qs5Thkp9nAaFTgQTPu8pgwD8wDeYX1hc9d0PYGesTADvo6HF4hLEoEnefLw7PaStEbzk2fD3j7/g5r5HcgQQXBe74xYZ/1gWOX2pFNuRYOBSEIrNfJEjFJsqk3NR1+ZoMGK7j+AZBR4k0xbrmncQLcQzl6MMDzkp support@cloudron.io"
+CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGWS+930b8QdzbchGljt3KSljH9wRhYvht8srrtQHdzg support@cloudron.io"
 HELP_MESSAGE="
 This script collects diagnostic information to help debug server related issues.

@@ -77,6 +77,31 @@ if [[ "`df --output="avail" /tmp | sed -n 2p`" -lt "5120" ]]; then
     exit 1
 fi

+if [[ "${enableSSH}" == "true" ]]; then
+    ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
+
+    ssh_user="cloudron-support"
+    keys_file="/home/cloudron-support/.ssh/authorized_keys"
+
+    echo -e $LINE"SSH"$LINE >> $OUT
+    echo "Username: ${ssh_user}" >> $OUT
+    echo "Port: ${ssh_port}" >> $OUT
+    echo "Key file: ${keys_file}" >> $OUT
+
+    echo -n "Enabling ssh access for the Cloudron support team..."
+    mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
+    touch "${keys_file}" # required for concat to work
+    if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
+        echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
+        chmod 600 "${keys_file}"
+        chown "${ssh_user}" "${keys_file}"
+    fi
+
+    echo "Done"
+
+    exit 0
+fi
+
 echo -n "Generating Cloudron Support stats..."

 # clear file
@@ -119,37 +144,6 @@ iptables -L &>> $OUT

 echo "Done"

-if [[ "${enableSSH}" == "true" ]]; then
-    ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
-    permit_root_login=$(grep -q ^PermitRootLogin.*yes /etc/ssh/sshd_config && echo "yes" || echo "no")
-
-    # support.js uses similar logic
-    if [[ -d /home/ubuntu ]]; then
-        ssh_user="ubuntu"
-        keys_file="/home/ubuntu/.ssh/authorized_keys"
-    else
-        ssh_user="root"
-        keys_file="/root/.ssh/authorized_keys"
-    fi
-
-    echo -e $LINE"SSH"$LINE >> $OUT
-    echo "Username: ${ssh_user}" >> $OUT
-    echo "Port: ${ssh_port}" >> $OUT
-    echo "PermitRootLogin: ${permit_root_login}" >> $OUT
-    echo "Key file: ${keys_file}" >> $OUT
-
-    echo -n "Enabling ssh access for the Cloudron support team..."
-    mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
-    touch "${keys_file}" # required for concat to work
-    if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
-        echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
-        chmod 600 "${keys_file}"
-        chown "${ssh_user}" "${keys_file}"
-    fi
-
-    echo "Done"
-fi
-
 echo -n "Uploading information..."
 paste_key=$(curl -X POST ${PASTEBIN}/documents --silent --data-binary "@$OUT" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
 echo "Done"
@@ -1,5 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is run on the base ubuntu. Put things here which are managed by ubuntu
|
||||
# This script is also run after ubuntu upgrade
|
||||
|
||||
set -euv -o pipefail
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
@@ -29,10 +32,37 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
|
||||
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
|
||||
case "${ubuntu_version}" in
|
||||
16.04)
|
||||
gpg_package="gnupg"
|
||||
mysql_package="mysql-server-5.7"
|
||||
ntpd_package=""
|
||||
python_package="python2.7"
|
||||
nginx_package="" # we use custom package for TLS v1.3 support
|
||||
;;
|
||||
18.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-5.7"
|
||||
ntpd_package=""
|
||||
python_package="python2.7"
|
||||
nginx_package="" # we use custom package for TLS v1.3 support
|
||||
;;
|
||||
20.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-8.0"
|
||||
ntpd_package="systemd-timesyncd"
|
||||
python_package="python3.8"
|
||||
nginx_package="nginx-full"
|
||||
;;
|
||||
22.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-8.0"
|
||||
ntpd_package="systemd-timesyncd"
|
||||
python_package="python3.10"
|
||||
nginx_package="nginx-full"
|
||||
;;
|
||||
esac
|
||||
|
||||
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
|
||||
mysql_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "mysql-server-8.0" || echo "mysql-server-5.7")
|
||||
ntpd_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "systemd-timesyncd" || echo "")
|
||||
apt-get -y install --no-install-recommends \
|
||||
acl \
|
||||
apparmor \
|
||||
@@ -45,11 +75,12 @@ apt-get -y install --no-install-recommends \
|
||||
$gpg_package \
|
||||
ipset \
|
||||
iptables \
|
||||
libpython2.7 \
|
||||
lib${python_package} \
|
||||
linux-generic \
|
||||
logrotate \
|
||||
$mysql_package \
|
||||
nfs-common \
|
||||
$nginx_package \
|
||||
$ntpd_package \
|
||||
openssh-server \
|
||||
pwgen \
|
||||
@@ -62,12 +93,6 @@ apt-get -y install --no-install-recommends \
|
||||
unzip \
|
||||
xfsprogs
|
||||
|
||||
echo "==> installing nginx for xenial for TLSv3 support"
|
||||
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/nginx.deb
|
||||
rm /tmp/nginx.deb
|
||||
|
||||
# on some providers like scaleway the sudo file is changed and we want to keep the old one
|
||||
apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends sudo
|
||||
|
||||
@@ -75,36 +100,7 @@ apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
readonly node_version=16.13.1
|
||||
mkdir -p /usr/local/node-${node_version}
|
||||
curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-${node_version}
|
||||
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
|
||||
apt-get install -y --no-install-recommends python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
|
||||
# create systemd drop-in file. if you channge options here, be sure to fixup installer.sh as well
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
readonly docker_version=20.10.12
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.9-1_amd64.deb" -o /tmp/containerd.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "overlay2" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of overlay2"
|
||||
exit 1
|
||||
fi
|
||||
apt-get install -y --no-install-recommends $python_package # Install python which is required for npm rebuild
|
||||
|
||||
# do not upgrade grub because it might prompt user and break this script
|
||||
echo "==> Enable memory accounting"
|
||||
@@ -112,30 +108,26 @@ apt-get -y --no-upgrade --no-install-recommends install grub2-common
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub

echo "==> Downloading docker images"
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
    echo "No infra_version.js found"
    exit 1
fi

images=$(node -e "var i = require('${arg_infraversionpath}/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")

echo -e "\tPulling docker images: ${images}"
for image in ${images}; do
    docker pull "${image}"
    docker pull "${image%@sha256:*}" # this will tag the image for readability
done
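The second pull exists because the tags in infra_version.js pin images by digest, and pulling by digest alone leaves no human-readable tag in `docker images`. The `%@sha256:*` is plain bash suffix stripping; with a hypothetical digest-pinned tag for illustration:

```bash
image="cloudron/base:3.2.0@sha256:0123456789abcdef"  # hypothetical digest-pinned tag
echo "${image%@sha256:*}"
# cloudron/base:3.2.0  <- digest suffix removed; pulling this re-labels the same image
```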

echo "==> Install collectd"
# without this, libnotify4 will install gnome-shell
apt-get install -y libnotify4 --no-install-recommends
if ! apt-get install -y --no-install-recommends libcurl3-gnutls collectd collectd-utils; then
    # FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
    echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
    sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
fi
apt-get install -y libnotify4 libcurl3-gnutls --no-install-recommends
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
[[ "${ubuntu_version}" == "20.04" ]] && echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
if [[ "${ubuntu_version}" == "22.04" ]]; then
    readonly launchpad="https://launchpad.net/ubuntu/+source/collectd/5.12.0-9/+build/23189375/+files"
    cd /tmp && wget -q "${launchpad}/collectd_5.12.0-9_amd64.deb" "${launchpad}/collectd-utils_5.12.0-9_amd64.deb" "${launchpad}/collectd-core_5.12.0-9_amd64.deb" "${launchpad}/libcollectdclient1_5.12.0-9_amd64.deb"
    cd /tmp && apt install -y --no-install-recommends ./libcollectdclient1_5.12.0-9_amd64.deb ./collectd-core_5.12.0-9_amd64.deb ./collectd_5.12.0-9_amd64.deb ./collectd-utils_5.12.0-9_amd64.deb && rm -f /tmp/collectd_*.deb
    echo -e "\nLD_PRELOAD=/usr/lib/python3.10/config-3.10-x86_64-linux-gnu/libpython3.10.so" >> /etc/default/collectd
else
    if ! apt-get install -y --no-install-recommends collectd collectd-utils; then
        # FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
        echo "Failed to install collectd, continuing anyway. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
    fi

    if [[ "${ubuntu_version}" == "20.04" ]]; then
        echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
    fi
fi
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf

# some hosts like atlantic install ntp which conflicts with timedatectl. https://serverfault.com/questions/1024770/ubuntu-20-04-time-sync-problems-and-possibly-incorrect-status-information
echo "==> Configuring host"
@@ -153,7 +145,7 @@ sed -e '/Port 22/ i # NOTE: Cloudron only supports moving SSH to port 202. See h

# https://bugs.launchpad.net/ubuntu/+source/base-files/+bug/1701068
echo "==> Disabling motd news"
if [ -f "/etc/default/motd-news" ]; then
if [[ -f "/etc/default/motd-news" ]]; then
    sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
fi

@@ -185,8 +177,23 @@ systemctl stop systemd-resolved || true
systemctl disable systemd-resolved || true

# on vultr, ufw is enabled by default. we have our own firewall
ufw disable
ufw disable || true

# we need unbound to work as this is required for installer.sh to do any DNS requests
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: no" > /etc/unbound/unbound.conf.d/cloudron-network.conf
echo -e "server:\n\tinterface: 127.0.0.1\n" > /etc/unbound/unbound.conf.d/cloudron-network.conf
systemctl restart unbound
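installer.sh relies on this local unbound instance for every DNS lookup it makes, so it is worth confirming the resolver answers before proceeding. A quick probe (dig ships in Ubuntu's dnsutils package; the domain is just an example, and dig only exits non-zero when the server does not respond at all):

```bash
# fails loudly if unbound is not answering on loopback
dig +short @127.0.0.1 cloudron.io >/dev/null || echo "unbound not answering - check 'systemctl status unbound'"
```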

# Ubuntu 22 has private home directories by default (https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/)
sed -e 's/^HOME_MODE\([[:space:]]\+\).*$/HOME_MODE\10755/' -i /etc/login.defs
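In the replacement, `\1` is a single-digit backreference (GNU sed), so `\10755` means "group 1, then the literal 0755": the whitespace between key and value is preserved and only the mode is swapped. In isolation, with a typical Ubuntu 22.04 default as input:

```bash
printf 'HOME_MODE\t0750\n' | sed -e 's/^HOME_MODE\([[:space:]]\+\).*$/HOME_MODE\10755/'
# HOME_MODE	0755
```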

# create the yellowtent user. system user has different numeric range, no age and won't show in login/gdm UI
# the nologin will also disable su/login
if ! id yellowtent 2>/dev/null; then
    useradd --system --comment "Cloudron Box" --create-home --shell /usr/sbin/nologin yellowtent
fi

# add support user (no password, sudo)
if ! id cloudron-support 2>/dev/null; then
    useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
fi

+51 -35
@@ -69,14 +69,19 @@ readonly ubuntu_codename=$(lsb_release -cs)

readonly is_update=$(systemctl is-active -q box && echo "yes" || echo "no")

log "Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION)"
log "Updating from $(cat $box_src_dir/VERSION 2>/dev/null) to $(cat $box_src_tmp_dir/VERSION 2>/dev/null)"

log "updating docker"
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
readonly docker_version=20.10.14
if ! which docker 2>/dev/null || [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
log "installing/updating docker"

# create systemd drop-in file already to make sure images are with correct driver
mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf

readonly docker_version=20.10.12
if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
# there are 3 packages for docker - containerd, CLI and the daemon
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.9-1_amd64.deb" -o /tmp/containerd.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.5.11-1_amd64.deb" -o /tmp/containerd.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb

@@ -86,41 +91,40 @@ if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]];
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
fi
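On the Go-template used in the guard: `docker version --format {{.Client.Version}}` prints only the client version string, which is why it can be string-compared against `${docker_version}` directly. Quoting the template is slightly safer in shells with brace expansion enabled:

```bash
docker version --format '{{.Client.Version}}'
# 20.10.14
```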

# we want at least nginx 1.14 for TLS v1.3 support. Ubuntu 20/22 already has nginx 1.18
# Ubuntu 18 OpenSSL does not have TLS v1.3 support, so we use the upstream nginx packages
readonly nginx_version=$(nginx -v 2>&1)
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
if [[ "${ubuntu_version}" == "20.04" ]]; then
if [[ "${nginx_version}" == *"Ubuntu"* ]]; then
log "switching nginx to ubuntu package"
prepare_apt_once
apt remove -y nginx
apt install -y nginx-full
fi
elif [[ "${ubuntu_version}" == "18.04" ]]; then
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing/updating nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb

prepare_apt_once

# apt install with install deps (as opposed to dpkg -i)
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
rm /tmp/nginx.deb
fi
fi
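One subtlety the `2>&1` above handles: `nginx -v` writes its version banner to stderr, not stdout, so without the redirect `${nginx_version}` would be empty. For reference:

```bash
nginx -v 2>&1
# nginx version: nginx/1.18.0 (Ubuntu)   <- the "(Ubuntu)" suffix is what the 20.04 branch matches on
```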

if ! which mount.nfs; then
log "installing nfs-common"
prepare_apt_once
apt install -y nfs-common
fi

if ! which sshfs; then
log "installing sshfs"
prepare_apt_once
apt install -y sshfs
fi

log "updating node"
readonly node_version=16.13.1
if [[ "$(node --version)" != "v${node_version}" ]]; then
readonly node_version=16.14.2
if ! which node 2>/dev/null || [[ "$(node --version)" != "v${node_version}" ]]; then
log "installing/updating node ${node_version}"
mkdir -p /usr/local/node-${node_version}
$curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-${node_version}
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
rm -rf /usr/local/node-14.17.6
rm -rf /usr/local/node-16.13.1
fi

# this is here (and not in updater.js) because rebuild requires the above node
# note that rebuild requires the above node
for try in `seq 1 10`; do
# for reasons unknown, the dtrace package will fail. but rebuilding second time will work

@@ -138,15 +142,21 @@ if [[ ${try} -eq 10 ]]; then
fi

log "downloading new addon images"
images=$(node -e "var i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
images=$(node -e "let i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")

log "\tPulling docker images: ${images}"
if ! curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip; then
docker_registry=registry.ipv6.docker.com
else
docker_registry=registry-1.docker.io
fi

for image in ${images}; do
while ! docker pull "${image}"; do # this pulls the image using the sha256
while ! docker pull "${docker_registry}/${image}"; do # this pulls the image using the sha256
log "Could not pull ${image}"
sleep 5
done
while ! docker pull "${image%@sha256:*}"; do # this will tag the image for readability
while ! docker pull "${docker_registry}/${image%@sha256:*}"; do # this will tag the image for readability
log "Could not pull ${image%@sha256:*}"
sleep 5
done
@@ -159,15 +169,21 @@ CLOUDRON_SYSLOG_VERSION="1.1.0"
while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLOUDRON_SYSLOG_VERSION} ]]; do
rm -rf "${CLOUDRON_SYSLOG_DIR}"
mkdir -p "${CLOUDRON_SYSLOG_DIR}"
if npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
# verbatim is not needed in node 18 since that is the default there. in node 16, ipv4 is preferred and this breaks on ipv6 only servers
if NODE_OPTIONS="--dns-result-order=verbatim" npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
log "Failed to install cloudron-syslog, trying again"
sleep 5
done
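On the `--dns-result-order=verbatim` workaround: node 16's `dns.lookup` reorders results to put IPv4 addresses first, which strands IPv6-only servers when the IPv4 route goes nowhere. The effect is easy to observe (registry.npmjs.org used as the example host):

```bash
# prints addresses in the resolver's own order instead of IPv4-first
NODE_OPTIONS="--dns-result-order=verbatim" node -e "dns.lookup('registry.npmjs.org', { all: true }, (err, addrs) => console.log(addrs))"
```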

if ! id "${user}" 2>/dev/null; then
useradd "${user}" -m
log "creating cloudron-support user"
if ! id cloudron-support 2>/dev/null; then
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
fi

log "locking the ${user} account"
usermod --shell /usr/sbin/nologin "${user}"
passwd --lock "${user}"

if [[ "${is_update}" == "yes" ]]; then
log "stop box service for update"
${box_src_dir}/setup/stop.sh

Executable +32
@@ -0,0 +1,32 @@
#!/bin/bash

set -eu -o pipefail

readonly logfile="/home/yellowtent/platformdata/logs/box.log"

if [[ ${EUID} -ne 0 ]]; then
    echo "This script should be run as root." > /dev/stderr
    exit 1
fi

echo "This will re-create all the containers. Services will go down for a bit."

read -p "Do you want to proceed? (y/N) " -n 1 -r choice
echo

if [[ ! $choice =~ ^[Yy]$ ]]; then
    exit 1
fi

echo -n "Re-creating addon containers (this takes a while) ."
line_count=$(cat /home/yellowtent/platformdata/logs/box.log | wc -l)
sed -e 's/"version": ".*",/"version":"48.0.0",/' -i /home/yellowtent/platformdata/INFRA_VERSION
systemctl restart box

while ! tail -n "+${line_count}" "${logfile}" | grep -q "platform is ready"; do
    echo -n "."
    sleep 2
done

echo -e "\nDone.\nThe Cloudron dashboard will say 'Configuring (Queued)' for each app. The apps will come up in a short while."

+8 -11
@@ -71,6 +71,8 @@ mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" \
mkdir -p "${PLATFORM_DATA_DIR}/update"
mkdir -p "${PLATFORM_DATA_DIR}/sftp/ssh" # sftp keys
mkdir -p "${PLATFORM_DATA_DIR}/firewall"
mkdir -p "${PLATFORM_DATA_DIR}/sshfs"
mkdir -p "${PLATFORM_DATA_DIR}/cifs"

# ensure backups folder exists and is writeable
mkdir -p /var/backups
@@ -127,19 +129,13 @@ systemctl restart unbound
systemctl restart cloudron-syslog

log "Configuring sudoers"
rm -f /etc/sudoers.d/${USER}
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
rm -f /etc/sudoers.d/${USER} /etc/sudoers.d/cloudron
cp "${script_dir}/start/sudoers" /etc/sudoers.d/cloudron

log "Configuring collectd"
rm -rf /etc/collectd /var/log/collectd.log
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
if [[ "${ubuntu_version}" == "20.04" ]]; then
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
if ! grep -q LD_PRELOAD /etc/default/collectd; then
echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
fi
fi
systemctl restart collectd

log "Configuring sysctl"
@@ -178,7 +174,7 @@ fi

# worker_rlimit_nofile in nginx config can be max this number
mkdir -p /etc/systemd/system/nginx.service.d
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf; then
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf 2>/dev/null; then
echo -e "[Service]\nLimitNOFILE=16384\n" > /etc/systemd/system/nginx.service.d/cloudron.conf
fi

@@ -213,7 +209,8 @@ done

readonly mysql_root_password="password"
mysqladmin -u root -ppassword password password # reset default root password
if [[ "${ubuntu_version}" == "20.04" ]]; then
readonly mysqlVersion=$(mysql -NB -u root -p${mysql_root_password} -e 'SELECT VERSION()' 2>/dev/null)
if [[ "${mysqlVersion}" == "8.0."* ]]; then
# mysql 8 added a new caching_sha2_password scheme which mysqljs does not support
mysql -u root -p${mysql_root_password} -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '${mysql_root_password}';"
fi
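To confirm the ALTER USER took effect (mysqljs only speaks the older `mysql_native_password` handshake, hence the downgrade from `caching_sha2_password`), the authentication plugin can be read back from the grant tables:

```bash
mysql -u root -p${mysql_root_password} -NB -e "SELECT plugin FROM mysql.user WHERE user = 'root' AND host = 'localhost'"
# mysql_native_password
```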

@@ -234,7 +231,7 @@ log "Changing ownership"
# note, change ownership after db migrate. this allows db migrate to move files around as root and then we can fix it up here
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
chown -R "${USER}" /etc/cloudron
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall"
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall" "${PLATFORM_DATA_DIR}/sshfs" "${PLATFORM_DATA_DIR}/cifs"
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
chown "${USER}:${USER}" "${APPS_DATA_DIR}"

@@ -41,16 +41,14 @@ ipxtables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,80,202,44

# whitelist any user ports. we used to use --dports but it has a 15 port limit (XT_MULTI_PORTS)
ports_json="/home/yellowtent/platformdata/firewall/ports.json"
if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(','))" 2>/dev/null); then
IFS=',' arr=(${allowed_tcp_ports})
for p in "${arr[@]}"; do
if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(' '))" 2>/dev/null); then
for p in $allowed_tcp_ports; do
ipxtables -A CLOUDRON -p tcp -m tcp --dport "${p}" -j ACCEPT
done
fi

if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(','))" 2>/dev/null); then
IFS=',' arr=(${allowed_udp_ports})
for p in "${arr[@]}"; do
if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(' '))" 2>/dev/null); then
for p in $allowed_udp_ports; do
ipxtables -A CLOUDRON -p udp -m udp --dport "${p}" -j ACCEPT
done
fi

@@ -63,6 +61,9 @@ ipset create cloudron_ldap_allowlist6 hash:net family inet6 || true
ipset flush cloudron_ldap_allowlist6

ldap_allowlist_json="/home/yellowtent/platformdata/firewall/ldap_allowlist.txt"
# delete any existing redirect rule
$iptables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 2>/dev/null || true
$ip6tables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 2>/dev/null || true
if [[ -f "${ldap_allowlist_json}" ]]; then
# without the -n block, any last line without a newline won't be read!
while read -r line || [[ -n "$line" ]]; do
@@ -76,8 +77,10 @@ if [[ -f "${ldap_allowlist_json}" ]]; then
done < "${ldap_allowlist_json}"

# ldap server we expose 3004 and also redirect from standard ldaps port 636
ipxtables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$iptables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$iptables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist src -p tcp --dport 3004 -j ACCEPT

$ip6tables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$ip6tables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist6 src -p tcp --dport 3004 -j ACCEPT
fi

@@ -144,6 +147,7 @@ for port in 3306 5432 6379 27017; do
$iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
done

# Add the rate limit chain to input chain
$iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
$ip6tables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $ip6tables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT

+33 -12
@@ -4,30 +4,51 @@

printf "**********************************************************************\n\n"

cache_file="/var/cache/cloudron-motd-cache"
readonly cache_file4="/var/cache/cloudron-motd-cache4"
readonly cache_file6="/var/cache/cloudron-motd-cache6"

if [[ -z "$(ls -A /home/yellowtent/platformdata/addons/mail/dkim)" ]]; then
if [[ ! -f "${cache_file}" ]]; then
curl --fail --connect-timeout 2 --max-time 2 -q https://ipv4.api.cloudron.io/api/v1/helper/public_ip --output "${cache_file}" || true
fi
if [[ -f "${cache_file}" ]]; then
ip=$(sed -n -e 's/.*"ip": "\(.*\)"/\1/p' /var/cache/cloudron-motd-cache)
url4=""
url6=""
fallbackUrl=""

function detectIp() {
if [[ ! -f "${cache_file4}" ]]; then
ip4=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
[[ -n "${ip4}" ]] && echo "${ip4}" > "${cache_file4}"
else
ip='<IP>'
ip4=$(cat "${cache_file4}")
fi

if [[ ! -f "${cache_file6}" ]]; then
ip6=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
[[ -n "${ip6}" ]] && echo "${ip6}" > "${cache_file6}"
else
ip6=$(cat "${cache_file6}")
fi

if [[ ! -f /etc/cloudron/SETUP_TOKEN ]]; then
url="https://${ip}"
[[ -n "${ip4}" ]] && url4="https://${ip4}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>"
else
setupToken="$(cat /etc/cloudron/SETUP_TOKEN)"
url="https://${ip}/?setupToken=${setupToken}"
[[ -n "${ip4}" ]] && url4="https://${ip4}/?setupToken=${setupToken}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]/?setupToken=${setupToken}"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>?setupToken=${setupToken}"
fi
}
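The two cache files make the motd cheap to render on every login, but they also mean a changed public IP keeps being shown until they are removed. Forcing re-detection is just:

```bash
rm -f /var/cache/cloudron-motd-cache4 /var/cache/cloudron-motd-cache6
# the next login (or run of the motd script) re-queries api.cloudron.io and rewrites the caches
```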

if [[ -z "$(ls -A /home/yellowtent/platformdata/addons/mail/dkim)" ]]; then
detectIp

printf "\t\t\tWELCOME TO CLOUDRON\n"
printf "\t\t\t-------------------\n"

printf '\n\e[1;32m%-6s\e[m\n\n' "Visit ${url} on your browser and accept the self-signed certificate to finish setup."
printf "Cloudron overview - https://docs.cloudron.io/ \n"
printf '\n\e[1;32m%-6s\e[m\n' "Visit one of the following URLs on your browser and accept the self-signed certificate to finish setup."
[[ -n "${url4}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${url4}"
[[ -n "${url6}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${url6}"
[[ -n "${fallbackUrl}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${fallbackUrl}"
printf "\nCloudron overview - https://docs.cloudron.io/ \n"
printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
else
printf "\t\t\tNOTE TO CLOUDRON ADMINS\n"
|
||||
##############################################################################
|
||||
|
||||
Hostname "localhost"
|
||||
#FQDNLookup true
|
||||
FQDNLookup false
|
||||
#BaseDir "/var/lib/collectd"
|
||||
#PluginDir "/usr/lib/collectd"
|
||||
#TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db"
|
||||
@@ -232,7 +232,7 @@ LoadPlugin swap
|
||||
|
||||
<Plugin write_graphite>
|
||||
<Node "graphing">
|
||||
Host "localhost"
|
||||
Host "127.0.0.1"
|
||||
Port "2003"
|
||||
Protocol "tcp"
|
||||
LogSendErrors true
|
||||
|
||||
@@ -14,7 +14,7 @@ def read():
|
||||
for d in disks:
|
||||
device = d[0]
|
||||
if 'devicemapper' in d[1] or not device.startswith('/dev/'): continue
|
||||
instance = device[len('/dev/'):].replace('/', '_') # see #348
|
||||
instance = device[len('/dev/'):].replace('/', '_').replace('.', '_') # see #348
|
||||
|
||||
try:
|
||||
st = os.statvfs(d[1]) # handle disk removal
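The added `.replace('.', '_')` matters because the instance name ends up in a graphite metric path, where `.` is the hierarchy separator; a device name containing dots would otherwise split one metric into bogus sub-trees. The same sanitization expressed in shell, with a hypothetical device name:

```bash
echo "disk/by-id/ata-Foo.Bar" | tr './' '__'
# disk_by-id_ata-Foo_Bar
```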

@@ -1,6 +1,9 @@
# sudo logging breaks journalctl output with very long urls (systemd bug)
Defaults !syslog

Defaults!/home/yellowtent/box/src/scripts/checkvolume.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/checkvolume.sh

Defaults!/home/yellowtent/box/src/scripts/clearvolume.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/clearvolume.sh

@@ -64,3 +67,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmount.sh

Defaults!/home/yellowtent/box/src/scripts/remountmount.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/remountmount.sh

cloudron-support ALL=(ALL) NOPASSWD: ALL

@@ -6,7 +6,7 @@ server:
interface: 127.0.0.1
interface: 172.18.0.1
ip-freebind: yes
do-ip6: no
do-ip6: yes
access-control: 127.0.0.1 allow
access-control: 172.18.0.1/16 allow
cache-max-negative-ttl: 30
@@ -1,26 +0,0 @@
'use strict';

exports = module.exports = {
    verifyToken
};

const assert = require('assert'),
    BoxError = require('./boxerror.js'),
    safe = require('safetydance'),
    tokens = require('./tokens.js'),
    users = require('./users.js');

async function verifyToken(accessToken) {
    assert.strictEqual(typeof accessToken, 'string');

    const token = await tokens.getByAccessToken(accessToken);
    if (!token) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'No such token');

    const user = await users.get(token.identifier);
    if (!user) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not found');
    if (!user.active) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not active');

    await safe(tokens.update(token.id, { lastUsedTime: new Date() })); // ignore any error

    return user;
}

+15 -15
@@ -47,16 +47,16 @@ function urlBase64Encode(string) {
}

function b64(str) {
var buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
const buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
return urlBase64Encode(buf.toString('base64'));
}

function getModulus(pem) {
assert(Buffer.isBuffer(pem));

var stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
const stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
if (!stdout) return null;
var match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
const match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
if (!match) return null;
return Buffer.from(match[1], 'hex');
}
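For context on `urlBase64Encode`/`b64`: ACME (RFC 8555) requires the JOSE base64url alphabet, which is ordinary base64 with `+/` swapped for `-_` and the `=` padding stripped. The same transform at the shell:

```bash
printf 'any?input>' | base64 | tr '+/' '-_' | tr -d '='
# YW55P2lucHV0Pg   (standard base64 would have ended in "Pg==")
```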
@@ -324,7 +324,7 @@ Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFile
if (!csrDer) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
if (!safe.fs.writeFileSync(csrFilePath, csrDer)) throw new BoxError(BoxError.FS_ERROR, safe.error); // bookkeeping. inspect with openssl req -text -noout -in hostname.csr -inform der

await safe(fs.promises.rmdir(tmpdir, { recursive: true }));
await safe(fs.promises.rm(tmpdir, { recursive: true, force: true }));

debug('createKeyAndCsr: csr file (DER) saved at %s', csrFilePath);

@@ -522,32 +522,32 @@ Acme2.prototype.loadDirectory = async function () {
});
};

Acme2.prototype.getCertificate = async function (vhost, domain, paths) {
assert.strictEqual(typeof vhost, 'string');
Acme2.prototype.getCertificate = async function (fqdn, domain, paths) {
assert.strictEqual(typeof fqdn, 'string');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof paths, 'object');

debug(`getCertificate: start acme flow for ${vhost} from ${this.caDirectory}`);
debug(`getCertificate: start acme flow for ${fqdn} from ${this.caDirectory}`);

if (vhost !== domain && this.wildcard) { // bare domain is not part of wildcard SAN
vhost = dns.makeWildcard(vhost);
debug(`getCertificate: will get wildcard cert for ${vhost}`);
if (fqdn !== domain && this.wildcard) { // bare domain is not part of wildcard SAN
fqdn = dns.makeWildcard(fqdn);
debug(`getCertificate: will get wildcard cert for ${fqdn}`);
}

await this.loadDirectory();
await this.acmeFlow(vhost, domain, paths);
await this.acmeFlow(fqdn, domain, paths);
};

async function getCertificate(vhost, domain, paths, options) {
assert.strictEqual(typeof vhost, 'string'); // this can also be a wildcard domain (for alias domains)
async function getCertificate(fqdn, domain, paths, options) {
assert.strictEqual(typeof fqdn, 'string'); // this can also be a wildcard domain (for alias domains)
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof paths, 'object');
assert.strictEqual(typeof options, 'object');

await promiseRetry({ times: 3, interval: 0, debug }, async function () {
debug(`getCertificate: for vhost ${vhost} and domain ${domain}`);
debug(`getCertificate: for fqdn ${fqdn} and domain ${domain}`);

const acme = new Acme2(options || { });
return await acme.getCertificate(vhost, domain, paths);
return await acme.getCertificate(fqdn, domain, paths);
});
}
+20 -12
@@ -38,14 +38,14 @@ async function setHealth(app, health) {
debug(`setHealth: ${app.id} (${app.fqdn}) switched from ${lastHealth} to healthy`);

// do not send mails for dev apps
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, AuditSource.HEALTH_MONITOR, { app: app });
if (!app.debugMode) await eventlog.add(eventlog.ACTION_APP_UP, AuditSource.HEALTH_MONITOR, { app: app });
}
} else if (Math.abs(now - healthTime) > UNHEALTHY_THRESHOLD) {
if (lastHealth === apps.HEALTH_HEALTHY) {
debug(`setHealth: marking ${app.id} (${app.fqdn}) as unhealthy since not seen for more than ${UNHEALTHY_THRESHOLD/(60 * 1000)} minutes`);

// do not send mails for dev apps
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, AuditSource.HEALTH_MONITOR, { app: app });
if (!app.debugMode) await eventlog.add(eventlog.ACTION_APP_DOWN, AuditSource.HEALTH_MONITOR, { app: app });
}
} else {
debug(`setHealth: ${app.id} (${app.fqdn}) waiting for ${(UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000} to update health`);
@@ -70,25 +70,35 @@ async function checkAppHealth(app, options) {

const manifest = app.manifest;

const [error, data] = await safe(docker.inspect(app.containerId));
if (error || !data || !data.State) return await setHealth(app, apps.HEALTH_ERROR);
if (data.State.Running !== true) return await setHealth(app, apps.HEALTH_DEAD);
let healthCheckUrl, host;
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) {
healthCheckUrl = app.upstreamUri;
host = '';
} else {
const [error, data] = await safe(docker.inspect(app.containerId));
if (error || !data || !data.State) return await setHealth(app, apps.HEALTH_ERROR);
if (data.State.Running !== true) return await setHealth(app, apps.HEALTH_DEAD);

// non-appstore apps may not have healthCheckPath
if (!manifest.healthCheckPath) return await setHealth(app, apps.HEALTH_HEALTHY);
// non-appstore apps may not have healthCheckPath
if (!manifest.healthCheckPath) return await setHealth(app, apps.HEALTH_HEALTHY);

healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
host = app.fqdn;
}

const healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
const [healthCheckError, response] = await safe(superagent
.get(healthCheckUrl)
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
.set('Host', host) // required for some apache configs with rewrite rules
.set('User-Agent', 'Mozilla (CloudronHealth)') // required for some apps (e.g. minio)
.redirects(0)
.ok(() => true)
.timeout(options.timeout * 1000));

if (healthCheckError) {
await apps.appendLogLine(app, `=> Healthcheck error: ${healthCheckError}`);
await setHealth(app, apps.HEALTH_UNHEALTHY);
} else if (response.status > 403) { // 2xx and 3xx are ok. even 401 and 403 are ok for now (for WP sites)
await apps.appendLogLine(app, `=> Healthcheck error got response status ${response.status}`);
await setHealth(app, apps.HEALTH_UNHEALTHY);
} else {
await setHealth(app, apps.HEALTH_HEALTHY);
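The superagent call reduces to a plain HTTP GET with a spoofed Host header and no redirect following, where any status up to 403 counts as healthy. The curl equivalent, with a made-up container IP, port and path:

```bash
curl -s -o /dev/null -w '%{http_code}\n' --max-time 10 \
    -H 'Host: app.example.com' -H 'User-Agent: Mozilla (CloudronHealth)' \
    http://172.18.0.5:8000/healthcheck
# 200   (curl does not follow redirects by default, matching .redirects(0))
```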
@@ -129,9 +139,7 @@ async function processDockerEvents(options) {
const [error, info] = await safe(getContainerInfo(containerId));
const program = error ? containerId : (info.addonName || info.app.fqdn);
const now = Date.now();

// do not send mails for dev apps
const notifyUser = !(info.app && info.app.debugMode) && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);
const notifyUser = !info?.app?.debugMode && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);

debug(`OOM ${program} notifyUser: ${notifyUser}. lastOomTime: ${gLastOomMailTime} (now: ${now})`);

+178
@@ -0,0 +1,178 @@
'use strict';

exports = module.exports = {
    list,
    listByUser,
    add,
    get,
    update,
    remove,
    getIcon
};

const assert = require('assert'),
    apps = require('./apps.js'),
    database = require('./database.js'),
    BoxError = require('./boxerror.js'),
    uuid = require('uuid'),
    safe = require('safetydance'),
    superagent = require('superagent'),
    validator = require('validator'),
    jsdom = require('jsdom'),
    debug = require('debug')('box:applinks');

const APPLINKS_FIELDS = [ 'id', 'accessRestrictionJson', 'creationTime', 'updateTime', 'ts', 'label', 'tagsJson', 'icon', 'upstreamUri' ].join(',');

function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    assert(result.tagsJson === null || typeof result.tagsJson === 'string');
    result.tags = safe.JSON.parse(result.tagsJson) || [];
    delete result.tagsJson;

    assert(result.accessRestrictionJson === null || typeof result.accessRestrictionJson === 'string');
    result.accessRestriction = safe.JSON.parse(result.accessRestrictionJson);
    if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
    delete result.accessRestrictionJson;

    result.ts = new Date(result.ts).getTime();

    result.icon = result.icon ? result.icon : null;
}

async function list() {
    const results = await database.query(`SELECT ${APPLINKS_FIELDS} FROM applinks ORDER BY upstreamUri`);

    results.forEach(postProcess);

    return results;
}

async function listByUser(user) {
    assert.strictEqual(typeof user, 'object');

    const result = await list();
    return result.filter((app) => apps.canAccess(app, user));
}

async function detectMetaInfo(applink) {
    assert.strictEqual(typeof applink, 'object');

    const [error, response] = await safe(superagent.get(applink.upstreamUri));
    if (error || !response.text) throw new BoxError(BoxError.BAD_FIELD, 'cannot fetch upstream uri for favicon and label');

    // fixup upstreamUri to match the redirect
    if (response.redirects && response.redirects.length) {
        debug(`detectMetaInfo: found redirect from ${applink.upstreamUri} to ${response.redirects[0]}`);
        applink.upstreamUri = response.redirects[0];
    }

    if (applink.favicon && applink.label) return;

    const dom = new jsdom.JSDOM(response.text);
    if (!applink.icon) {
        let favicon = '';
        if (dom.window.document.querySelector('link[rel="apple-touch-icon"]')) favicon = dom.window.document.querySelector('link[rel="apple-touch-icon"]').href;
        if (!favicon.endsWith('.png') && dom.window.document.querySelector('meta[name="msapplication-TileImage"]')) favicon = dom.window.document.querySelector('meta[name="msapplication-TileImage"]').content;
        if (!favicon.endsWith('.png') && dom.window.document.querySelector('link[rel="shortcut icon"]')) favicon = dom.window.document.querySelector('link[rel="shortcut icon"]').href;
        if (!favicon.endsWith('.png') && dom.window.document.querySelector('link[rel="icon"]')) favicon = dom.window.document.querySelector('link[rel="icon"]').href;
        if (!favicon.endsWith('.png') && dom.window.document.querySelector('meta[itemprop="image"]')) favicon = dom.window.document.querySelector('meta[itemprop="image"]').content;

        if (favicon) {
            if (favicon.startsWith('/')) favicon = applink.upstreamUri + favicon;

            const [error, response] = await safe(superagent.get(favicon));
            if (error) console.error(`Failed to fetch icon ${favicon}: `, error);
            else if (response.ok && response.headers['content-type'] === 'image/png') applink.icon = response.body;
            else console.error(`Failed to fetch icon ${favicon}: statusCode=${response.status}`);
        } else {
            console.error(`Unable to find a suitable icon for ${applink.upstreamUri}`);
        }
    }

    if (!applink.label) {
        if (dom.window.document.querySelector('meta[property="og:title"]')) applink.label = dom.window.document.querySelector('meta[property="og:title"]').content;
        else if (dom.window.document.querySelector('meta[property="og:site_name"]')) applink.label = dom.window.document.querySelector('meta[property="og:site_name"]').content;
        else if (dom.window.document.title) applink.label = dom.window.document.title;
    }
}
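The favicon cascade works purely off DOM queries, preferring `.png` candidates in a fixed order. A minimal standalone run of the first probe (jsdom assumed installed, as in this module; `getAttribute` used here to sidestep href/base-URL resolution):

```bash
node -e '
const { JSDOM } = require("jsdom");
const dom = new JSDOM(`<head><link rel="apple-touch-icon" href="/apple-touch-icon.png"></head>`);
const el = dom.window.document.querySelector(`link[rel="apple-touch-icon"]`);
console.log(el && el.getAttribute("href"));
'
# /apple-touch-icon.png
```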

async function add(applink) {
    assert.strictEqual(typeof applink, 'object');
    assert.strictEqual(typeof applink.upstreamUri, 'string');

    debug(`add: ${applink.upstreamUri}`, applink);

    if (applink.icon) {
        if (!validator.isBase64(applink.icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
        applink.icon = Buffer.from(applink.icon, 'base64');
    }

    await detectMetaInfo(applink);

    const data = {
        id: uuid.v4(),
        accessRestrictionJson: applink.accessRestriction ? JSON.stringify(applink.accessRestriction) : null,
        label: applink.label || '',
        tagsJson: applink.tags ? JSON.stringify(applink.tags) : null,
        icon: applink.icon || null,
        upstreamUri: applink.upstreamUri
    };

    const query = 'INSERT INTO applinks (id, accessRestrictionJson, label, tagsJson, icon, upstreamUri) VALUES (?, ?, ?, ?, ?, ?)';
    const args = [ data.id, data.accessRestrictionJson, data.label, data.tagsJson, data.icon, data.upstreamUri ];

    const [error] = await safe(database.query(query, args));
    if (error) throw error;

    return data.id;
}

async function get(applinkId) {
    assert.strictEqual(typeof applinkId, 'string');

    const result = await database.query(`SELECT ${APPLINKS_FIELDS} FROM applinks WHERE id = ?`, [ applinkId ]);
    if (result.length === 0) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');

    postProcess(result[0]);

    return result[0];
}

async function update(applinkId, applink) {
    assert.strictEqual(typeof applinkId, 'string');
    assert.strictEqual(typeof applink, 'object');
    assert.strictEqual(typeof applink.upstreamUri, 'string');

    debug(`update: ${applinkId} ${applink.upstreamUri}`, applink);

    if (applink.icon) {
        if (!validator.isBase64(applink.icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
        applink.icon = Buffer.from(applink.icon, 'base64');
    }

    await detectMetaInfo(applink);

    const query = 'UPDATE applinks SET label=?, icon=?, upstreamUri=?, tagsJson=?, accessRestrictionJson=? WHERE id = ?';
    const args = [ applink.label, applink.icon || null, applink.upstreamUri, applink.tags ? JSON.stringify(applink.tags) : null, applink.accessRestriction ? JSON.stringify(applink.accessRestriction) : null, applinkId ];

    const result = await database.query(query, args);
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');
}

async function remove(applinkId) {
    assert.strictEqual(typeof applinkId, 'string');

    debug(`remove: ${applinkId}`);

    const result = await database.query(`DELETE FROM applinks WHERE id = ?`, [ applinkId ]);
    if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');
}

async function getIcon(applinkId) {
    assert.strictEqual(typeof applinkId, 'string');

    const applink = await get(applinkId);

    return applink.icon;
}

+278 -106
@@ -27,6 +27,7 @@ exports = module.exports = {
setAccessRestriction,
setOperators,
setCrontab,
setUpstreamUri,
setLabel,
setIcon,
setTags,
@@ -42,7 +43,7 @@ exports = module.exports = {
setMailbox,
setInbox,
setLocation,
setDataDir,
setStorage,
repair,

restore,
@@ -54,18 +55,23 @@ exports = module.exports = {

backup,
listBackups,
updateBackup,

getTask,
getLogPaths,
getLogs,

appendLogLine,

getCertificate,

start,
stop,
restart,

exec,
createExec,
startExec,
getExec,

checkManifestConstraints,
downloadManifest,
@@ -78,7 +84,7 @@ exports = module.exports = {
schedulePendingTasks,
restartAppsUsingAddons,

getDataDir,
getStorageDir,
getIcon,
getMemoryLimit,
getLimits,
@@ -156,7 +162,8 @@ const appstore = require('./appstore.js'),
mail = require('./mail.js'),
manifestFormat = require('cloudron-manifestformat'),
mounts = require('./mounts.js'),
once = require('once'),
notifications = require('./notifications.js'),
once = require('./once.js'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
@@ -165,6 +172,7 @@ const appstore = require('./appstore.js'),
semver = require('semver'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
spawn = require('child_process').spawn,
split = require('split'),
superagent = require('superagent'),
@@ -175,18 +183,21 @@ const appstore = require('./appstore.js'),
util = require('util'),
uuid = require('uuid'),
validator = require('validator'),
volumes = require('./volumes.js'),
_ = require('underscore');

const APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuShares',
'apps.label', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson', 'apps.operatorsJson',
'apps.sso', 'apps.debugModeJson', 'apps.enableBackup', 'apps.proxyAuth', 'apps.containerIp', 'apps.crontab',
'apps.creationTime', 'apps.updateTime', 'apps.enableAutomaticUpdate',
'apps.enableMailbox', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableInbox', 'apps.inboxName', 'apps.inboxDomain',
'apps.dataDir', 'apps.ts', 'apps.healthTime', '(apps.icon IS NOT NULL) AS hasIcon', '(apps.appStoreIcon IS NOT NULL) AS hasAppStoreIcon' ].join(',');
'apps.creationTime', 'apps.updateTime', 'apps.enableAutomaticUpdate', 'apps.upstreamUri',
'apps.enableMailbox', 'apps.mailboxDisplayName', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableInbox', 'apps.inboxName', 'apps.inboxDomain',
'apps.storageVolumeId', 'apps.storageVolumePrefix', 'apps.ts', 'apps.healthTime', '(apps.icon IS NOT NULL) AS hasIcon', '(apps.appStoreIcon IS NOT NULL) AS hasAppStoreIcon' ].join(',');

// const PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId' ].join(',');

const CHECKVOLUME_CMD = path.join(__dirname, 'scripts/checkvolume.sh');

function validatePortBindings(portBindings, manifest) {
assert.strictEqual(typeof portBindings, 'object');
assert.strictEqual(typeof manifest, 'object');
@@ -232,7 +243,10 @@ function validatePortBindings(portBindings, manifest) {

if (!portBindings) return null;

for (let portName in portBindings) {
const tcpPorts = manifest.tcpPorts || { };
const udpPorts = manifest.udpPorts || { };

for (const portName in portBindings) {
if (!/^[a-zA-Z0-9_]+$/.test(portName)) return new BoxError(BoxError.BAD_FIELD, `${portName} is not a valid environment variable in portBindings`);

const hostPort = portBindings[portName];
@@ -240,14 +254,11 @@ function validatePortBindings(portBindings, manifest) {
if (RESERVED_PORTS.indexOf(hostPort) !== -1) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in portBindings`);
if (RESERVED_PORT_RANGES.find(range => (hostPort >= range[0] && hostPort <= range[1]))) return new BoxError(BoxError.BAD_FIELD, `Port ${hostPort} for ${portName} is reserved in portBindings`);
if (ALLOWED_PORTS.indexOf(hostPort) === -1 && (hostPort <= 1023 || hostPort > 65535)) return new BoxError(BoxError.BAD_FIELD, `${hostPort} for ${portName} is not in permitted range in portBindings`);
}

// it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and portBindings. missing values implies
// that the user wants the service disabled
const tcpPorts = manifest.tcpPorts || { };
const udpPorts = manifest.udpPorts || { };
for (let portName in portBindings) {
if (!(portName in tcpPorts) && !(portName in udpPorts)) return new BoxError(BoxError.BAD_FIELD, `Invalid portBindings ${portName}`);
// it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and portBindings. missing values implies the service is disabled
const portSpec = tcpPorts[portName] || udpPorts[portName];
if (!portSpec) return new BoxError(BoxError.BAD_FIELD, `Invalid portBinding ${portName}`);
if (portSpec.readOnly && portSpec.defaultValue !== hostPort) return new BoxError(BoxError.BAD_FIELD, `portBinding ${portName} is readOnly and cannot have a different value than the default`);
}

return null;
@@ -299,6 +310,18 @@ function translateSecondaryDomains(secondaryDomains) {
function parseCrontab(crontab) {
assert(crontab === null || typeof crontab === 'string');

// https://www.man7.org/linux/man-pages/man5/crontab.5.html#EXTENSIONS
const KNOWN_EXTENSIONS = {
'@service' : '@service', // runs once
'@reboot' : '@service',
'@yearly' : '0 0 1 1 *',
'@annually' : '0 0 1 1 *',
'@monthly' : '0 0 1 * *',
'@weekly' : '0 0 * * 0',
'@daily' : '0 0 * * *',
'@hourly' : '0 * * * *',
};

const result = [];
if (!crontab) return result;
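With the extension table in place, a line like `@daily <cmd>` is rewritten to a regular five-field schedule before validation. The `@`-branch regex in isolation, with a hypothetical command path:

```bash
node -e 'const line = "@daily /app/code/cleanup.sh"; const parts = /^(@\S+)\s+(.+)$/.exec(line); console.log(parts.slice(1));'
# [ '@daily', '/app/code/cleanup.sh' ]
```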
@@ -306,20 +329,28 @@ function parseCrontab(crontab) {
for (let i = 0; i < lines.length; i++) {
const line = lines[i].trim();
if (!line || line.startsWith('#')) continue;
const parts = /^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)$/.exec(line);
if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
const schedule = parts.slice(1, 6).join(' ');
const command = parts[6];
if (line.startsWith('@')) {
const parts = /^(@\S+)\s+(.+)$/.exec(line);
if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
const [, extension, command] = parts;
if (!KNOWN_EXTENSIONS[extension]) throw new BoxError(BoxError.BAD_FIELD, `Unknown extension pattern at line ${i+1}`);
result.push({ schedule: KNOWN_EXTENSIONS[extension], command });
} else {
const parts = /^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)$/.exec(line);
if (!parts) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron configuration at line ${i+1}`);
const schedule = parts.slice(1, 6).join(' ');
const command = parts[6];

try {
new CronJob('00 ' + schedule, function() {}); // second is disallowed
} catch (ex) {
throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern at line ${i+1}`);
try {
new CronJob('00 ' + schedule, function() {}); // second is disallowed
} catch (ex) {
throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern at line ${i+1}`);
}

if (command.length === 0) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern. Command must not be empty at line ${i+1}`); // not possible with the regexp we have

result.push({ schedule, command });
}

if (command.length === 0) throw new BoxError(BoxError.BAD_FIELD, `Invalid cron pattern. Command must not be empty at line ${i+1}`); // not possible with the regexp we have

result.push({ schedule, command });
}

return result;
@@ -427,6 +458,23 @@ function validateBackupFormat(format) {
return new BoxError(BoxError.BAD_FIELD, 'Invalid backup format');
}

function validateUpstreamUri(upstreamUri) {
assert.strictEqual(typeof upstreamUri, 'string');

if (!upstreamUri) return null;

const uri = safe(() => new URL(upstreamUri));
if (!uri) return new BoxError(BoxError.BAD_FIELD, `upstreamUri is invalid: ${safe.error.message}`);
if (uri.protocol !== 'http:' && uri.protocol !== 'https:') return new BoxError(BoxError.BAD_FIELD, 'upstreamUri has an unsupported scheme');
if (uri.search || uri.hash) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have search or hash');
if (uri.pathname !== '/') return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

// we use the uri in a named location @wellknown-upstream. nginx does not support having paths in it
if (upstreamUri.endsWith('/')) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot have a path');

return null;
}
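The pathname rule plus the trailing-slash rule together mean only bare origins pass, since the nginx named location mentioned in the comment cannot carry a path. Spot-checking the same constraints with the WHATWG `URL` class (example hosts):

```bash
node -e '
const ok = (u) => { const x = new URL(u); return (x.protocol === "http:" || x.protocol === "https:") && x.pathname === "/" && !x.search && !x.hash && !u.endsWith("/"); };
console.log(ok("https://wiki.example.com"));        // true
console.log(ok("https://wiki.example.com/docs"));   // false - has a path
'
```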
|
||||
|
||||
function validateLabel(label) {
|
||||
if (label === null) return null;
|
||||
|
||||
@@ -454,28 +502,29 @@ function validateEnv(env) {
|
||||
return null;
|
||||
}
|
||||
|
||||
function validateDataDir(dataDir) {
|
||||
if (dataDir === null) return null;
|
||||
async function checkStorage(app, volumeId, prefix) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof volumeId, 'string');
|
||||
assert.strictEqual(typeof prefix, 'string');
|
||||
|
||||
if (!path.isAbsolute(dataDir)) return new BoxError(BoxError.BAD_FIELD, `${dataDir} is not an absolute path`);
|
||||
if (dataDir.endsWith('/')) return new BoxError(BoxError.BAD_FIELD, `${dataDir} contains trailing slash`);
|
||||
if (path.normalize(dataDir) !== dataDir) return new BoxError(BoxError.BAD_FIELD, `${dataDir} is not a normalized path`);
|
||||
const volume = await volumes.get(volumeId);
|
||||
if (volume === null) throw new BoxError(BoxError.BAD_FIELD, 'Storage volume not found');
|
||||
|
||||
// nfs shares will have the directory mounted already
|
||||
let stat = safe.fs.lstatSync(dataDir);
|
||||
if (stat) {
|
||||
if (!stat.isDirectory()) return new BoxError(BoxError.BAD_FIELD, `${dataDir} is not a directory`);
|
||||
let entries = safe.fs.readdirSync(dataDir);
|
||||
if (!entries) return new BoxError(BoxError.BAD_FIELD, `${dataDir} could not be listed`);
|
||||
if (entries.length !== 0) return new BoxError(BoxError.BAD_FIELD, `${dataDir} is not empty. If this is the root of a mounted volume, provide a subdirectory.`);
|
||||
}
|
||||
const status = await volumes.getStatus(volume);
|
||||
if (status.state !== 'active') throw new BoxError(BoxError.BAD_FIELD, 'Volume is not active');
|
||||
|
||||
// backup logic relies on paths not overlapping (because it recurses)
|
||||
if (dataDir.startsWith(paths.APPS_DATA_DIR)) return new BoxError(BoxError.BAD_FIELD, `${dataDir} cannot be inside apps data`);
|
||||
if (path.isAbsolute(prefix)) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" must be a relative path`);
|
||||
if (prefix.endsWith('/')) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" contains trailing slash`);
|
||||
if (prefix !== '' && path.normalize(prefix) !== prefix) throw new BoxError(BoxError.BAD_FIELD, `prefix "${prefix}" is not a normalized path`);
|
||||
|
||||
// if we made it this far, it cannot start with any of these realistically
|
||||
const fhs = [ '/bin', '/boot', '/etc', '/lib', '/lib32', '/lib64', '/proc', '/run', '/sbin', '/tmp', '/usr' ];
|
||||
if (fhs.some((p) => dataDir.startsWith(p))) return new BoxError(BoxError.BAD_FIELD, `${dataDir} cannot be placed inside this location`);
|
||||
const sourceDir = await getStorageDir(app);
|
||||
const targetDir = path.join(volume.hostPath, prefix);
|
||||
const rel = path.relative(sourceDir, targetDir);
|
||||
if (!rel.startsWith('../') && rel.split('/').length > 1) throw new BoxError(BoxError.BAD_FIELD, 'Only one level subdirectory moves are supported');
|
||||
|
||||
const [error] = await safe(shell.promises.sudo('checkStorage', [ CHECKVOLUME_CMD, targetDir, sourceDir ], {}));
|
||||
if (error && error.code === 2) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} is not empty`);
|
||||
if (error && error.code === 3) throw new BoxError(BoxError.BAD_FIELD, `Target directory ${targetDir} does not support chown`);
|
||||
|
||||
return null;
|
||||
}
|
||||
@@ -507,34 +556,52 @@ function getDuplicateErrorDetails(errorMessage, locations, domainObjectMap, port
        if (portBindings[portName] === parseInt(match[1])) return new BoxError(BoxError.ALREADY_EXISTS, `Port ${match[1]} is in use`);
    }

-    if (match[2] === 'dataDir') {
-        return new BoxError(BoxError.BAD_FIELD, `Data directory ${match[1]} is in use`);
+    if (match[2] === 'apps_storageVolume') {
+        return new BoxError(BoxError.BAD_FIELD, `Storage directory ${match[1]} is in use`);
    }

    return new BoxError(BoxError.ALREADY_EXISTS, `${match[2]} '${match[1]}' is in use`);
}

-function getDataDir(app, dataDir) {
-    assert(dataDir === null || typeof dataDir === 'string');
+async function getStorageDir(app) {
+    assert.strictEqual(typeof app, 'object');

-    return dataDir || path.join(paths.APPS_DATA_DIR, app.id, 'data');
+    if (!app.storageVolumeId) return path.join(paths.APPS_DATA_DIR, app.id, 'data');
+    const volume = await volumes.get(app.storageVolumeId);
+    if (!volume) throw new BoxError(BoxError.NOT_FOUND, 'Volume not found'); // not possible
+    return path.join(volume.hostPath, app.storageVolumePrefix);
}

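In prose: an app without a `storageVolumeId` keeps its data under the built-in apps data tree, otherwise the directory is the volume's `hostPath` joined with the per-app prefix. A sketch with hypothetical values (the actual `APPS_DATA_DIR` constant lives in `paths.js`; the value below is an assumption):

```js
// Hypothetical values, mirroring the two branches of getStorageDir() above.
const path = require('path');
const APPS_DATA_DIR = '/home/yellowtent/appsdata'; // assumed value for illustration

function storageDirFor(app, volume) {
    if (!app.storageVolumeId) return path.join(APPS_DATA_DIR, app.id, 'data'); // default location
    return path.join(volume.hostPath, app.storageVolumePrefix);               // volume-backed
}

console.log(storageDirFor({ id: 'abc' }, null)); // /home/yellowtent/appsdata/abc/data
console.log(storageDirFor({ id: 'abc', storageVolumeId: 'v1', storageVolumePrefix: 'abc' },
    { hostPath: '/mnt/volume1' }));              // /mnt/volume1/abc
```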
+function removeCertificateKeys(app) {
+    if (app.certificate) delete app.certificate.key;
+    app.secondaryDomains.forEach(sd => { if (sd.certificate) delete sd.certificate.key; });
+    app.aliasDomains.forEach(ad => { if (ad.certificate) delete ad.certificate.key; });
+    app.redirectDomains.forEach(rd => { if (rd.certificate) delete rd.certificate.key; });
+}
+
function removeInternalFields(app) {
-    return _.pick(app,
+    const result = _.pick(app,
        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId',
-        'subdomain', 'domain', 'fqdn', 'crontab',
+        'subdomain', 'domain', 'fqdn', 'certificate', 'crontab', 'upstreamUri',
        'accessRestriction', 'manifest', 'portBindings', 'iconUrl', 'memoryLimit', 'cpuShares', 'operators',
        'sso', 'debugMode', 'reverseProxyConfig', 'enableBackup', 'creationTime', 'updateTime', 'ts', 'tags',
-        'label', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'env', 'enableAutomaticUpdate', 'dataDir', 'mounts',
-        'enableMailbox', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain');
+        'label', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'env', 'enableAutomaticUpdate',
+        'storageVolumeId', 'storageVolumePrefix', 'mounts',
+        'enableMailbox', 'mailboxDisplayName', 'mailboxName', 'mailboxDomain', 'enableInbox', 'inboxName', 'inboxDomain');
+
+    removeCertificateKeys(result);
+    return result;
}

// non-admins can only see these
function removeRestrictedFields(app) {
-    return _.pick(app,
-        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction', 'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso',
-        'subdomain', 'domain', 'fqdn', 'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'enableBackup');
+    const result = _.pick(app,
+        'id', 'appStoreId', 'installationState', 'error', 'runState', 'health', 'taskId', 'accessRestriction',
+        'secondaryDomains', 'redirectDomains', 'aliasDomains', 'sso', 'subdomain', 'domain', 'fqdn', 'certificate',
+        'manifest', 'portBindings', 'iconUrl', 'creationTime', 'ts', 'tags', 'label', 'enableBackup', 'upstreamUri');
+
+    removeCertificateKeys(result);
+    return result;
}

async function getIcon(app, options) {

@@ -632,30 +699,35 @@ function postProcess(result) {
    const subdomains = JSON.parse(result.subdomains),
        domains = JSON.parse(result.domains),
        subdomainTypes = JSON.parse(result.subdomainTypes),
-        subdomainEnvironmentVariables = JSON.parse(result.subdomainEnvironmentVariables);
+        subdomainEnvironmentVariables = JSON.parse(result.subdomainEnvironmentVariables),
+        subdomainCertificateJsons = JSON.parse(result.subdomainCertificateJsons);

    delete result.subdomains;
    delete result.domains;
    delete result.subdomainTypes;
    delete result.subdomainEnvironmentVariables;
+    delete result.subdomainCertificateJsons;

    result.secondaryDomains = [];
    result.redirectDomains = [];
    result.aliasDomains = [];
    for (let i = 0; i < subdomainTypes.length; i++) {
+        const subdomain = subdomains[i], domain = domains[i], certificate = safe.JSON.parse(subdomainCertificateJsons[i]);

        if (subdomainTypes[i] === exports.LOCATION_TYPE_PRIMARY) {
-            result.subdomain = subdomains[i];
-            result.domain = domains[i];
+            result.subdomain = subdomain;
+            result.domain = domain;
+            result.certificate = certificate;
        } else if (subdomainTypes[i] === exports.LOCATION_TYPE_SECONDARY) {
-            result.secondaryDomains.push({ domain: domains[i], subdomain: subdomains[i], environmentVariable: subdomainEnvironmentVariables[i] });
+            result.secondaryDomains.push({ domain, subdomain, certificate, environmentVariable: subdomainEnvironmentVariables[i] });
        } else if (subdomainTypes[i] === exports.LOCATION_TYPE_REDIRECT) {
-            result.redirectDomains.push({ domain: domains[i], subdomain: subdomains[i] });
+            result.redirectDomains.push({ domain, subdomain, certificate });
        } else if (subdomainTypes[i] === exports.LOCATION_TYPE_ALIAS) {
-            result.aliasDomains.push({ domain: domains[i], subdomain: subdomains[i] });
+            result.aliasDomains.push({ domain, subdomain, certificate });
        }
    }

-    let envNames = JSON.parse(result.envNames), envValues = JSON.parse(result.envValues);
+    const envNames = JSON.parse(result.envNames), envValues = JSON.parse(result.envValues);
    delete result.envNames;
    delete result.envValues;
    result.env = {};

@@ -663,7 +735,7 @@ function postProcess(result) {
        if (envNames[i]) result.env[envNames[i]] = envValues[i];
    }

-    let volumeIds = JSON.parse(result.volumeIds);
+    const volumeIds = JSON.parse(result.volumeIds);
    delete result.volumeIds;
    let volumeReadOnlys = JSON.parse(result.volumeReadOnlys);
    delete result.volumeReadOnlys;

@@ -756,9 +828,11 @@ async function add(id, appStoreId, manifest, subdomain, domain, portBindings, da
        tagsJson = data.tags ? JSON.stringify(data.tags) : null,
        mailboxName = data.mailboxName || null,
        mailboxDomain = data.mailboxDomain || null,
+        mailboxDisplayName = data.mailboxDisplayName || '',
        reverseProxyConfigJson = data.reverseProxyConfig ? JSON.stringify(data.reverseProxyConfig) : null,
        servicesConfigJson = data.servicesConfig ? JSON.stringify(data.servicesConfig) : null,
        enableMailbox = data.enableMailbox || false,
+        upstreamUri = data.upstreamUri || '',
        icon = data.icon || null;

    const queries = [];

@@ -766,10 +840,11 @@ async function add(id, appStoreId, manifest, subdomain, domain, portBindings, da
    queries.push({
        query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuShares, '
            + 'sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, servicesConfigJson, icon, '
-            + 'enableMailbox) '
-            + ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
+            + 'enableMailbox, mailboxDisplayName, upstreamUri) '
+            + ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
        args: [ id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuShares,
-            sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, servicesConfigJson, icon, enableMailbox ]
+            sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, servicesConfigJson, icon,
+            enableMailbox, mailboxDisplayName, upstreamUri ]
    });

    queries.push({

@@ -992,9 +1067,9 @@ async function getDomainObjectMap() {
// each query simply joins the apps table with another table by id. we then join the full results together
const PB_QUERY = 'SELECT id, GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes FROM apps LEFT JOIN appPortBindings ON apps.id = appPortBindings.appId GROUP BY apps.id';
const ENV_QUERY = 'SELECT id, JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues FROM apps LEFT JOIN appEnvVars ON apps.id = appEnvVars.appId GROUP BY apps.id';
-const SUBDOMAIN_QUERY = 'SELECT id, JSON_ARRAYAGG(locations.subdomain) AS subdomains, JSON_ARRAYAGG(locations.domain) AS domains, JSON_ARRAYAGG(locations.type) AS subdomainTypes, JSON_ARRAYAGG(locations.environmentVariable) AS subdomainEnvironmentVariables FROM apps LEFT JOIN locations ON apps.id = locations.appId GROUP BY apps.id';
+const SUBDOMAIN_QUERY = 'SELECT id, JSON_ARRAYAGG(locations.subdomain) AS subdomains, JSON_ARRAYAGG(locations.domain) AS domains, JSON_ARRAYAGG(locations.type) AS subdomainTypes, JSON_ARRAYAGG(locations.environmentVariable) AS subdomainEnvironmentVariables, JSON_ARRAYAGG(locations.certificateJson) AS subdomainCertificateJsons FROM apps LEFT JOIN locations ON apps.id = locations.appId GROUP BY apps.id';
const MOUNTS_QUERY = 'SELECT id, JSON_ARRAYAGG(appMounts.volumeId) AS volumeIds, JSON_ARRAYAGG(appMounts.readOnly) AS volumeReadOnlys FROM apps LEFT JOIN appMounts ON apps.id = appMounts.appId GROUP BY apps.id';
-const APPS_QUERY = `SELECT ${APPS_FIELDS_PREFIXED}, hostPorts, environmentVariables, portTypes, envNames, envValues, subdomains, domains, subdomainTypes, subdomainEnvironmentVariables, volumeIds, volumeReadOnlys FROM apps`
+const APPS_QUERY = `SELECT ${APPS_FIELDS_PREFIXED}, hostPorts, environmentVariables, portTypes, envNames, envValues, subdomains, domains, subdomainTypes, subdomainEnvironmentVariables, subdomainCertificateJsons, volumeIds, volumeReadOnlys FROM apps`
    + ` LEFT JOIN (${PB_QUERY}) AS q1 on q1.id = apps.id`
    + ` LEFT JOIN (${ENV_QUERY}) AS q2 on q2.id = apps.id`
    + ` LEFT JOIN (${SUBDOMAIN_QUERY}) AS q3 on q3.id = apps.id`

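Each subquery aggregates an app's child rows into parallel JSON arrays (one element per row), which `postProcess()` then zips back together by index. A minimal sketch with hypothetical row values (the actual `LOCATION_TYPE_*` constants are strings defined elsewhere in this module):

```js
// Sketch of what postProcess() does with the parallel JSON_ARRAYAGG columns.
const row = {
    subdomains: '["blog", "www"]',
    domains: '["example.com", "example.com"]',
    subdomainTypes: '["primary", "alias"]' // hypothetical type values
};

const subdomains = JSON.parse(row.subdomains),
    domains = JSON.parse(row.domains),
    types = JSON.parse(row.subdomainTypes);

// zip by index: the i-th element of every array describes the same location row
const locations = types.map((type, i) => ({ type, subdomain: subdomains[i], domain: domains[i] }));
console.log(locations); // [ { type: 'primary', ... }, { type: 'alias', ... } ]
```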
@@ -1112,10 +1187,12 @@ async function onTaskFinished(error, appId, installationState, taskId, auditSour
        await eventlog.add(eventlog.ACTION_APP_UPDATE_FINISH, auditSource, { app, toManifest, fromManifest, success, errorMessage });
        break;
    }
-    case exports.ISTATE_PENDING_BACKUP:
-        await eventlog.add(eventlog.ACTION_APP_BACKUP_FINISH, auditSource, { app, success, errorMessage, backupId: task.result });
+    case exports.ISTATE_PENDING_BACKUP: {
+        const backup = await backups.get(task.result);
+        await eventlog.add(eventlog.ACTION_APP_BACKUP_FINISH, auditSource, { app, success, errorMessage, remotePath: backup?.remotePath, backupId: task.result });
        break;
+    }
    }
}

async function scheduleTask(appId, installationState, taskId, auditSource) {

@@ -1248,6 +1325,7 @@ async function install(data, auditSource) {
        overwriteDns = 'overwriteDns' in data ? data.overwriteDns : false,
        skipDnsSetup = 'skipDnsSetup' in data ? data.skipDnsSetup : false,
        appStoreId = data.appStoreId,
+        upstreamUri = data.upstreamUri || '',
        manifest = data.manifest;

    let error = manifestFormat.parse(manifest);

@@ -1271,6 +1349,9 @@ async function install(data, auditSource) {
    error = validateLabel(label);
    if (error) throw error;

+    error = validateUpstreamUri(upstreamUri);
+    if (error) throw error;
+
    error = validateTags(tags);
    if (error) throw error;

@@ -1281,7 +1362,7 @@ async function install(data, auditSource) {
    let sso = 'sso' in data ? data.sso : null;
    if ('sso' in data && !('optionalSso' in manifest)) throw new BoxError(BoxError.BAD_FIELD, 'sso can only be specified for apps with optionalSso');
    // if sso was unspecified, enable it by default if possible
-    if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['proxyAuth'];
+    if (sso === null) sso = !!manifest.addons?.ldap || !!manifest.addons?.proxyAuth;

    error = validateEnv(env);
    if (error) throw error;

@@ -1328,6 +1409,7 @@ async function install(data, auditSource) {
        tags,
        icon,
        enableMailbox,
+        upstreamUri,
        runState: exports.RSTATE_RUNNING,
        installationState: exports.ISTATE_PENDING_INSTALL
    };

@@ -1395,6 +1477,22 @@ async function setCrontab(app, crontab, auditSource) {
    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, crontab });
}

+async function setUpstreamUri(app, upstreamUri, auditSource) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof upstreamUri, 'string');
+    assert.strictEqual(typeof auditSource, 'object');
+
+    const appId = app.id;
+    const error = validateUpstreamUri(upstreamUri);
+    if (error) throw error;
+
+    await reverseProxy.writeAppConfigs(_.extend({}, app, { upstreamUri }));
+
+    await update(appId, { upstreamUri });
+
+    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, upstreamUri });
+}
+
async function setLabel(app, label, auditSource) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof label, 'string');

@@ -1568,6 +1666,7 @@ async function setMailbox(app, data, auditSource) {
    const optional = 'optional' in app.manifest.addons.sendmail ? app.manifest.addons.sendmail.optional : false;
    if (!optional && !enableMailbox) throw new BoxError(BoxError.BAD_FIELD, 'App requires sendmail to be enabled');

+    const mailboxDisplayName = data.mailboxDisplayName || '';
    let mailboxName = data.mailboxName || null;
    const mailboxDomain = data.mailboxDomain || null;

@@ -1580,15 +1679,20 @@ async function setMailbox(app, data, auditSource) {
        } else {
            mailboxName = mailboxNameForSubdomain(app.subdomain, app.domain, app.manifest);
        }

+        if (mailboxDisplayName) {
+            error = mail.validateDisplayName(mailboxDisplayName);
+            if (error) throw new BoxError(BoxError.BAD_FIELD, error.message);
+        }
    }

    const task = {
        args: {},
-        values: { enableMailbox, mailboxName, mailboxDomain }
+        values: { enableMailbox, mailboxName, mailboxDomain, mailboxDisplayName }
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_RECREATE_CONTAINER, task, auditSource);

-    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, mailboxName, mailboxDomain, taskId });
+    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, mailboxName, mailboxDomain, mailboxDisplayName, taskId });

    return { taskId };
}

@@ -1662,7 +1766,7 @@ async function setReverseProxyConfig(app, reverseProxyConfig, auditSource) {
    error = validateRobotsTxt(reverseProxyConfig.robotsTxt);
    if (error) throw error;

-    await reverseProxy.writeAppConfig(_.extend({}, app, { reverseProxyConfig }));
+    await reverseProxy.writeAppConfigs(_.extend({}, app, { reverseProxyConfig }));

    await update(appId, { reverseProxyConfig });

@@ -1674,18 +1778,23 @@ async function setCertificate(app, data, auditSource) {
    assert(data && typeof data === 'object');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
-    const { location, domain, cert, key } = data;
+    const { subdomain, domain, cert, key } = data;

    const domainObject = await domains.get(domain);
    if (domainObject === null) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

    if (cert && key) {
-        const error = reverseProxy.validateCertificate(location, domainObject, { cert, key });
+        const error = reverseProxy.validateCertificate(subdomain, domainObject, { cert, key });
        if (error) throw error;
    }

-    await reverseProxy.setAppCertificate(location, domainObject, { cert, key });
-    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, cert, key });
+    const certificate = cert && key ? { cert, key } : null;
+    const result = await database.query('UPDATE locations SET certificateJson=? WHERE location=? AND domain=?', [ certificate ? JSON.stringify(certificate) : null, subdomain, domain ]);
+    if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Location not found');
+
+    app = await get(app.id); // refresh app object
+    await reverseProxy.setUserCertificate(app, dns.fqdn(subdomain, domainObject), certificate);
+    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: app.id, app, subdomain, domain, cert });
}

async function setLocation(app, data, auditSource) {

@@ -1761,25 +1870,29 @@ async function setLocation(app, data, auditSource) {
    return { taskId };
}

-async function setDataDir(app, dataDir, auditSource) {
+async function setStorage(app, volumeId, volumePrefix, auditSource) {
    assert.strictEqual(typeof app, 'object');
-    assert(dataDir === null || typeof dataDir === 'string');
+    assert(volumeId === null || typeof volumeId === 'string');
+    assert(volumePrefix === null || typeof volumePrefix === 'string');
    assert.strictEqual(typeof auditSource, 'object');

    const appId = app.id;
    let error = checkAppState(app, exports.ISTATE_PENDING_DATA_DIR_MIGRATION);
    if (error) throw error;

-    error = validateDataDir(dataDir);
-    if (error) throw error;
+    if (volumeId) {
+        await checkStorage(app, volumeId, volumePrefix);
+    } else {
+        volumeId = volumePrefix = null;
+    }

    const task = {
-        args: { newDataDir: dataDir },
+        args: { newStorageVolumeId: volumeId, newStorageVolumePrefix: volumePrefix },
        values: {}
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_DATA_DIR_MIGRATION, task, auditSource);

-    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, dataDir, taskId });
+    await eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId, app, volumeId, volumePrefix, taskId });

    return { taskId };
}

@@ -1847,6 +1960,9 @@ async function updateApp(app, data, auditSource) {
        values.mailboxDomain = app.domain;
    }

+    const hasSso = !!updateConfig.manifest.addons?.proxyAuth || !!updateConfig.manifest.addons?.ldap;
+    if (!hasSso && app.sso) values.sso = false; // turn off sso flag, if the update removes sso options
+
    const task = {
        args: { updateConfig },
        values

@@ -1934,13 +2050,23 @@ async function getLogs(app, options) {
    return transformStream;
}

// never fails, just prints the error
async function appendLogLine(app, line) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof line, 'string');

    const logFilePath = path.join(paths.LOG_DIR, app.id, 'app.log');

    if (!safe.fs.appendFileSync(logFilePath, line)) console.error(`Could not append log line for app ${app.id} at ${logFilePath}: ${safe.error.message}`);
}

async function getCertificate(subdomain, domain) {
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

    const result = await database.query('SELECT certificateJson FROM locations WHERE subdomain=? AND domain=?', [ subdomain, domain ]);
    if (result.length === 0) return null;
-    return JSON.parse(result[0].certificateJson);
+    return safe.JSON.parse(result[0].certificateJson);
}
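The switch from `JSON.parse` to `safe.JSON.parse` matters because a corrupt `certificateJson` value would make plain `JSON.parse` throw inside a read path. A rough sketch of the safetydance pattern, assuming its documented behavior of returning null and stashing the exception in `safe.error`:

```js
const safe = require('safetydance');

const good = safe.JSON.parse('{"cert":"...","key":"..."}'); // -> parsed object
const bad = safe.JSON.parse('not json at all');             // -> null, no throw
if (bad === null) console.error(`parse failed: ${safe.error ? safe.error.message : 'unknown'}`);
```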
// does a re-configure when called from most states. for install/clone errors, it re-installs with an optional manifest

@@ -2022,7 +2148,7 @@ async function restore(app, backupId, auditSource) {
        values.mailboxDomain = app.domain;
    }

-    const restoreConfig = { backupId, backupFormat: backupInfo.format };
+    const restoreConfig = { remotePath: backupInfo.remotePath, backupFormat: backupInfo.format };

    const task = {
        args: {

@@ -2036,7 +2162,7 @@ async function restore(app, backupId, auditSource) {

    const taskId = await addTask(appId, exports.ISTATE_PENDING_RESTORE, task, auditSource);

-    await eventlog.add(eventlog.ACTION_APP_RESTORE, auditSource, { app: app, backupId: backupInfo.id, fromManifest: app.manifest, toManifest: backupInfo.manifest, taskId });
+    await eventlog.add(eventlog.ACTION_APP_RESTORE, auditSource, { app, backupId: backupInfo.id, remotePath: backupInfo.remotePath, fromManifest: app.manifest, toManifest: backupInfo.manifest, taskId });

    return { taskId };
}

@@ -2049,10 +2175,10 @@ async function importApp(app, data, auditSource) {
    const appId = app.id;

    // all fields are optional
-    data.backupId = data.backupId || null;
+    data.remotePath = data.remotePath || null;
    data.backupFormat = data.backupFormat || null;
    data.backupConfig = data.backupConfig || null;
-    const { backupId, backupFormat, backupConfig } = data;
+    const { remotePath, backupFormat, backupConfig } = data;

    let error = backupFormat ? validateBackupFormat(backupFormat) : null;
    if (error) throw error;

@@ -2088,7 +2214,7 @@ async function importApp(app, data, auditSource) {
        }
    }

-    const restoreConfig = { backupId, backupFormat, backupConfig };
+    const restoreConfig = { remotePath, backupFormat, backupConfig };

    const task = {
        args: {

@@ -2101,7 +2227,7 @@ async function importApp(app, data, auditSource) {
    };
    const taskId = await addTask(appId, exports.ISTATE_PENDING_IMPORT, task, auditSource);

-    await eventlog.add(eventlog.ACTION_APP_IMPORT, auditSource, { app: app, backupId, fromManifest: app.manifest, toManifest: app.manifest, taskId });
+    await eventlog.add(eventlog.ACTION_APP_IMPORT, auditSource, { app: app, remotePath, fromManifest: app.manifest, toManifest: app.manifest, taskId });

    return { taskId };
}

@@ -2157,6 +2283,7 @@ async function clone(app, data, user, auditSource) {

    const backupInfo = await backups.get(backupId);

    if (!backupInfo) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
    if (!backupInfo.manifest) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Could not get restore config');
+    if (backupInfo.encryptionVersion === 1) throw new BoxError(BoxError.BAD_FIELD, 'This encrypted backup was created with an older Cloudron version and cannot be cloned');

@@ -2206,7 +2333,8 @@ async function clone(app, data, user, auditSource) {
        tags: app.tags,
        enableAutomaticUpdate: app.enableAutomaticUpdate,
        icon: icons.icon,
-        enableMailbox: app.enableMailbox
+        enableMailbox: app.enableMailbox,
+        mailboxDisplayName: app.mailboxDisplayName
    };

    const [addError] = await safe(add(newAppId, appStoreId, manifest, subdomain, domain, translatePortBindings(portBindings, manifest), obj));

@@ -2215,7 +2343,7 @@ async function clone(app, data, user, auditSource) {

    await purchaseApp({ appId: newAppId, appstoreId: app.appStoreId, manifestId: manifest.id || 'customapp' });

-    const restoreConfig = { backupId, backupFormat: backupInfo.format };
+    const restoreConfig = { remotePath: backupInfo.remotePath, backupFormat: backupInfo.format };
    const task = {
        args: { restoreConfig, overwriteDns, skipDnsSetup, oldManifest: null },
        values: {},

@@ -2229,7 +2357,7 @@ async function clone(app, data, user, auditSource) {
    newApp.redirectDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, domainObjectMap[ad.domain]); });
    newApp.aliasDomains.forEach(function (ad) { ad.fqdn = dns.fqdn(ad.subdomain, domainObjectMap[ad.domain]); });

-    await eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId, oldApp: app, newApp, taskId });
+    await eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId, remotePath: backupInfo.remotePath, oldApp: app, newApp, taskId });

    return { id: newAppId, taskId };
}

@@ -2328,7 +2456,7 @@ function checkManifestConstraints(manifest) {
    return null;
}

-async function exec(app, options) {
+async function createExec(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert(options && typeof options === 'object');

@@ -2339,7 +2467,7 @@ async function exec(app, options) {
        throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
    }

-    const execOptions = {
+    const createOptions = {
        AttachStdin: true,
        AttachStdout: true,
        AttachStderr: true,

@@ -2351,6 +2479,18 @@ async function exec(app, options) {
        Cmd: cmd
    };

+    return await docker.createExec(app.containerId, createOptions);
+}
+
+async function startExec(app, execId, options) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof execId, 'string');
+    assert(options && typeof options === 'object');
+
+    if (app.installationState !== exports.ISTATE_INSTALLED || app.runState !== exports.RSTATE_RUNNING) {
+        throw new BoxError(BoxError.BAD_STATE, 'App not installed or running');
+    }
+
+    const startOptions = {
+        Detach: false,
+        Tty: options.tty,

@@ -2366,10 +2506,26 @@ async function exec(app, options) {
        stderr: true
    };

-    const stream = await docker.execContainer(app.containerId, { execOptions, startOptions, rows: options.rows, columns: options.columns });
+    const stream = await docker.startExec(execId, startOptions);
+
+    if (options.rows && options.columns) {
+        // there is a race where resizing too early results in a 404 "no such exec"
+        // https://git.cloudron.io/cloudron/box/issues/549
+        setTimeout(async function () {
+            await safe(docker.resizeExec(execId, { h: options.rows, w: options.columns }, { debug }));
+        }, 2000);
+    }

    return stream;
}

+async function getExec(app, execId) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof execId, 'string');
+
+    return await docker.getExec(execId);
+}
+
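The old `exec()` monolith is now split into `createExec`/`startExec`/`getExec`, so a caller can create the exec instance, attach to it, and inspect it as separate steps. A hypothetical caller (not from the diff) would chain them like this:

```js
// Hypothetical usage of the split API, mirroring downloadFile() further below.
async function runInApp(app, cmd /* array, e.g. [ 'ls', '-la' ] */) {
    const execId = await createExec(app, { cmd, tty: true });   // 1. create the docker exec instance
    const stream = await startExec(app, execId, {
        tty: true, rows: 24, columns: 80                        // 2. attach; resize fires ~2s later
    });
    const inspect = await getExec(app, execId);                 // 3. inspect state/exit code if needed
    return { stream, inspect };
}
```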
function canAutoupdateApp(app, updateInfo) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof updateInfo, 'object');

@@ -2412,6 +2568,7 @@ async function autoupdateApps(updateInfo, auditSource) { // updateInfo is { appI

    if (!canAutoupdateApp(app, updateInfo[appId])) {
        debug(`app ${app.fqdn} requires manual update`);
+        notifications.alert(notifications.ALERT_MANUAL_APP_UPDATE, `${app.manifest.title} at ${app.fqdn} requires manual update to version ${updateInfo[appId].manifest.version}`, `Changelog:\n${updateInfo[appId].manifest.changelog}\n`);
        continue;
    }

@@ -2452,6 +2609,18 @@ async function listBackups(app, page, perPage) {
    return await backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, page, perPage);
}

+async function updateBackup(app, backupId, data) {
+    assert.strictEqual(typeof app, 'object');
+    assert.strictEqual(typeof backupId, 'string');
+    assert.strictEqual(typeof data, 'object');
+
+    const backup = await backups.get(backupId);
+    if (!backup) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
+    if (backup.identifier !== app.id) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found'); // some other app's backup
+
+    await backups.update(backupId, data);
+}
+
async function restoreInstalledApps(options, auditSource) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof auditSource, 'object');

@@ -2461,11 +2630,11 @@ async function restoreInstalledApps(options, auditSource) {
    apps = apps.filter(app => app.installationState !== exports.ISTATE_PENDING_RESTORE); // safeguard against tasks being created non-stop if we crash on startup

    for (const app of apps) {
-        const [error, results] = await safe(backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1));
+        const [error, result] = await safe(backups.getByIdentifierAndStatePaged(app.id, backups.BACKUP_STATE_NORMAL, 1, 1));
        let installationState, restoreConfig, oldManifest;
-        if (!error && results.length) {
+        if (!error && result.length) {
            installationState = exports.ISTATE_PENDING_RESTORE;
-            restoreConfig = { backupId: results[0].id, backupFormat: results[0].format };
+            restoreConfig = { remotePath: result[0].remotePath, backupFormat: result[0].format };
            oldManifest = app.manifest;
        } else {
            installationState = exports.ISTATE_PENDING_INSTALL;

@@ -2582,7 +2751,8 @@ async function downloadFile(app, filePath) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof filePath, 'string');

-    const statStream = await exec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true });
+    const statExecId = await createExec(app, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true });
+    const statStream = await startExec(app, statExecId, { tty: true });
    const data = await drainStream(statStream);

    const parts = data.split('-');

@@ -2603,7 +2773,8 @@ async function downloadFile(app, filePath) {
        throw new BoxError(BoxError.NOT_FOUND, 'only files or dirs can be downloaded');
    }

-    const inputStream = await exec(app, { cmd, tty: false });
+    const execId = await createExec(app, { cmd, tty: false });
+    const inputStream = await startExec(app, execId, { tty: false });

    // transforms the docker stream into a normal stream
    const stdoutStream = new TransformStream({

@@ -2644,7 +2815,8 @@ async function uploadFile(app, sourceFilePath, destFilePath) {
    const escapedDestFilePath = safe.child_process.execSync(`printf %q '${destFilePath.replace(/'/g, '\'\\\'\'')}'`, { shell: '/bin/bash', encoding: 'utf8' });
    debug(`uploadFile: ${sourceFilePath} -> ${escapedDestFilePath}`);

-    const destStream = await exec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false });
+    const execId = await createExec(app, { cmd: [ 'bash', '-c', `cat - > ${escapedDestFilePath}` ], tty: false });
+    const destStream = await startExec(app, execId, { tty: false });

    return new Promise((resolve, reject) => {
        const done = once(error => reject(new BoxError(BoxError.FS_ERROR, error.message)));

+56 -79

@@ -13,14 +13,17 @@ exports = module.exports = {
    purchaseApp,
    unpurchaseApp,

-    createUserToken,
+    getWebToken,
    getSubscription,
    isFreePlan,

    getAppUpdate,
    getBoxUpdate,

-    createTicket
+    createTicket,
+
+    // exported for tests
+    _unregister: unregister
};

const apps = require('./apps.js'),

@@ -34,6 +37,7 @@ const apps = require('./apps.js'),
    safe = require('safetydance'),
    semver = require('semver'),
    settings = require('./settings.js'),
    sysinfo = require('./sysinfo.js'),
    superagent = require('superagent'),
+    support = require('./support.js'),
    util = require('util');

@@ -78,17 +82,15 @@ async function login(email, password, totpToken) {
    assert.strictEqual(typeof password, 'string');
    assert.strictEqual(typeof totpToken, 'string');

-    const data = { email, password, totpToken };
-
-    const url = settings.apiServerOrigin() + '/api/v1/login';
-    const [error, response] = await safe(superagent.post(url)
-        .send(data)
+    const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/login`)
+        .send({ email, password, totpToken })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
-    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `login status code: ${response.status}`);
+    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Login error. status code: ${response.status}`);
+    if (!response.body.accessToken) throw new BoxError(BoxError.EXTERNAL_ERROR, `Login error. invalid response: ${response.text}`);

    return response.body; // { userId, accessToken }
}
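Every appstore call in this file follows the same shape: `safe()` captures transport failures as `[error, response]`, and superagent's `.ok(() => true)` makes the request resolve even for non-2xx statuses so each status can be mapped to a specific `BoxError`. A stripped-down sketch against a hypothetical endpoint:

```js
// Sketch of the request pattern used throughout this file (hypothetical endpoint).
const superagent = require('superagent');
const safe = require('safetydance');

async function callAppstore(origin, token) {
    const [error, response] = await safe(superagent.get(`${origin}/api/v1/ping`)
        .query({ accessToken: token })
        .timeout(30 * 1000)
        .ok(() => true)); // never reject on 4xx/5xx; we branch on status below

    if (error) throw new Error(`network error: ${error.message}`); // DNS/timeout/conn-reset only
    if (response.status === 401) throw new Error('invalid credentials');
    if (response.status !== 200) throw new Error(`unexpected status ${response.status}`);
    return response.body;
}
```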
@@ -97,54 +99,36 @@ async function registerUser(email, password) {
    assert.strictEqual(typeof email, 'string');
    assert.strictEqual(typeof password, 'string');

-    const data = { email, password };
-
-    const url = settings.apiServerOrigin() + '/api/v1/register_user';
-    const [error, response] = await safe(superagent.post(url)
-        .send(data)
+    const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_user`)
+        .send({ email, password, utmSource: 'cloudron-dashboard' })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);

-    if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, 'account already exists');
-    if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `register status code: ${response.status}`);
+    if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, 'Registration error: account already exists');
+    if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Registration error. invalid response: ${response.status}`);
}

-async function createUserToken() {
+async function getWebToken() {
    if (settings.isDemo()) throw new BoxError(BoxError.BAD_FIELD, 'Not allowed in demo mode');

-    const token = await settings.getCloudronToken();
-    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
+    const token = await settings.getAppstoreWebToken();
+    if (!token) throw new BoxError(BoxError.NOT_FOUND); // user will have to re-login with password somehow

-    const url = `${settings.apiServerOrigin()}/api/v1/user_token`;
-
-    const [error, response] = await safe(superagent.post(url)
-        .send({})
-        .query({ accessToken: token })
-        .timeout(30 * 1000)
-        .ok(() => true));
-
-    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
-    if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `getUserToken status code: ${response.status}`);
-
-    return response.body.accessToken;
+    return token;
}

async function getSubscription() {
-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

-    const url = settings.apiServerOrigin() + '/api/v1/subscription';
-
-    const [error, response] = await safe(superagent.get(url)
+    const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/subscription`)
        .query({ accessToken: token })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR);
    if (response.status === 502) throw new BoxError(BoxError.EXTERNAL_ERROR, `Stripe error: ${error.message}`);
    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${error.message}`);

@@ -165,12 +149,10 @@ async function purchaseApp(data) {
    assert(data.appstoreId || data.manifestId);
    assert.strictEqual(typeof data.appId, 'string');

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

-    const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps`;
-
-    const [error, response] = await safe(superagent.post(url)
+    const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/cloudronapps`)
        .send(data)
        .query({ accessToken: token })
        .timeout(30 * 1000)

@@ -180,7 +162,6 @@ async function purchaseApp(data) {
    if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND); // appstoreId does not exist
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
-    if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    // 200 if already purchased, 201 is newly purchased
    if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', response.status, response.body));
}

@@ -190,7 +171,7 @@ async function unpurchaseApp(appId, data) {
    assert.strictEqual(typeof data, 'object'); // { appstoreId, manifestId }
    assert(data.appstoreId || data.manifestId);

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

    const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps/${appId}`;

@@ -203,8 +184,7 @@ async function unpurchaseApp(appId, data) {
    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 404) return; // was never purchased
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
-    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
-    if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', response.status, response.body));
+    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `App unpurchase failed to get app. status:${response.status}`);

    [error, response] = await safe(superagent.del(url)
        .send(data)

@@ -214,31 +194,28 @@ async function unpurchaseApp(appId, data) {

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
-    if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', response.status, response.body));
+    if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, `App unpurchase failed. status:${response.status}`);
}

async function getBoxUpdate(options) {
    assert.strictEqual(typeof options, 'object');

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

-    const url = `${settings.apiServerOrigin()}/api/v1/boxupdate`;
-
    const query = {
        accessToken: token,
        boxVersion: constants.VERSION,
        automatic: options.automatic
    };

-    const [error, response] = await safe(superagent.get(url)
+    const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/boxupdate`)
        .query(query)
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status === 204) return; // no update
    if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));

@@ -263,10 +240,9 @@ async function getAppUpdate(app, options) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof options, 'object');

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

-    const url = `${settings.apiServerOrigin()}/api/v1/appupdate`;
    const query = {
        accessToken: token,
        boxVersion: constants.VERSION,

@@ -275,14 +251,13 @@ async function getAppUpdate(app, options) {
        automatic: options.automatic
    };

-    const [error, response] = await safe(superagent.get(url)
+    const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/appupdate`)
        .query(query)
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status === 204) return; // no update
    if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));

@@ -306,24 +281,23 @@ async function getAppUpdate(app, options) {
async function registerCloudron(data) {
    assert.strictEqual(typeof data, 'object');

-    const url = `${settings.apiServerOrigin()}/api/v1/register_cloudron`;
+    const { domain, accessToken, version, existingApps } = data;

-    const [error, response] = await safe(superagent.post(url)
-        .send(data)
+    const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_cloudron`)
+        .send({ domain, accessToken, version, existingApps })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to register cloudron: ${response.statusCode} ${error.message}`);

-    // cloudronId, token, licenseKey
+    // cloudronId, token
    if (!response.body.cloudronId) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no cloudron id');
    if (!response.body.cloudronToken) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no token');
-    if (!response.body.licenseKey) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no license');

    await settings.setCloudronId(response.body.cloudronId);
-    await settings.setCloudronToken(response.body.cloudronToken);
-    await settings.setLicenseKey(response.body.licenseKey);
+    await settings.setAppstoreApiToken(response.body.cloudronToken);
+    await settings.setAppstoreWebToken(accessToken);

    debug(`registerCloudron: Cloudron registered with id ${response.body.cloudronId}`);
}
@@ -331,23 +305,23 @@ async function registerCloudron(data) {
async function updateCloudron(data) {
    assert.strictEqual(typeof data, 'object');

-    const token = await settings.getCloudronToken();
+    const { domain } = data;
+
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

-    const url = `${settings.apiServerOrigin()}/api/v1/update_cloudron`;
    const query = {
        accessToken: token
    };

-    const [error, response] = await safe(superagent.post(url)
+    const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/update_cloudron`)
        .query(query)
-        .send(data)
+        .send({ domain })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));

    debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`);

@@ -356,13 +330,20 @@ async function updateCloudron(data) {
async function registerWithLoginCredentials(options) {
    assert.strictEqual(typeof options, 'object');

    const token = await settings.getCloudronToken();
    if (token) throw new BoxError(BoxError.CONFLICT, 'Cloudron is already registered');

    if (options.signup) await registerUser(options.email, options.password);

    const result = await login(options.email, options.password, options.totpToken || '');
    await registerCloudron({ domain: settings.dashboardDomain(), accessToken: result.accessToken, version: constants.VERSION });

    for (const app of await apps.list()) {
        await purchaseApp({ appId: app.id, appstoreId: app.appStoreId, manifestId: app.manifest.id || 'customapp' });
    }
}

+async function unregister() {
+    await settings.setCloudronId('');
+    await settings.setAppstoreApiToken('');
+    await settings.setAppstoreWebToken('');
+}
+
async function createTicket(info, auditSource) {
@@ -374,11 +355,12 @@ async function createTicket(info, auditSource) {
    assert.strictEqual(typeof info.description, 'string');
    assert.strictEqual(typeof auditSource, 'object');

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

    if (info.enableSshSupport) {
        await safe(support.enableRemoteSupport(true, auditSource));
        info.ipv4 = await sysinfo.getServerIPv4();
    }

    info.app = info.appId ? await apps.get(info.appId) : null;

@@ -405,30 +387,26 @@ async function createTicket(info, auditSource) {
    const [error, response] = await safe(request);
    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));

-    eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);
+    await eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);

    return { message: `An email was sent to ${constants.SUPPORT_EMAIL}. We will get back shortly!` };
}

async function getApps() {
-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

    const unstable = await settings.getUnstableAppsConfig();

-    const url = `${settings.apiServerOrigin()}/api/v1/apps`;
-
-    const [error, response] = await safe(superagent.get(url)
+    const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/apps`)
        .query({ accessToken: token, boxVersion: constants.VERSION, unstable: unstable })
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App listing failed. %s %j', response.status, response.body));
    if (!response.body.apps) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));

@@ -445,7 +423,7 @@ async function getAppVersion(appId, version) {

    if (!isAppAllowed(appId, listingConfig)) throw new BoxError(BoxError.FEATURE_DISABLED);

-    const token = await settings.getCloudronToken();
+    const token = await settings.getAppstoreApiToken();
    if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');

    let url = `${settings.apiServerOrigin()}/api/v1/apps/${appId}`;

@@ -459,7 +437,6 @@ async function getAppVersion(appId, version) {
    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
    if (response.status === 403 || response.statusCode === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
    if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND);
-    if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App fetch failed. %s %j', response.status, response.body));

    return response.body;

+56 -24

@@ -41,13 +41,16 @@ const apps = require('./apps.js'),
    sysinfo = require('./sysinfo.js'),
    _ = require('underscore');

-const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/app.ejs', { encoding: 'utf8' }),
-    MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
+const MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
    LOGROTATE_CONFIG_EJS = fs.readFileSync(__dirname + '/logrotate.ejs', { encoding: 'utf8' }),
    CONFIGURE_LOGROTATE_CMD = path.join(__dirname, 'scripts/configurelogrotate.sh');

+// https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled
+const CGROUP_VERSION = fs.existsSync('/sys/fs/cgroup/cgroup.controllers') ? '2' : '1';
+const COLLECTD_CONFIG_EJS = fs.readFileSync(`${__dirname}/collectd/app_cgroup_v${CGROUP_VERSION}.ejs`, { encoding: 'utf8' });

function makeTaskError(error, app) {
    assert.strictEqual(typeof error, 'object');
    assert(error instanceof BoxError);
    assert.strictEqual(typeof app, 'object');

    // track a few variables which helps 'repair' restart the task (see also scheduleTask in apps.js)

@@ -71,11 +74,13 @@ async function updateApp(app, values) {
async function allocateContainerIp(app) {
    assert.strictEqual(typeof app, 'object');

+    if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
+
    await promiseRetry({ times: 10, interval: 0, debug }, async function () {
        const iprange = iputils.intFromIp('172.18.20.255') - iputils.intFromIp('172.18.16.1');
        let rnd = Math.floor(Math.random() * iprange);
        const containerIp = iputils.ipFromInt(iputils.intFromIp('172.18.16.1') + rnd);
-        updateApp(app, { containerIp });
+        await updateApp(app, { containerIp });
    });
}

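`allocateContainerIp` picks a random address in the 172.18.16.1–172.18.20.255 range by converting IPs to integers. A self-contained sketch of such int/IP converters, written to mirror what the `iputils` calls above presumably do (hypothetical helpers, not the library's actual source):

```js
// Hypothetical int<->IPv4 helpers mirroring the iputils calls above.
function intFromIp(ip) {
    // fold each octet into a 32-bit integer; >>> 0 forces an unsigned result
    return ip.split('.').reduce((acc, octet) => (acc << 8) + parseInt(octet, 10), 0) >>> 0;
}

function ipFromInt(n) {
    return [ 24, 16, 8, 0 ].map(shift => (n >>> shift) & 0xff).join('.');
}

const base = intFromIp('172.18.16.1'), range = intFromIp('172.18.20.255') - base;
const candidate = ipFromInt(base + Math.floor(Math.random() * range));
console.log(candidate); // e.g. 172.18.18.73 — the retry loop tries again if it is taken
```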
@@ -83,6 +88,8 @@ async function createContainer(app) {
    assert.strictEqual(typeof app, 'object');
    assert(!app.containerId); // otherwise, it will trigger volumeFrom

+    if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
+
    debug('createContainer: creating container');

    const container = await docker.createContainer(app);

@@ -157,7 +164,8 @@ async function deleteAppDir(app, options) {
async function addCollectdProfile(app) {
    assert.strictEqual(typeof app, 'object');

-    const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { appId: app.id, containerId: app.containerId, appDataDir: apps.getDataDir(app, app.dataDir) });
+    const appDataDir = await apps.getStorageDir(app);
+    const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { appId: app.id, containerId: app.containerId, appDataDir });
    await collectd.addProfile(app.id, collectdConf);
}

@@ -265,24 +273,30 @@ async function waitForDnsPropagation(app) {
    }
}

-async function moveDataDir(app, targetDir) {
+async function moveDataDir(app, targetVolumeId, targetVolumePrefix) {
    assert.strictEqual(typeof app, 'object');
-    assert(targetDir === null || typeof targetDir === 'string');
+    assert(targetVolumeId === null || typeof targetVolumeId === 'string');
+    assert(targetVolumePrefix === null || typeof targetVolumePrefix === 'string');

-    const resolvedSourceDir = apps.getDataDir(app, app.dataDir);
-    const resolvedTargetDir = apps.getDataDir(app, targetDir);
+    const resolvedSourceDir = await apps.getStorageDir(app);
+    const resolvedTargetDir = await apps.getStorageDir(_.extend({}, app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix }));

    debug(`moveDataDir: migrating data from ${resolvedSourceDir} to ${resolvedTargetDir}`);

-    if (resolvedSourceDir === resolvedTargetDir) return;
+    if (resolvedSourceDir !== resolvedTargetDir) {
+        const [error] = await safe(shell.promises.sudo('moveDataDir', [ MV_VOLUME_CMD, resolvedSourceDir, resolvedTargetDir ], {}));
+        if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error migrating data directory: ${error.message}`);
+    }

-    const [error] = await safe(shell.promises.sudo('moveDataDir', [ MV_VOLUME_CMD, resolvedSourceDir, resolvedTargetDir ], {}));
-    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error migrating data directory: ${error.message}`);
+    await updateApp(app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix });
}

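Note the shape of the rewritten `moveDataDir`: the expensive copy step is skipped when source and target already resolve to the same path, but the bookkeeping update always runs, so a crashed-and-retried migration task converges. A minimal sketch of that idempotent pattern (hypothetical callbacks, not the actual helpers):

```js
// Sketch of the idempotent move-then-persist shape used by moveDataDir() above.
async function migrate(source, target, doMove, persist) {
    if (source !== target) await doMove(source, target); // expensive step, no-op on retry
    await persist(target);                               // cheap bookkeeping, safe to repeat
}
```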
async function downloadImage(manifest) {
|
||||
assert.strictEqual(typeof manifest, 'object');
|
||||
|
||||
// skip for relay app
|
||||
if (manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
|
||||
|
||||
const info = await docker.info();
|
||||
const [dfError, diskUsage] = await safe(df.file(info.DockerRootDir));
|
||||
if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting file system info: ${dfError.message}`);
|
||||
@@ -297,6 +311,9 @@ async function startApp(app) {
|
||||
|
||||
if (app.runState === apps.RSTATE_STOPPED) return;
|
||||
|
||||
// skip for relay app
|
||||
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
|
||||
|
||||
await docker.startContainer(app.id);
|
||||
}
|
||||
|
||||
@@ -328,7 +345,7 @@ async function install(app, args, progressCallback) {
|
||||
}
|
||||
await services.teardownAddons(app, addonsToRemove);
|
||||
|
||||
if (!restoreConfig || restoreConfig.backupId) { // in-place import should not delete data dir
|
||||
if (!restoreConfig || restoreConfig.remotePath) { // in-place import should not delete data dir
|
||||
await deleteAppDir(app, { removeDirectory: false }); // do not remove any symlinked appdata dir
|
||||
}
|
||||
|
||||
@@ -357,7 +374,7 @@ async function install(app, args, progressCallback) {
|
||||
if (!restoreConfig) { // install
|
||||
await progressCallback({ percent: 60, message: 'Setting up addons' });
|
||||
await services.setupAddons(app, app.manifest.addons);
|
||||
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.backupId) { // in-place import
|
||||
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.remotePath) { // in-place import
|
||||
await progressCallback({ percent: 60, message: 'Importing addons in-place' });
|
||||
await services.setupAddons(app, app.manifest.addons);
|
||||
await services.clearAddons(app, _.omit(app.manifest.addons, 'localstorage'));
|
||||
@@ -517,8 +534,9 @@ async function migrateDataDir(app, args, progressCallback) {
|
||||
assert.strictEqual(typeof args, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const newDataDir = args.newDataDir;
|
||||
assert(newDataDir === null || typeof newDataDir === 'string');
|
||||
const { newStorageVolumeId, newStorageVolumePrefix } = args;
|
||||
assert(newStorageVolumeId === null || typeof newStorageVolumeId === 'string');
|
||||
assert(newStorageVolumePrefix === null || typeof newStorageVolumePrefix === 'string');
|
||||
|
||||
await progressCallback({ percent: 10, message: 'Cleaning up old install' });
|
||||
await deleteContainers(app, { managedOnly: true });
|
||||
@@ -526,12 +544,12 @@ async function migrateDataDir(app, args, progressCallback) {
|
||||
await progressCallback({ percent: 45, message: 'Ensuring app data directory' });
|
||||
await createAppDir(app);
|
||||
|
||||
// re-setup addons since this creates the localStorage volume
|
||||
// re-setup addons since this creates the localStorage destination
|
||||
await progressCallback({ percent: 50, message: 'Setting up addons' });
|
||||
await services.setupAddons(_.extend({}, app, { dataDir: newDataDir }), app.manifest.addons);
|
||||
await services.setupAddons(_.extend({}, app, { storageVolumeId: newStorageVolumeId, storageVolumePrefix: newStorageVolumePrefix }), app.manifest.addons);
|
||||
|
||||
await progressCallback({ percent: 60, message: 'Moving data dir' });
|
||||
await moveDataDir(app, newDataDir);
|
||||
await moveDataDir(app, newStorageVolumeId, newStorageVolumePrefix);
|
||||
|
||||
await progressCallback({ percent: 90, message: 'Creating container' });
|
||||
await createContainer(app);
|
||||
@@ -539,7 +557,7 @@ async function migrateDataDir(app, args, progressCallback) {
|
||||
await startApp(app);
|
||||
|
||||
await progressCallback({ percent: 100, message: 'Done' });
|
||||
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, dataDir: newDataDir });
|
||||
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null });
|
||||
}
|
||||
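
The hunks above change migrateDataDir's arguments from a filesystem path to a volume reference. A minimal caller sketch of the new shape (the volume id and prefix values are hypothetical, not values from this changeset):

    // before: path-based migration
    // await migrateDataDir(app, { newDataDir: '/mnt/disk1/myapp' }, progressCallback);
    // after: volume-based migration; 'vol-abc123' and 'myapp' are made-up examples
    await migrateDataDir(app, { newStorageVolumeId: 'vol-abc123', newStorageVolumePrefix: 'myapp' }, progressCallback);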

// configure is called for an infra update and repair to re-create container, reverseproxy config. it's all "local"
@@ -667,8 +685,10 @@ async function start(app, args, progressCallback) {
await progressCallback({ percent: 10, message: 'Starting app services' });
await services.startAppServices(app);

await progressCallback({ percent: 35, message: 'Starting container' });
await docker.startContainer(app.id);
if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) {
await progressCallback({ percent: 35, message: 'Starting container' });
await docker.startContainer(app.id);
}

await progressCallback({ percent: 60, message: 'Adding collectd profile' });
await addCollectdProfile(app);
@@ -705,8 +725,20 @@ async function restart(app, args, progressCallback) {
assert.strictEqual(typeof args, 'object');
assert.strictEqual(typeof progressCallback, 'function');

await progressCallback({ percent: 20, message: 'Restarting container' });
await docker.restartContainer(app.id);
if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) {
await progressCallback({ percent: 10, message: 'Starting app services' });
await services.startAppServices(app);

await progressCallback({ percent: 20, message: 'Restarting container' });
await docker.restartContainer(app.id);
}

await progressCallback({ percent: 60, message: 'Adding collectd profile' });
await addCollectdProfile(app);

// stopped apps do not renew certs. currently, we don't do DNS to not overwrite existing user settings
await progressCallback({ percent: 80, message: 'Configuring reverse proxy' });
await reverseProxy.configureApp(app, AuditSource.APPTASK);

await progressCallback({ percent: 100, message: 'Done' });
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null });

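start and restart now share the same guard: container operations are skipped for the relay/proxy app, whose container is not managed by this task. The recurring pattern in isolation (a sketch of the guard, not additional API):

    if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) {
        await docker.restartContainer(app.id); // only real app containers are (re)started here
    }
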
+79 -82
@@ -1,5 +1,7 @@
'use strict';

const BoxError = require('./boxerror.js');

exports = module.exports = {
run,

@@ -8,16 +10,17 @@ exports = module.exports = {

const apps = require('./apps.js'),
assert = require('assert'),
backupFormat = require('./backupformat.js'),
backups = require('./backups.js'),
constants = require('./constants.js'),
debug = require('debug')('box:backupcleaner'),
moment = require('moment'),
mounts = require('./mounts.js'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
storage = require('./storage.js'),
util = require('util'),
_ = require('underscore');

function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
@@ -35,7 +38,7 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
else backup.discardReason = 'creating-too-long';
} else if (referencedBackupIds.includes(backup.id)) {
backup.keepReason = 'reference';
} else if ((now - backup.creationTime) < (backup.preserveSecs * 1000)) {
} else if ((backup.preserveSecs === -1) || ((now - backup.creationTime) < (backup.preserveSecs * 1000))) {
backup.keepReason = 'preserveSecs';
} else if ((now - backup.creationTime < policy.keepWithinSecs * 1000) || policy.keepWithinSecs < 0) {
backup.keepReason = 'keepWithinSecs';
@@ -78,49 +81,42 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
}
}

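The retention hunk above gives preserveSecs a sentinel value: -1 now keeps a backup regardless of age. A minimal sketch of just that decision (the function name is illustrative, not part of the module):

    function keptByPreserveSecs(backup, now) {
        if (backup.preserveSecs === -1) return true; // sentinel: preserve forever
        return (now - backup.creationTime) < (backup.preserveSecs * 1000); // preserveSecs is seconds, times are ms
    }
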
async function cleanupBackup(backupConfig, backup, progressCallback) {
async function removeBackup(backupConfig, backup, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof backup, 'object');
assert.strictEqual(typeof progressCallback, 'function');

const backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
const backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);

return new Promise((resolve) => {
function done(error) {
if (error) {
debug('cleanupBackup: error removing backup %j : %s', backup, error.message);
return resolve();
}
let removeError;
if (backup.format === 'tgz') {
progressCallback({ message: `${backup.remotePath}: Removing ${backupFilePath}`});
[removeError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFilePath));
} else {
progressCallback({ message: `${backup.remotePath}: Removing directory ${backupFilePath}`});
[removeError] = await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath, progressCallback));
}

// prune empty directory if possible
storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath), async function (error) {
if (error) debug('cleanupBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), error.message);
if (removeError) {
debug('removeBackup: error removing backup %j : %s', backup, removeError.message);
return;
}

const [delError] = await safe(backups.del(backup.id));
if (delError) debug('cleanupBackup: error removing from database', delError);
else debug('cleanupBackup: removed %s', backup.id);
// prune empty directory if possible
const [pruneError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath)));
if (pruneError) debug('removeBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), pruneError.message);

resolve();
});
}

if (backup.format === 'tgz') {
progressCallback({ message: `${backup.id}: Removing ${backupFilePath}`});
storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, done);
} else {
const events = storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath);
events.on('progress', (message) => progressCallback({ message: `${backup.id}: ${message}` }));
events.on('done', done);
}
});
const [delError] = await safe(backups.del(backup.id));
if (delError) debug(`removeBackup: error removing ${backup.id} from database`, delError);
else debug(`removeBackup: removed ${backup.remotePath}`);
}

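removeBackup also moves from the callback-and-Promise wrapper to the await-plus-safe tuple pattern used across this changeset, so cleanup logs failures instead of aborting the run. The core of the pattern, condensed from the code above:

    const [removeError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFilePath));
    if (removeError) debug('removeBackup: error removing backup %j : %s', backup, removeError.message); // log and continue
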
async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback) {
async function cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert(Array.isArray(referencedAppBackupIds));
assert(Array.isArray(referencedBackupIds));
assert.strictEqual(typeof progressCallback, 'function');

let removedAppBackupIds = [];
const removedAppBackupPaths = [];

const allApps = await apps.list();
const allAppIds = allApps.map(a => a.id);
@@ -137,49 +133,49 @@ async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressC
// apply backup policy per app. keep latest backup only for existing apps
let appBackupsToRemove = [];
for (const appId of Object.keys(appBackupsById)) {
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedAppBackupIds);
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedBackupIds);
appBackupsToRemove = appBackupsToRemove.concat(appBackupsById[appId].filter(b => !b.keepReason));
}

for (const appBackup of appBackupsToRemove) {
await progressCallback({ message: `Removing app backup (${appBackup.identifier}): ${appBackup.id}`});
removedAppBackupIds.push(appBackup.id);
await cleanupBackup(backupConfig, appBackup, progressCallback); // never errors
removedAppBackupPaths.push(appBackup.remotePath);
await removeBackup(backupConfig, appBackup, progressCallback); // never errors
}

debug('cleanupAppBackups: done');

return removedAppBackupIds;
return removedAppBackupPaths;
}

async function cleanupMailBackups(backupConfig, referencedAppBackupIds, progressCallback) {
async function cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert(Array.isArray(referencedAppBackupIds));
assert(Array.isArray(referencedBackupIds));
assert.strictEqual(typeof progressCallback, 'function');

let removedMailBackupIds = [];
const removedMailBackupPaths = [];

const mailBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_MAIL, 1, 1000);

applyBackupRetentionPolicy(mailBackups, _.extend({ keepLatest: true }, backupConfig.retentionPolicy), referencedAppBackupIds);
applyBackupRetentionPolicy(mailBackups, _.extend({ keepLatest: true }, backupConfig.retentionPolicy), referencedBackupIds);

for (const mailBackup of mailBackups) {
if (mailBackup.keepReason) continue;
await progressCallback({ message: `Removing mail backup ${mailBackup.id}`});
removedMailBackupIds.push(mailBackup.id);
await cleanupBackup(backupConfig, mailBackup, progressCallback); // never errors
await progressCallback({ message: `Removing mail backup ${mailBackup.remotePath}`});
removedMailBackupPaths.push(mailBackup.remotePath);
await removeBackup(backupConfig, mailBackup, progressCallback); // never errors
}

debug('cleanupMailBackups: done');

return removedMailBackupIds;
return removedMailBackupPaths;
}

async function cleanupBoxBackups(backupConfig, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof progressCallback, 'function');

let referencedAppBackupIds = [], removedBoxBackupIds = [];
let referencedBackupIds = [], removedBoxBackupPaths = [];

const boxBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_BOX, 1, 1000);

@@ -187,48 +183,48 @@ async function cleanupBoxBackups(backupConfig, progressCallback) {

for (const boxBackup of boxBackups) {
if (boxBackup.keepReason) {
referencedAppBackupIds = referencedAppBackupIds.concat(boxBackup.dependsOn);
referencedBackupIds = referencedBackupIds.concat(boxBackup.dependsOn);
continue;
}

await progressCallback({ message: `Removing box backup ${boxBackup.id}`});
await progressCallback({ message: `Removing box backup ${boxBackup.remotePath}`});

removedBoxBackupIds.push(boxBackup.id);
await cleanupBackup(backupConfig, boxBackup, progressCallback);
removedBoxBackupPaths.push(boxBackup.remotePath);
await removeBackup(backupConfig, boxBackup, progressCallback);
}

debug('cleanupBoxBackups: done');

return { removedBoxBackupIds, referencedAppBackupIds };
return { removedBoxBackupPaths, referencedBackupIds };
}

// cleans up the database by checking if the backup exists on the remote
async function cleanupMissingBackups(backupConfig, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof progressCallback, 'function');

const perPage = 1000;
let missingBackupIds = [];
const backupExists = util.promisify(storage.api(backupConfig.provider).exists);
const missingBackupPaths = [];

if (constants.TEST) return missingBackupIds;
if (constants.TEST) return missingBackupPaths;

let page = 1, result = [];
do {
result = await backups.list(page, perPage);

for (const backup of result) {
let backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
let backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
if (backup.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory

const [existsError, exists] = await safe(backupExists(backupConfig, backupFilePath));
const [existsError, exists] = await safe(storage.api(backupConfig.provider).exists(backupConfig, backupFilePath));
if (existsError || exists) continue;

await progressCallback({ message: `Removing missing backup ${backup.id}`});
await progressCallback({ message: `Removing missing backup ${backup.remotePath}`});

const [delError] = await safe(backups.del(backup.id));
if (delError) debug(`cleanupBackup: error removing ${backup.id} from database`, delError);
if (delError) debug(`cleanupMissingBackups: error removing ${backup.id} from database`, delError);

missingBackupIds.push(backup.id);
missingBackupPaths.push(backup.remotePath);
}

++page;
@@ -236,7 +232,7 @@ async function cleanupMissingBackups(backupConfig, progressCallback) {

debug('cleanupMissingBackups: done');

return missingBackupIds;
return missingBackupPaths;
}

// removes the snapshots of apps that have been uninstalled
@@ -249,29 +245,23 @@ async function cleanupSnapshots(backupConfig) {

delete info.box;

const progressCallback = (progress) => { debug(`cleanupSnapshots: ${progress.message}`); };

for (const appId of Object.keys(info)) {
const app = await apps.get(appId);
if (app) continue; // app is still installed

await new Promise((resolve) => {
async function done(/* ignoredError */) {
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));
if (info[appId].format === 'tgz') {
await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`)), { debug });
} else {
await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`), progressCallback), { debug });
}

await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));

resolve();
}

if (info[appId].format === 'tgz') {
storage.api(backupConfig.provider).remove(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format), done);
} else {
const events = storage.api(backupConfig.provider).removeDir(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format));
events.on('progress', function (detail) { debug(`cleanupSnapshots: ${detail}`); });
events.on('done', done);
}
});
await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
}

debug('cleanupSnapshots: done');
@@ -282,25 +272,32 @@ async function run(progressCallback) {

const backupConfig = await settings.getBackupConfig();

if (mounts.isManagedProvider(backupConfig.provider) || backupConfig.provider === 'mountpoint') {
const hostPath = mounts.isManagedProvider(backupConfig.provider) ? paths.MANAGED_BACKUP_MOUNT_DIR : backupConfig.mountPoint;
const status = await mounts.getStatus(backupConfig.provider, hostPath); // { state, message }
debug(`clean: mount point status is ${JSON.stringify(status)}`);
if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`);
}

if (backupConfig.retentionPolicy.keepWithinSecs < 0) {
debug('cleanup: keeping all backups');
return {};
}

await progressCallback({ percent: 10, message: 'Cleaning box backups' });
const { removedBoxBackupIds, referencedAppBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback);
const { removedBoxBackupPaths, referencedBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback); // references are app or mail backup ids

await progressCallback({ percent: 20, message: 'Cleaning mail backups' });
const removedMailBackupIds = await cleanupMailBackups(backupConfig, referencedAppBackupIds, progressCallback);
const removedMailBackupPaths = await cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback);

await progressCallback({ percent: 40, message: 'Cleaning app backups' });
const removedAppBackupIds = await cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback);
const removedAppBackupPaths = await cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback);

await progressCallback({ percent: 70, message: 'Cleaning missing backups' });
const missingBackupIds = await cleanupMissingBackups(backupConfig, progressCallback);
await progressCallback({ percent: 70, message: 'Checking storage backend and removing stale entries in database' });
const missingBackupPaths = await cleanupMissingBackups(backupConfig, progressCallback);

await progressCallback({ percent: 90, message: 'Cleaning snapshots' });
await cleanupSnapshots(backupConfig);

return { removedBoxBackupIds, removedMailBackupIds, removedAppBackupIds, missingBackupIds };
return { removedBoxBackupPaths, removedMailBackupPaths, removedAppBackupPaths, missingBackupPaths };
}

@@ -0,0 +1,12 @@
'use strict';

exports = module.exports = {
api
};

function api(format) {
switch (format) {
case 'tgz': return require('./backupformat/tgz.js');
case 'rsync': return require('./backupformat/rsync.js');
}
}
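
This small dispatcher keeps callers format-agnostic; backupcleaner.js uses it like this (variable values illustrative):

    const backupFormat = require('./backupformat.js');
    // resolves to backupformat/tgz.js or backupformat/rsync.js based on the backup's stored format
    const filePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
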
@@ -0,0 +1,245 @@
'use strict';

exports = module.exports = {
getBackupFilePath,
download,
upload,

_saveFsMetadata: saveFsMetadata,
_restoreFsMetadata: restoreFsMetadata
};

const assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
debug = require('debug')('box:backupformat/rsync'),
fs = require('fs'),
hush = require('../hush.js'),
once = require('../once.js'),
path = require('path'),
safe = require('safetydance'),
storage = require('../storage.js'),
syncer = require('../syncer.js'),
util = require('util');

function getBackupFilePath(backupConfig, remotePath) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');

const rootPath = storage.api(backupConfig.provider).getRootPath(backupConfig);
return path.join(rootPath, remotePath);
}

function sync(backupConfig, remotePath, dataLayout, progressCallback, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');

// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
const removeDir = util.callbackify(storage.api(backupConfig.provider).removeDir);
const remove = util.callbackify(storage.api(backupConfig.provider).remove);

syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
debug('sync: processing task: %j', task);
// the empty task.path is special to signify the directory
const destPath = task.path && backupConfig.encryptedFilenames ? hush.encryptFilePath(task.path, backupConfig.encryption) : task.path;
const backupFilePath = path.join(getBackupFilePath(backupConfig, remotePath), destPath);

if (task.operation === 'removedir') {
debug(`Removing directory ${backupFilePath}`);
return removeDir(backupConfig, backupFilePath, progressCallback, iteratorCallback);
} else if (task.operation === 'remove') {
debug(`Removing ${backupFilePath}`);
return remove(backupConfig, backupFilePath, iteratorCallback);
}

let retryCount = 0;
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
retryCallback = once(retryCallback); // protect against upload() erroring much later after a read stream error

++retryCount;
if (task.operation === 'add') {
progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
const stream = hush.createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
stream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` });
});
// only create the destination path when we have confirmation that the source is available. otherwise, we end up with
// files owned as 'root' and the cp later will fail
stream.on('open', function () {
storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
retryCallback(error);
});
});
}
}, iteratorCallback);
}, concurrency, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));

callback();
});
}

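As the comment in sync() notes, the default S3 concurrency is sized against the 10MB multipart part size: 20 concurrent uploads buffer roughly 200MB at peak. The arithmetic, spelled out (numbers from the comment, not tunables defined here):

    const S3_PART_SIZE_MB = 10;                                 // s3.upload partSize referenced in the comment
    const defaultConcurrency = 20;                              // default for the 's3' provider
    const peakBufferMB = defaultConcurrency * S3_PART_SIZE_MB;  // about 200MB held in flight
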
// this is not part of 'snapshotting' because we need root access to traverse
async function saveFsMetadata(dataLayout, metadataFile) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof metadataFile, 'string');

// contains paths prefixed with './'
const metadata = {
emptyDirs: [],
execFiles: [],
symlinks: []
};

// we assume a small number of files. execSync will raise an ENOBUFS error after maxBuffer
for (let lp of dataLayout.localPaths()) {
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));

const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));

const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
const target = safe.fs.readlinkSync(sl);
return { path: dataLayout.toRemotePath(sl), target };
}));
}

if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
}

async function restoreFsMetadata(dataLayout, metadataFile) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof metadataFile, 'string');

debug(`Recreating empty directories in ${dataLayout.toString()}`);

const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message);
const metadata = safe.JSON.parse(metadataJson);
if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message);

for (const emptyDir of metadata.emptyDirs) {
const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true }));
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`);
}

for (const execFile of metadata.execFiles) {
const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8)));
if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`);
}

for (const symlink of (metadata.symlinks || [])) {
if (!symlink.target) continue;
// the path may not exist if we had a directory full of symlinks
const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
}
}

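For reference, the file written by saveFsMetadata and read back by restoreFsMetadata carries three lists of remote-relative paths. An illustrative fsmetadata.json (paths hypothetical):

    // example fsmetadata.json content; all paths are prefixed with './'
    const example = {
        emptyDirs: [ './data/cache' ],                                   // recreated with recursive mkdir
        execFiles: [ './app/run.sh' ],                                   // chmod 0755 on restore
        symlinks: [ { path: './data/current', target: 'releases/v2' } ] // relinked on restore
    };
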
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof backupFilePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');

debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);

function downloadFile(entry, done) {
let relativePath = path.relative(backupFilePath, entry.fullPath);
if (backupConfig.encryptedFilenames) {
const { error, result } = hush.decryptFilePath(relativePath, backupConfig.encryption);
if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
relativePath = result;
}
const destFilePath = dataLayout.toLocalPath('./' + relativePath);

fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));

async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
if (error) {
progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
return retryCallback(error);
}

let destStream = hush.createWriteStream(destFilePath, backupConfig.encryption);

// protect against multiple errors. must destroy the write stream so that a previous retry does not write
let closeAndRetry = once((error) => {
if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });
sourceStream.destroy();
destStream.destroy();
retryCallback(error);
});

destStream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
});
destStream.on('error', closeAndRetry);

sourceStream.on('error', closeAndRetry);

progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });

sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
});
}, done);
});
}

storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);

async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
}, callback);
}

async function download(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');

debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);

const backupFilePath = getBackupFilePath(backupConfig, remotePath);
const downloadDirAsync = util.promisify(downloadDir);

await downloadDirAsync(backupConfig, backupFilePath, dataLayout, progressCallback);
await restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
}

async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof dataLayout, 'object');
assert.strictEqual(typeof progressCallback, 'function');

const syncAsync = util.promisify(sync);

await saveFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
await syncAsync(backupConfig, remotePath, dataLayout, progressCallback);
}
@@ -0,0 +1,195 @@
'use strict';

exports = module.exports = {
getBackupFilePath,
download,
upload
};

const assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
debug = require('debug')('box:backupformat/tgz'),
{ DecryptStream, EncryptStream } = require('../hush.js'),
once = require('../once.js'),
path = require('path'),
progressStream = require('progress-stream'),
storage = require('../storage.js'),
tar = require('tar-fs'),
zlib = require('zlib');

function getBackupFilePath(backupConfig, remotePath) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');

const rootPath = storage.api(backupConfig.provider).getRootPath(backupConfig);

const fileType = backupConfig.encryption ? '.tar.gz.enc' : '.tar.gz';
return path.join(rootPath, remotePath + fileType);
}
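
The two formats resolve different remote targets from the same remotePath: rsync maps to a directory tree, tgz to a single archive whose suffix depends on encryption. Illustrative results (the root path value is hypothetical):

    const path = require('path');
    const rootPath = 'cloudron-backups'; // e.g. a getRootPath(backupConfig) result
    const remotePath = 'snapshot/app_123';
    path.join(rootPath, remotePath);                 // rsync: a directory of files
    path.join(rootPath, remotePath + '.tar.gz');     // tgz, unencrypted
    path.join(rootPath, remotePath + '.tar.gz.enc'); // tgz, encrypted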

function tarPack(dataLayout, encryption) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof encryption, 'object');

const pack = tar.pack('/', {
dereference: false, // pack the symlink and not what it points to
entries: dataLayout.localPaths(),
ignoreStatError: (path, err) => {
debug(`tarPack: error stat'ing ${path} - ${err.code}`);
return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
},
map: function(header) {
header.name = dataLayout.toRemotePath(header.name);
// the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
// https://www.systutorials.com/docs/linux/man/5-star/
if (header.size > 8589934590 || header.name.length > 99) header.pax = { size: header.size };
return header;
},
strict: false // do not error for unknown types (skip fifo, char/block devices)
});

const gzip = zlib.createGzip({});
const ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds

pack.on('error', function (error) {
debug('tarPack: tar stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

gzip.on('error', function (error) {
debug('tarPack: gzip stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

if (encryption) {
const encryptStream = new EncryptStream(encryption);
encryptStream.on('error', function (error) {
debug('tarPack: encrypt stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

pack.pipe(gzip).pipe(encryptStream).pipe(ps);
} else {
pack.pipe(gzip).pipe(ps);
}

return ps;
}

function tarExtract(inStream, dataLayout, encryption) {
assert.strictEqual(typeof inStream, 'object');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof encryption, 'object');

const gunzip = zlib.createGunzip({});
const ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds
const extract = tar.extract('/', {
map: function (header) {
header.name = dataLayout.toLocalPath(header.name);
return header;
},
dmode: 500 // ensure directory is writable
});

const emitError = once((error) => {
inStream.destroy();
ps.emit('error', error);
});

inStream.on('error', function (error) {
debug('tarExtract: input stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

gunzip.on('error', function (error) {
debug('tarExtract: gunzip stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

extract.on('error', function (error) {
debug('tarExtract: extract stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});

extract.on('finish', function () {
debug('tarExtract: done.');
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not extract
ps.emit('done');
});

if (encryption) {
const decrypt = new DecryptStream(encryption);
decrypt.on('error', function (error) {
debug('tarExtract: decrypt stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, `Failed to decrypt: ${error.message}`));
});
inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
} else {
inStream.pipe(ps).pipe(gunzip).pipe(extract);
}

return ps;
}

async function download(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');

debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);

const backupFilePath = getBackupFilePath(backupConfig, remotePath);

return new Promise((resolve, reject) => {
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
progressCallback({ message: `Downloading backup ${remotePath}` });

storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
if (error) return retryCallback(error);

const ps = tarExtract(sourceStream, dataLayout, backupConfig.encryption);

ps.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
});
ps.on('error', retryCallback);
ps.on('done', retryCallback);
});
}, (error) => {
if (error) return reject(error);
resolve();
});
});
}

async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof dataLayout, 'object');
assert.strictEqual(typeof progressCallback, 'function');

return new Promise((resolve, reject) => {
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
retryCallback = once(retryCallback); // protect against upload() erroring much later after a tar stream error

const tarStream = tarPack(dataLayout, backupConfig.encryption);

tarStream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
});
tarStream.on('error', retryCallback); // already returns BoxError

storage.api(backupConfig.provider).upload(backupConfig, getBackupFilePath(backupConfig, remotePath), tarStream, retryCallback);
}, (error) => {
if (error) return reject(error);
resolve();
});
});
}
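
upload and download in the tgz format are mirror-image stream pipelines; every stage forwards its errors into the progress-stream (ps) so the retry wrapper sees a single error source. The pipeline order, condensed from the code above:

    // upload:   tar-fs pack -> gzip -> [EncryptStream] -> ps -> storage upload
    pack.pipe(gzip).pipe(encryptStream).pipe(ps);
    // download: storage stream -> ps -> [DecryptStream] -> gunzip -> tar-fs extract
    inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
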
+70 -34
@@ -6,6 +6,7 @@ exports = module.exports = {
getByTypePaged,
add,
update,
setState,
list,
del,

@@ -52,29 +53,24 @@ const assert = require('assert'),
ejs = require('ejs'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
hat = require('./hat.js'),
locker = require('./locker.js'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
storage = require('./storage.js'),
tasks = require('./tasks.js'),
util = require('util');
tasks = require('./tasks.js');

const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/cloudron-backup.ejs', { encoding: 'utf8' });

const BACKUPS_FIELDS = [ 'id', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];

// helper until all storage providers have been ported
function maybePromisify(func) {
if (util.types.isAsyncFunction(func)) return func;
return util.promisify(func);
}
const BACKUPS_FIELDS = [ 'id', 'remotePath', 'label', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOnJson', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];

function postProcess(result) {
assert.strictEqual(typeof result, 'object');

result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
result.dependsOn = result.dependsOnJson ? safe.JSON.parse(result.dependsOnJson) : [];
delete result.dependsOnJson;

result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
delete result.manifestJson;
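
dependsOn now round-trips through a JSON column rather than a comma-joined string, which handles empty lists and ids uniformly. For example (row value hypothetical):

    const row = { dependsOnJson: '["app_blog.example.com_v1.2.3_ab12","app_wiki.example.com_v2.0.0_cd34"]' };
    const dependsOn = row.dependsOnJson ? JSON.parse(row.dependsOnJson) : [];
    // -> [ 'app_blog.example.com_v1.2.3_ab12', 'app_wiki.example.com_v2.0.0_cd34' ]
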
@@ -116,9 +112,9 @@ function generateEncryptionKeysSync(password) {
};
}

async function add(id, data) {
async function add(data) {
assert(data && typeof data === 'object');
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof data.remotePath, 'string');
assert(data.encryptionVersion === null || typeof data.encryptionVersion === 'number');
assert.strictEqual(typeof data.packageVersion, 'string');
assert.strictEqual(typeof data.type, 'string');
@@ -127,15 +123,20 @@ async function add(id, data) {
assert(Array.isArray(data.dependsOn));
assert.strictEqual(typeof data.manifest, 'object');
assert.strictEqual(typeof data.format, 'string');
assert.strictEqual(typeof data.preserveSecs, 'number');

const creationTime = data.creationTime || new Date(); // allow tests to set the time
const manifestJson = JSON.stringify(data.manifest);
const prefixId = data.type === exports.BACKUP_TYPE_APP ? `${data.type}_${data.identifier}` : data.type; // type and identifier are same for other types
const id = `${prefixId}_v${data.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying

const [error] = await safe(database.query('INSERT INTO backups (id, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[ id, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, data.dependsOn.join(','), manifestJson, data.format ]));
const [error] = await safe(database.query('INSERT INTO backups (id, remotePath, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOnJson, manifestJson, format, preserveSecs) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[ id, data.remotePath, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, JSON.stringify(data.dependsOn), manifestJson, data.format, data.preserveSecs ]));

if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'Backup already exists');
if (error) throw error;

return id;
}

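add() now derives the id itself instead of taking it from the caller; per the code above it encodes type, identifier and package version plus a random hat(256) suffix so the UI can relate dependent backups. Example shapes (identifier and suffix values hypothetical):

    // app backup:  `app_${identifier}_v${packageVersion}_${hat(256)}`
    //   e.g. 'app_blog.example.com_v1.7.2_f3a9...'
    // box backup:  `box_v${packageVersion}_${hat(256)}`
    //   e.g. 'box_v7.0.1_9c2e...'
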
async function getByIdentifierAndStatePaged(identifier, state, page, perPage) {
@@ -172,19 +173,55 @@ async function getByTypePaged(type, page, perPage) {
return results;
}

async function update(id, backup) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof backup, 'object');
function validateLabel(label) {
assert.strictEqual(typeof label, 'string');

let fields = [ ], values = [ ];
for (const p in backup) {
fields.push(p + ' = ?');
values.push(backup[p]);
if (label.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'label too long');
if (/[^a-zA-Z0-9._() -]/.test(label)) return new BoxError(BoxError.BAD_FIELD, 'label can only contain alphanumerals, space, dot, hyphen, brackets or underscore');

return null;
}

// this is called by the REST API
async function update(id, data) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof data, 'object');

let error;
if ('label' in data) {
error = validateLabel(data.label);
if (error) throw error;
}

const fields = [], values = [];
for (const p in data) {
if (p === 'label' || p === 'preserveSecs') {
fields.push(p + ' = ?');
values.push(data[p]);
}
}
values.push(id);

const backup = await get(id);
if (backup === null) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

const result = await database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');

if ('preserveSecs' in data) {
// update the dependencies
for (const depId of backup.dependsOn) {
await database.query('UPDATE backups SET preserveSecs=? WHERE id = ?', [ data.preserveSecs, depId ]);
}
}
}

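update() is now REST-facing and whitelists exactly two fields, validating label and fanning preserveSecs out to the backups listed in dependsOn. A usage sketch (the id value is hypothetical):

    await backups.update('box_v7.0.1_9c2e...', { label: 'pre-upgrade', preserveSecs: -1 });
    // unknown fields are silently ignored; an invalid label throws BoxError.BAD_FIELD
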
async function setState(id, state) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof state, 'string');

const result = await database.query('UPDATE backups SET state = ? WHERE id = ?', [state, id]);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
}

async function startBackupTask(auditSource) {
@@ -205,7 +242,8 @@ async function startBackupTask(auditSource) {
const errorMessage = error ? error.message : '';
const timedOut = error ? error.code === tasks.ETIMEOUT : false;

await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }), { debug });
const backup = backupId ? await get(backupId) : null;
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId, remotePath: backup?.remotePath }), { debug });
});

return taskId;
@@ -268,15 +306,15 @@ async function startCleanupTask(auditSource) {

const taskId = await tasks.add(tasks.TASK_CLEAN_BACKUPS, []);

tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackupIds, removedAppBackupIds, removedMailBackupIds, missingBackupIds }
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
tasks.startTask(taskId, {}, async (error, result) => { // result is { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
taskId,
errorMessage: error ? error.message : null,
removedBoxBackupIds: result ? result.removedBoxBackupIds : [],
removedMailBackupIds: result ? result.removedMailBackupIds : [],
removedAppBackupIds: result ? result.removedAppBackupIds : [],
missingBackupIds: result ? result.missingBackupIds : []
});
removedBoxBackupPaths: result ? result.removedBoxBackupPaths : [],
removedMailBackupPaths: result ? result.removedMailBackupPaths : [],
removedAppBackupPaths: result ? result.removedAppBackupPaths : [],
missingBackupPaths: result ? result.missingBackupPaths : []
}), { debug });
});

return taskId;
@@ -318,8 +356,7 @@ async function testConfig(backupConfig) {
if ('keepMonthly' in policy && typeof policy.keepMonthly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepMonthly must be a number');
if ('keepYearly' in policy && typeof policy.keepYearly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepYearly must be a number');

const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
return error;
await storage.api(backupConfig.provider).testConfig(backupConfig);
}

// this skips password check since that policy is only at creation time
@@ -329,8 +366,7 @@ async function testProviderConfig(backupConfig) {
const func = storage.api(backupConfig.provider);
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');

const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
return error;
await storage.api(backupConfig.provider).testConfig(backupConfig);
}

async function remount(auditSource) {
@@ -341,5 +377,5 @@ async function remount(auditSource) {
const func = storage.api(backupConfig.provider);
if (!func) throw new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');

await maybePromisify(storage.api(backupConfig.provider).remount)(backupConfig);
await storage.api(backupConfig.provider).remount(backupConfig);
}

+96 -669
File diff suppressed because it is too large
+1 -1
@@ -52,7 +52,7 @@ BoxError.INACTIVE = 'Inactive'; // service/volume/mount
BoxError.INTERNAL_ERROR = 'Internal Error';
BoxError.INVALID_CREDENTIALS = 'Invalid Credentials';
BoxError.IPTABLES_ERROR = 'IPTables Error';
BoxError.LICENSE_ERROR = 'License Error';
BoxError.LICENSE_ERROR = 'License Error'; // billing or subscription expired
BoxError.LOGROTATE_ERROR = 'Logrotate Error';
BoxError.MAIL_ERROR = 'Mail Error';
BoxError.MOUNT_ERROR = 'Mount Error';

+2 -2
@@ -1,6 +1,6 @@
'use strict';

let assert = require('assert'),
const assert = require('assert'),
fs = require('fs'),
path = require('path');

@@ -11,7 +11,7 @@ exports = module.exports = {
function getChanges(version) {
assert.strictEqual(typeof version, 'string');

let changelog = [ ];
const changelog = [];
const lines = fs.readFileSync(path.join(__dirname, '../CHANGES'), 'utf8').split('\n');

version = version.replace(/[+-].*/, ''); // strip prerelease

+15 -11
@@ -32,7 +32,7 @@ const apps = require('./apps.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
debug = require('debug')('box:cloudron'),
delay = require('delay'),
delay = require('./delay.js'),
dns = require('./dns.js'),
dockerProxy = require('./dockerproxy.js'),
domains = require('./domains.js'),
@@ -90,10 +90,13 @@ async function notifyUpdate() {
const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
if (version === constants.VERSION) return;

await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });

const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
if (!version) {
await eventlog.add(eventlog.ACTION_INSTALL_FINISH, AuditSource.CRON, { version: constants.VERSION });
} else {
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
}

safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
}
@@ -137,7 +140,7 @@ async function runStartupTasks() {
// we used to run tasks in parallel but simultaneous nginx reloads were causing issues
for (let i = 0; i < tasks.length; i++) {
const [error] = await safe(tasks[i]());
if (error) debug(`Startup task at index ${i} failed: ${error.message}`);
if (error) debug(`Startup task at index ${i} failed: ${error.message} ${error.stack}`);
}
}

@@ -152,6 +155,7 @@ async function getConfig() {
return {
apiServerOrigin: settings.apiServerOrigin(),
webServerOrigin: settings.webServerOrigin(),
consoleServerOrigin: settings.consoleServerOrigin(),
adminDomain: settings.dashboardDomain(),
adminFqdn: settings.dashboardFqdn(),
mailFqdn: settings.mailFqdn(),
@@ -215,7 +219,7 @@ async function getLogs(unit, options) {
assert.strictEqual(typeof options.format, 'string');
assert.strictEqual(typeof options.follow, 'boolean');

var lines = options.lines === -1 ? '+1' : options.lines,
const lines = options.lines === -1 ? '+1' : options.lines,
format = options.format || 'json',
follow = options.follow;

@@ -263,12 +267,12 @@ async function prepareDashboardDomain(domain, auditSource) {
const domainObject = await domains.get(domain);
if (!domain) throw new BoxError(BoxError.NOT_FOUND, 'No such domain');

const fqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const fqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domainObject);

const result = await apps.list();
if (result.some(app => app.fqdn === fqdn)) throw new BoxError(BoxError.BAD_STATE, 'Dashboard location conflicts with an existing app');

const taskId = await tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_LOCATION, domain, auditSource ]);
const taskId = await tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_SUBDOMAIN, domain, auditSource ]);

tasks.startTask(taskId, {});

@@ -286,11 +290,11 @@ async function setDashboardDomain(domain, auditSource) {
if (!domain) throw new BoxError(BoxError.NOT_FOUND, 'No such domain');

await reverseProxy.writeDashboardConfig(domainObject);
const fqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const fqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domainObject);

await settings.setDashboardLocation(domain, fqdn);

await safe(appstore.updateCloudron({ domain }));
await safe(appstore.updateCloudron({ domain }), { debug });

await eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain, fqdn });
}

@@ -1,42 +0,0 @@
-LoadPlugin "table"
-<Plugin table>
-    <Table "/sys/fs/cgroup/memory/docker/<%= containerId %>/memory.stat">
-        Instance "<%= appId %>-memory"
-        Separator " \\n"
-        <Result>
-            Type gauge
-            InstancesFrom 0
-            ValuesFrom 1
-        </Result>
-    </Table>
-
-    <Table "/sys/fs/cgroup/memory/docker/<%= containerId %>/memory.max_usage_in_bytes">
-        Instance "<%= appId %>-memory"
-        Separator "\\n"
-        <Result>
-            Type gauge
-            InstancePrefix "max_usage_in_bytes"
-            ValuesFrom 0
-        </Result>
-    </Table>
-
-    <Table "/sys/fs/cgroup/cpuacct/docker/<%= containerId %>/cpuacct.stat">
-        Instance "<%= appId %>-cpu"
-        Separator " \\n"
-        <Result>
-            Type gauge
-            InstancesFrom 0
-            ValuesFrom 1
-        </Result>
-    </Table>
-</Plugin>
-
-<Plugin python>
-    <Module du>
-        <Path>
-            Instance "<%= appId %>"
-            Dir "<%= appDataDir %>"
-        </Path>
-    </Module>
-</Plugin>
@@ -0,0 +1,22 @@
+LoadPlugin "table"
+<Plugin table>
+    <Table "/sys/fs/cgroup/memory/docker/<%= containerId %>/memory.memsw.usage_in_bytes">
+        Instance "<%= appId %>-memory"
+        Separator "\\n"
+        <Result>
+            Type gauge
+            InstancePrefix "memsw_usage_in_bytes"
+            ValuesFrom 0
+        </Result>
+    </Table>
+</Plugin>
+
+<Plugin python>
+    <Module du>
+        <Path>
+            Instance "<%= appId %>"
+            Dir "<%= appDataDir %>"
+        </Path>
+    </Module>
+</Plugin>
@@ -0,0 +1,32 @@
+LoadPlugin "table"
+<Plugin table>
+    <Table "/sys/fs/cgroup/docker/<%= containerId %>/memory.current">
+        Instance "<%= appId %>-memory"
+        Separator " \\n"
+        <Result>
+            Type gauge
+            InstancePrefix "memory_current"
+            ValuesFrom 0
+        </Result>
+    </Table>
+
+    <Table "/sys/fs/cgroup/docker/<%= containerId %>/memory.swap.current">
+        Instance "<%= appId %>-memory"
+        Separator "\\n"
+        <Result>
+            Type gauge
+            InstancePrefix "memory_swap_current"
+            ValuesFrom 0
+        </Result>
+    </Table>
+</Plugin>
+
+<Plugin python>
+    <Module du>
+        <Path>
+            Instance "<%= appId %>"
+            Dir "<%= appDataDir %>"
+        </Path>
+    </Module>
+</Plugin>
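Note: the `<%= %>` placeholders in these collectd templates are interpolation markers. A plausible rendering sketch, assuming the templates go through underscore's `_.template` (underscore is a dependency elsewhere in this changeset; the file name and values below are made up for illustration):

    const _ = require('underscore');
    const fs = require('fs');

    // hypothetical template path and values
    const text = fs.readFileSync('collectd-app-cgroup2.tmpl', 'utf8');
    const config = _.template(text)({
        appId: 'f254985e-7597-40a6-98b1-8bbc1a969e34', // made-up app id
        containerId: '4f0e0706b2a1',                   // made-up container id
        appDataDir: '/home/yellowtent/appsdata/data'   // made-up data dir
    });
    // config now points collectd at that container's cgroup v2 files memory.current and memory.swap.current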
+10 -5

@@ -1,14 +1,14 @@
'use strict';

-let fs = require('fs'),
+const fs = require('fs'),
    path = require('path');

const CLOUDRON = process.env.BOX_ENV === 'cloudron',
    TEST = process.env.BOX_ENV === 'test';

exports = module.exports = {
-    SMTP_LOCATION: 'smtp',
-    IMAP_LOCATION: 'imap',
+    SMTP_SUBDOMAIN: 'smtp',
+    IMAP_SUBDOMAIN: 'imap',

    // These are combined into one array because users and groups become mailboxes
    RESERVED_NAMES: [
@@ -22,7 +22,7 @@ exports = module.exports = {
        'admins', 'users' // ldap code uses 'users' pseudo group
    ],

-    DASHBOARD_LOCATION: 'my',
+    DASHBOARD_SUBDOMAIN: 'my',

    PORT: CLOUDRON ? 3000 : 5454,
    INTERNAL_SMTP_PORT: 2525, // this value comes from the mail container
@@ -49,6 +49,8 @@ exports = module.exports = {
    ],
    DEMO_APP_LIMIT: 20,

+    PROXY_APP_APPSTORE_ID: 'io.cloudron.builtin.appproxy',
+
    AUTOUPDATE_PATTERN_NEVER: 'never',

    // the db field is a blob so we make this explicit
@@ -63,12 +65,15 @@ exports = module.exports = {

    PORT25_CHECK_SERVER: 'port25check.cloudron.io',

    FORUM_URL: 'https://forum.cloudron.io',

+    SUPPORT_USERNAME: 'cloudron-support',
    SUPPORT_EMAIL: 'support@cloudron.io',

+    USER_DIRECTORY_LDAP_DN: 'cn=admin,ou=system,dc=cloudron',
+
    FOOTER: '© %YEAR% [Cloudron](https://cloudron.io) [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)',

-    VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '7.0.0-test'
+    VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '7.2.0-test'
};
+32 -11

@@ -28,13 +28,13 @@ const appHealthMonitor = require('./apphealthmonitor.js'),
    dyndns = require('./dyndns.js'),
    eventlog = require('./eventlog.js'),
    janitor = require('./janitor.js'),
+    paths = require('./paths.js'),
    safe = require('safetydance'),
    scheduler = require('./scheduler.js'),
    settings = require('./settings.js'),
    system = require('./system.js'),
    updater = require('./updater.js'),
    updateChecker = require('./updatechecker.js'),
-    userdirectory = require('./userdirectory.js'),
    _ = require('underscore');

const gJobs = {
@@ -61,12 +61,36 @@ const gJobs = {
// Months: 0-11
// Day of Week: 0-6

-async function startJobs() {
-    debug('startJobs: starting cron jobs');
+function getCronSeed() {
+    let hour = null;
+    let minute = null;
+
+    const seedData = safe.fs.readFileSync(paths.CRON_SEED_FILE, 'utf8') || '';
+    const parts = seedData.split(':');
+    if (parts.length === 2) {
+        hour = parseInt(parts[0]) || null;
+        minute = parseInt(parts[1]) || null;
+    }
+
+    if ((hour == null || hour < 0 || hour > 23) || (minute == null || minute < 0 || minute > 60)) {
+        hour = Math.floor(24 * Math.random());
+        minute = Math.floor(60 * Math.random());
+
+        debug(`getCronSeed: writing new cron seed file with ${hour}:${minute} to ${paths.CRON_SEED_FILE}`);
+
+        safe.fs.writeFileSync(paths.CRON_SEED_FILE, `${hour}:${minute}`);
+    }
+
+    return { hour, minute };
+}
+
+async function startJobs() {
+    const { hour, minute } = getCronSeed();
+
+    debug(`startJobs: starting cron jobs with hour ${hour} and minute ${minute}`);

-    const randomTick = Math.floor(60*Math.random());
    gJobs.systemChecks = new CronJob({
-        cronTime: '00 30 2 * * *', // once a day. if you change this interval, change the notification messages with correct duration
+        cronTime: `00 ${minute} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
        onTick: async () => await safe(cloudron.runSystemChecks(), { debug }),
        start: true
    });
@@ -79,7 +103,7 @@ async function startJobs() {

    // this is run separately from the update itself so that the user can disable automatic updates but can still get a notification
    gJobs.updateCheckerJob = new CronJob({
-        cronTime: `${randomTick} ${randomTick} 1,5,9,13,17,21,23 * * *`,
+        cronTime: `00 ${minute} 1,5,9,13,17,21,23 * * *`,
        onTick: async () => await safe(updateChecker.checkForUpdates({ automatic: true }), { debug }),
        start: true
    });
@@ -114,8 +138,9 @@ async function startJobs() {
        start: true
    });

+    // randomized per Cloudron based on hourlySeed
    gJobs.certificateRenew = new CronJob({
-        cronTime: '00 00 */12 * * *', // every 12 hours
+        cronTime: `00 10 ${hour} * * *`,
        onTick: async () => await safe(cloudron.renewCerts({}, AuditSource.CRON), { debug }),
        start: true
    });
@@ -148,10 +173,6 @@ async function handleSettingsChanged(key, value) {
        await stopJobs();
        await startJobs();
        break;
-    case settings.USER_DIRECTORY_KEY:
-        if (value.enabled) await userdirectory.start();
-        else await userdirectory.stop();
-        break;
    default:
        break;
    }
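Note: with the seed file in place, every Cloudron derives its own fixed schedule. For example, a CRON_SEED_FILE containing `7:42` yields:

    systemChecks:      00 42 2 * * *                 (02:42:00 daily)
    updateCheckerJob:  00 42 1,5,9,13,17,21,23 * * *
    certificateRenew:  00 10 7 * * *                 (07:10:00 daily)

so instances no longer hit the certificate and update endpoints at the same wall-clock minute.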
+7 -4

@@ -61,8 +61,9 @@ async function initialize() {
    // note the pool also has an 'acquire' event but that is called whenever we do a getConnection()
    connection.on('error', (error) => debug(`Connection ${connection.threadId} error: ${error.message} ${error.code}`));

-    connection.query('USE ' + gDatabase.name);
+    connection.query(`USE ${gDatabase.name}`);
    connection.query('SET SESSION sql_mode = \'strict_all_tables\'');
+    connection.query('SET SESSION group_concat_max_len = 65536'); // GROUP_CONCAT has only 1024 default
    });
}
@@ -143,9 +144,11 @@ async function importFromFile(file) {
async function exportToFile(file) {
    assert.strictEqual(typeof file, 'string');

-    // latest mysqldump enables column stats by default which is not present in MySQL 5.7 server
-    // this option must not be set in production cloudrons which still use the old mysqldump
-    const colStats = (!constants.TEST && require('fs').readFileSync('/etc/lsb-release', 'utf-8').includes('20.04')) ? '--column-statistics=0' : '';
+    // latest mysqldump enables column stats by default which is not present in 5.7 util
+    const mysqlDumpHelp = safe.child_process.execSync('/usr/bin/mysqldump --help', { encoding: 'utf8' });
+    if (!mysqlDumpHelp) throw new BoxError(BoxError.DATABASE_ERROR, safe.error);
+    const hasColStats = mysqlDumpHelp.includes('column-statistics');
+    const colStats = hasColStats ? '--column-statistics=0' : '';

    const cmd = `/usr/bin/mysqldump -h "${gDatabase.hostname}" -u root -p${gDatabase.password} ${colStats} --single-transaction --routines --triggers ${gDatabase.name} > "${file}"`;
@@ -0,0 +1,13 @@
+'use strict';
+
+exports = module.exports = delay;
+
+const assert = require('assert');
+
+function delay(msecs) {
+    assert.strictEqual(typeof msecs, 'number');
+
+    return new Promise(function (resolve) {
+        setTimeout(resolve, msecs);
+    });
+}
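Note: this local helper replaces the external `delay` npm dependency with the same call shape. A minimal usage sketch:

    const delay = require('./delay.js');

    async function main() {
        await delay(5000); // resolves after 5 seconds
    }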
+2 -3

@@ -34,8 +34,7 @@ async function resolve(hostname, rrtype, options) {
    if (error && error.code === 'ECANCELLED') error.code = 'TIMEOUT';
    if (error) throw error;

-    // result is an empty array if there was no error but there is no record. when you query a random
-    // domain, it errors with ENOTFOUND. But if you query an existing domain (A record) but with different
-    // type (CNAME) it is not an error and empty array. for TXT records, result is 2d array of strings
+    // when you query a random record, it errors with ENOTFOUND. But, if you query a record which has a different type
+    // we sometimes get empty array and sometimes ENODATA. for TXT records, result is 2d array of strings
    return result;
}
@@ -11,7 +11,7 @@ exports = module.exports = {
const assert = require('assert'),
    BoxError = require('./boxerror.js'),
    constants = require('./constants.js'),
-    debug = require('debug')('box:userdirectory'),
+    debug = require('debug')('box:directoryserver'),
    dns = require('./dns.js'),
    domains = require('./domains.js'),
    eventlog = require('./eventlog.js'),
@@ -23,17 +23,16 @@ const assert = require('assert'),
    reverseproxy = require('./reverseproxy.js'),
    safe = require('safetydance'),
    settings = require('./settings.js'),
    speakeasy = require('speakeasy'),
    shell = require('./shell.js'),
    users = require('./users.js'),
    util = require('util'),
    validator = require('validator');

-var gServer = null;
+let gServer = null;

const NOOP = function () {};

const GROUP_USERS_DN = 'cn=users,ou=groups,dc=cloudron';
const GROUP_ADMINS_DN = 'cn=admins,ou=groups,dc=cloudron';
const SET_LDAP_ALLOWLIST_CMD = path.join(__dirname, 'scripts/setldapallowlist.sh');

async function validateConfig(config) {
@@ -68,6 +67,8 @@ async function applyConfig(config) {

    const [error] = await safe(shell.promises.sudo('setLdapAllowlist', [ SET_LDAP_ALLOWLIST_CMD ], {}));
    if (error) throw new BoxError(BoxError.IPTABLES_ERROR, `Error setting ldap allowlist: ${error.message}`);
+
+    if (config.enabled) await start(); else await stop();
}

// helper function to deal with pagination
@@ -99,13 +100,13 @@ function finalSend(results, req, res, next) {

    if (cookie && Buffer.isBuffer(cookie)) {
        // we have pagination
-        var first = min;
+        let first = min;
        if (cookie.length !== 0) {
            first = parseInt(cookie.toString(), 10);
        }
-        var last = sendPagedResults(first, first + pageSize);
+        const last = sendPagedResults(first, first + pageSize);

-        var resultCookie;
+        let resultCookie;
        if (last < max) {
            resultCookie = Buffer.from(last.toString());
        } else {
@@ -145,20 +146,20 @@ async function authorize(req, res, next) {
async function userSearch(req, res, next) {
    debug('user search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);

-    const [error, result] = await safe(users.list());
+    const [error, allUsers] = await safe(users.list());
    if (error) return next(new ldap.OperationsError(error.toString()));

+    const [groupsError, allGroups] = await safe(groups.listWithMembers());
+    if (groupsError) return next(new ldap.OperationsError(error.toString()));
+
    let results = [];

    // send user objects
-    result.forEach(function (user) {
+    for (const user of allUsers) {
        // skip entries with empty username. Some apps like owncloud can't deal with this
-        if (!user.username) return;
+        if (!user.username) continue;

-        const dn = ldap.parseDN('cn=' + user.id + ',ou=users,dc=cloudron');
-
-        const memberof = [ GROUP_USERS_DN ];
-        if (users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) memberof.push(GROUP_ADMINS_DN);
+        const dn = ldap.parseDN(`cn=${user.id},ou=users,dc=cloudron`);

        const displayName = user.displayName || user.username || ''; // displayName can be empty and username can be null
        const nameParts = displayName.split(' ');
@@ -179,10 +180,12 @@ async function userSearch(req, res, next) {
            givenName: firstName,
            username: user.username,
            samaccountname: user.username, // to support ActiveDirectory clients
-            memberof: memberof
+            memberof: allGroups.filter(function (g) { return g.userIds.indexOf(user.id) !== -1; }).map(function (g) { return g.name; })
        }
    };

+    if (user.twoFactorAuthenticationEnabled) obj.attributes.twoFactorAuthenticationEnabled = true;
+
    // http://www.zytrax.com/books/ldap/ape/core-schema.html#sn has 'name' as SUP which is a DirectoryString
    // which is required to have atleast one character if present
    if (lastName.length !== 0) obj.attributes.sn = lastName;
@@ -194,7 +197,7 @@ async function userSearch(req, res, next) {
        if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
            results.push(obj);
        }
-    });
+    }

    finalSend(results, req, res, next);
}
@@ -202,54 +205,24 @@ async function userSearch(req, res, next) {
async function groupSearch(req, res, next) {
    debug('group search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);

-    const [error, result] = await safe(users.list());
+    const [error, allUsers] = await safe(users.list());
    if (error) return next(new ldap.OperationsError(error.toString()));

    const results = [];

-    // those are the old virtual groups for backwards compat
-    const virtualGroups = [{
-        name: 'users',
-        admin: false
-    }, {
-        name: 'admins',
-        admin: true
-    }];
-
-    virtualGroups.forEach(function (group) {
-        const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
-        const members = group.admin ? result.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : result;
-
-        const obj = {
-            dn: dn.toString(),
-            attributes: {
-                objectclass: ['group'],
-                cn: group.name,
-                memberuid: members.map(function(entry) { return entry.id; }).sort()
-            }
-        };
-
-        // ensure all filter values are also lowercase
-        const lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
-        if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
-
-        if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
-            results.push(obj);
-        }
-    });
-
-    let [errorGroups, resultGroups] = await safe(groups.listWithMembers());
+    let [errorGroups, allGroups] = await safe(groups.listWithMembers());
    if (errorGroups) return next(new ldap.OperationsError(errorGroups.toString()));

-    resultGroups.forEach(function (group) {
-        const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
-        const members = group.userIds.filter(function (uid) { return result.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
+    for (const group of allGroups) {
+        const dn = ldap.parseDN(`cn=${group.name},ou=groups,dc=cloudron`);
+        const members = group.userIds.filter(function (uid) { return allUsers.map(function (u) { return u.id; }).indexOf(uid) !== -1; });

        const obj = {
            dn: dn.toString(),
            attributes: {
                objectclass: ['group'],
                cn: group.name,
                gidnumber: group.id,
                memberuid: members
            }
        };
@@ -261,7 +234,7 @@ async function groupSearch(req, res, next) {
        if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
            results.push(obj);
        }
-    });
+    }

    finalSend(results, req, res, next);
}
@@ -269,12 +242,15 @@ async function groupSearch(req, res, next) {
// Will attach req.user if successful
async function userAuth(req, res, next) {
    // extract the common name which might have different attribute names
-    const attributeName = Object.keys(req.dn.rdns[0].attrs)[0];
-    const commonName = req.dn.rdns[0].attrs[attributeName].value;
+    const cnAttributeName = Object.keys(req.dn.rdns[0].attrs)[0];
+    const commonName = req.dn.rdns[0].attrs[cnAttributeName].value;
    if (!commonName) return next(new ldap.NoSuchObjectError(req.dn.toString()));

+    const TOTPTOKEN_ATTRIBUTE_NAME = 'totptoken'; // This has to be in-sync with externalldap.js
+    const totpToken = req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME] ? req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME].value : null;
+
    let verifyFunc;
-    if (attributeName === 'mail') {
+    if (cnAttributeName === 'mail') {
        verifyFunc = users.verifyWithEmail;
    } else if (commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check
        verifyFunc = users.verifyWithEmail;
@@ -289,6 +265,12 @@ async function userAuth(req, res, next) {
    if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
    if (error) return next(new ldap.OperationsError(error.message));

+    // currently this is only optional if totpToken is provided and user has 2fa enabled
+    if (totpToken && user.twoFactorAuthenticationEnabled) {
+        const verified = speakeasy.totp.verify({ secret: user.twoFactorAuthenticationSecret, encoding: 'base32', token: totpToken, window: 2 });
+        if (!verified) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
+    }
+
    req.user = user;

    next();
@@ -308,12 +290,12 @@ async function start() {
    };

    const domainObject = await domains.get(settings.dashboardDomain());
-    const dashboardFqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
-    const bundle = await reverseproxy.getCertificatePath(dashboardFqdn, domainObject.domain);
+    const dashboardFqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domainObject);
+    const certificatePath = await reverseproxy.getCertificatePath(dashboardFqdn, domainObject.domain);

    gServer = ldap.createServer({
-        certificate: fs.readFileSync(bundle.certFilePath, 'utf8'),
-        key: fs.readFileSync(bundle.keyFilePath, 'utf8'),
+        certificate: fs.readFileSync(certificatePath.certFilePath, 'utf8'),
+        key: fs.readFileSync(certificatePath.keyFilePath, 'utf8'),
        log: logger
    });
@@ -324,12 +306,12 @@ async function start() {
gServer.bind('ou=system,dc=cloudron', async function(req, res, next) {
    debug('system bind: %s (from %s)', req.dn.toString(), req.connection.ldap.id);

-    const tmp = await settings.getUserDirectoryConfig();
+    const tmp = await settings.getDirectoryServerConfig();

    if (!req.dn.equals(constants.USER_DIRECTORY_LDAP_DN)) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
    if (req.credentials !== tmp.secret) return next(new ldap.InvalidCredentialsError(req.dn.toString()));

-    req.user = { user: 'userDirectoryAdmin' };
+    req.user = { user: 'directoryServerAdmin' };

    res.end();
@@ -342,7 +324,7 @@ async function start() {
gServer.bind('ou=users,dc=cloudron', userAuth, async function (req, res) {
    assert.strictEqual(typeof req.user, 'object');

-    await eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'userdirectory', id: req.connection.ldap.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
+    await eventlog.upsertLoginEvent(req.user.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'directoryserver', id: req.connection.ldap.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });

    res.end();
});
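Note: with the new `totptoken` handling in userAuth, an LDAP client can carry the TOTP token as an extra attribute of the first RDN of the bind DN. A hypothetical ldapjs client bind, assuming a multi-valued RDN of the form read by `req.dn.rdns[0].attrs` (the user id and token below are made up):

    client.bind('cn=uid-123+totptoken=123456,ou=users,dc=cloudron', 'password', function (error) {
        // InvalidCredentialsError if the password or the TOTP token does not verify
    });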
+27 -14

@@ -51,6 +51,7 @@ function api(provider) {
    case 'namecom': return require('./dns/namecom.js');
    case 'namecheap': return require('./dns/namecheap.js');
    case 'netcup': return require('./dns/netcup.js');
+    case 'hetzner': return require('./dns/hetzner.js');
    case 'noop': return require('./dns/noop.js');
    case 'manual': return require('./dns/manual.js');
    case 'wildcard': return require('./dns/wildcard.js');
@@ -75,16 +76,16 @@ function validateHostname(subdomain, domainObject) {

    const hostname = fqdn(subdomain, domainObject);

-    const RESERVED_LOCATIONS = [
-        constants.SMTP_LOCATION,
-        constants.IMAP_LOCATION
+    const RESERVED_SUBDOMAINS = [
+        constants.SMTP_SUBDOMAIN,
+        constants.IMAP_SUBDOMAIN
    ];
-    if (RESERVED_LOCATIONS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
+    if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);

    if (hostname === settings.dashboardFqdn()) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);

    // workaround https://github.com/oncletom/tld.js/issues/73
-    var tmp = hostname.replace('_', '-');
+    const tmp = hostname.replace('_', '-');
    if (!tld.isValid(tmp)) return new BoxError(BoxError.BAD_FIELD, 'Hostname is not a valid domain name');

    if (hostname.length > 253) return new BoxError(BoxError.BAD_FIELD, 'Hostname length exceeds 253 characters');
@@ -122,6 +123,9 @@ async function checkDnsRecords(subdomain, domain) {
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof domain, 'string');

+    const cnameRecords = await getDnsRecords(subdomain, domain, 'CNAME');
+    if (cnameRecords.length !== 0) return { needsOverwrite: true };
+
    const ipv4Records = await getDnsRecords(subdomain, domain, 'A');
    const ipv4 = await sysinfo.getServerIPv4();

@@ -158,7 +162,7 @@ async function removeDnsRecords(subdomain, domain, type, values) {
    assert.strictEqual(typeof type, 'string');
    assert(Array.isArray(values));

-    debug('removeDNSRecord: %s on %s type %s values', subdomain, domain, type, values);
+    debug('removeDNSRecords: %s on %s type %s values', subdomain, domain, type, values);

    const domainObject = await domains.get(domain);
    const [error] = await safe(api(domainObject.provider).del(domainObject, subdomain, type, values));
@@ -180,11 +184,11 @@ async function waitForDnsRecord(subdomain, domain, type, value, options) {
    await api(domainObject.provider).wait(domainObject, subdomain, type, value, options);
}

-function makeWildcard(vhost) {
-    assert.strictEqual(typeof vhost, 'string');
+function makeWildcard(fqdn) {
+    assert.strictEqual(typeof fqdn, 'string');

-    // if the vhost is like *.example.com, this function will do nothing
-    let parts = vhost.split('.');
+    // if the fqdn is like *.example.com, this function will do nothing
+    const parts = fqdn.split('.');
    parts[0] = '*';
    return parts.join('.');
}
@@ -195,8 +199,8 @@ async function registerLocation(location, options, recordType, recordValue) {
    // get the current record before updating it
    const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, recordType));
    if (getError) {
-        const retryable = getError.reason !== BoxError.ACCESS_DENIED && getError.reason !== BoxError.NOT_FOUND;
-        debug(`registerLocation: Get error. retryable: ${retryable}. ${upsertError.message}`);
+        const retryable = getError.reason !== BoxError.ACCESS_DENIED && getError.reason !== BoxError.NOT_FOUND; // NOT_FOUND is when zone is not found
+        debug(`registerLocation: Get error. retryable: ${retryable}. ${getError.message}`);
        throw new BoxError(getError.reason, getError.message, { domain: location, retryable });
    }

@@ -224,9 +228,18 @@ async function registerLocations(locations, options, progressCallback) {
    const ipv6 = await sysinfo.getServerIPv6();

    for (const location of locations) {
-        progressCallback({ message: `Registering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
+        const fqdn = `${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}`;
+        progressCallback({ message: `Registering location: ${fqdn}` });

        await promiseRetry({ times: 200, interval: 5000, debug, retry: (error) => error.retryable }, async function () {
+            // cname records cannot co-exist with other records
+            const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, 'CNAME'));
+            if (!getError && values.length === 1) {
+                if (!options.overwriteDns) throw new BoxError(BoxError.ALREADY_EXISTS, 'DNS CNAME record already exists', { domain: location, retryable: false });
+                debug(`registerLocations: removing CNAME record of ${fqdn}`);
+                await removeDnsRecords(location.subdomain, location.domain, 'CNAME', values);
+            }
+
            await registerLocation(location, options, 'A', ipv4);
            if (ipv6) await registerLocation(location, options, 'AAAA', ipv6);
        });
@@ -282,7 +295,7 @@ async function syncDnsRecords(options, progressCallback) {
    progress += Math.round(100/(1+allDomains.length));

    let locations = [];
-    if (domain.domain === settings.dashboardDomain()) locations.push({ subdomain: constants.DASHBOARD_LOCATION, domain: settings.dashboardDomain() });
+    if (domain.domain === settings.dashboardDomain()) locations.push({ subdomain: constants.DASHBOARD_SUBDOMAIN, domain: settings.dashboardDomain() });
    if (domain.domain === settings.mailDomain() && settings.mailFqdn() !== settings.dashboardFqdn()) locations.push({ subdomain: mailSubdomain, domain: settings.mailDomain() });

    for (const app of allApps) {
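Note: the renamed makeWildcard helper simply swaps the first label for an asterisk:

    makeWildcard('git.example.com'); // '*.example.com'
    makeWildcard('*.example.com');   // unchanged: '*.example.com'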
@@ -34,7 +34,7 @@ function injectPrivateFields(newConfig, currentConfig) {
    if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
}

-async function translateRequestError(result) {
+function translateRequestError(result) {
    assert.strictEqual(typeof result, 'object');

    if (result.statusCode === 404) return new BoxError(BoxError.NOT_FOUND, util.format('%s %j', result.statusCode, 'API does not exist'));
@@ -18,13 +18,12 @@ const assert = require('assert'),
    dns = require('../dns.js'),
    safe = require('safetydance'),
    superagent = require('superagent'),
-    util = require('util'),
    waitForDns = require('./waitfordns.js');

const DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';

function formatError(response) {
-    return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
+    return `DigitalOcean DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
}

function removePrivateFields(domainObject) {
+14 -20

@@ -24,12 +24,6 @@ const assert = require('assert'),
// const GODADDY_API_OTE = 'https://api.ote-godaddy.com/v1/domains';
const GODADDY_API = 'https://api.godaddy.com/v1/domains';

-// this is a workaround for godaddy not having a delete API
-// https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
-const GODADDY_INVALID_IP = '0.0.0.0';
-const GODADDY_INVALID_IPv6 = '0:0:0:0:0:0:0:0';
-const GODADDY_INVALID_TXT = '""';
-
function formatError(response) {
    return util.format(`GoDaddy DNS error [${response.statusCode}] ${response.body.message}`);
}
@@ -106,6 +100,12 @@ async function get(domainObject, location, type) {
    const values = response.body.map(function (record) { return record.data; });

    if (values.length === 1) {
+        // legacy: this was a workaround for godaddy not having a delete API
+        // https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
+        const GODADDY_INVALID_IP = '0.0.0.0';
+        const GODADDY_INVALID_IPv6 = '0:0:0:0:0:0:0:0';
+        const GODADDY_INVALID_TXT = '""';
+
        if ((type === 'A' && values[0] === GODADDY_INVALID_IP)
            || (type === 'AAAA' && values[0] === GODADDY_INVALID_IPv6)
            || (type === 'TXT' && values[0] === GODADDY_INVALID_TXT)) return []; // pretend this record doesn't exist
@@ -124,30 +124,24 @@ async function del(domainObject, location, type, values) {
        zoneName = domainObject.zoneName,
        name = dns.getName(domainObject, location, type) || '@';

-    debug(`get: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
+    debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);

    if (type !== 'A' && type !== 'AAAA' && type !== 'TXT') throw new BoxError(BoxError.EXTERNAL_ERROR, 'Record deletion is not supported by GoDaddy API');
+    const result = await get(domainObject, location, type);
+    if (result.length === 0) return;

-    // check if the record exists at all so that we don't insert the "Dead" record for no reason
-    const existingRecords = await get(domainObject, location, type);
-    if (existingRecords.length === 0) return;
+    const tmp = result.filter(r => !values.includes(r));

-    // godaddy does not have a delete API. so fill it up with an invalid IP that we can ignore in future get()
-    const records = [{
-        ttl: 600,
-        data: type === 'A' ? GODADDY_INVALID_IP : (type === 'AAAA' ? GODADDY_INVALID_IPv6 : GODADDY_INVALID_TXT)
-    }];
+    if (tmp.length) return await upsert(domainObject, location, type, tmp); // only remove 'values'

-    const [error, response] = await safe(superagent.put(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
+    const [error, response] = await safe(superagent.del(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
        .set('Authorization', `sso-key ${domainConfig.apiKey}:${domainConfig.apiSecret}`)
-        .send(records)
        .timeout(30 * 1000)
        .ok(() => true));

    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+    if (response.statusCode === 404) return;
    if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
-    if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+    if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
}

async function wait(domainObject, subdomain, type, value, options) {
@@ -184,7 +178,7 @@ async function verifyDomainConfig(domainObject) {
    if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
    if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');

-    if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1; })) {
+    if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1 || n.toLowerCase().indexOf('.secureserver.net') !== -1; })) {
        debug('verifyDomainConfig: %j does not contain GoDaddy NS', nameservers);
        throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to GoDaddy');
    }
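Note: del() now removes only the requested values. If other values remain in the record set, it rewrites the set via upsert(); only when the set would become empty does it issue the actual DELETE (expecting a 204). A sketch with made-up values:

    // current A record set for 'www': ['203.0.113.10', '203.0.113.11']
    await del(domainObject, 'www', 'A', [ '203.0.113.10' ]); // PUTs ['203.0.113.11'] via upsert
    await del(domainObject, 'www', 'A', [ '203.0.113.11' ]); // DELETE .../records/A/www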
@@ -0,0 +1,259 @@
+'use strict';
+
+exports = module.exports = {
+    removePrivateFields,
+    injectPrivateFields,
+    upsert,
+    get,
+    del,
+    wait,
+    verifyDomainConfig
+};
+
+const assert = require('assert'),
+    BoxError = require('../boxerror.js'),
+    constants = require('../constants.js'),
+    debug = require('debug')('box:dns/hetzner'),
+    dig = require('../dig.js'),
+    dns = require('../dns.js'),
+    safe = require('safetydance'),
+    superagent = require('superagent'),
+    waitForDns = require('./waitfordns.js');
+
+const ENDPOINT = 'https://dns.hetzner.com/api/v1';
+
+function formatError(response) {
+    return `Hetzner DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
+}
+
+function removePrivateFields(domainObject) {
+    domainObject.config.token = constants.SECRET_PLACEHOLDER;
+    return domainObject;
+}
+
+function injectPrivateFields(newConfig, currentConfig) {
+    if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
+}
+
+async function getZone(domainConfig, zoneName) {
+    assert.strictEqual(typeof domainConfig, 'object');
+    assert.strictEqual(typeof zoneName, 'string');
+
+    const [error, response] = await safe(superagent.get(`${ENDPOINT}/zones`)
+        .set('Auth-API-Token', domainConfig.token)
+        .query({ search_name: zoneName })
+        .timeout(30 * 1000)
+        .retry(5)
+        .ok(() => true));
+    if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+    if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
+    if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+
+    if (!Array.isArray(response.body.zones)) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+
+    const zone = response.body.zones.filter(z => z.name === zoneName);
+    if (zone.length === 0) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
+    return zone[0];
+}
+
+async function getZoneRecords(domainConfig, zone, name, type) {
+    assert.strictEqual(typeof domainConfig, 'object');
+    assert.strictEqual(typeof zone, 'object');
+    assert.strictEqual(typeof name, 'string');
+    assert.strictEqual(typeof type, 'string');
+
+    let page = 1, matchingRecords = [];
+
+    debug(`getInternal: getting dns records of ${zone.name} with ${name} and type ${type}`);
+
+    const perPage = 50;
+
+    // eslint-disable-next-line no-constant-condition
+    while (true) {
+        const [error, response] = await safe(superagent.get(`${ENDPOINT}/records`)
+            .set('Auth-API-Token', domainConfig.token)
+            .query({ zone_id: zone.id, page, per_page: perPage })
+            .timeout(30 * 1000)
+            .retry(5)
+            .ok(() => true));
+
+        if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+        if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
+        if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
+        if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+
+        matchingRecords = matchingRecords.concat(response.body.records.filter(function (record) {
+            return (record.type === type && record.name === name);
+        }));
+
+        if (response.body.records.length < perPage) break;
+
+        ++page;
+    }
+
+    return matchingRecords;
+}
+
+async function upsert(domainObject, location, type, values) {
+    assert.strictEqual(typeof domainObject, 'object');
+    assert.strictEqual(typeof location, 'string');
+    assert.strictEqual(typeof type, 'string');
+    assert(Array.isArray(values));
+
+    const domainConfig = domainObject.config,
+        zoneName = domainObject.zoneName,
+        name = dns.getName(domainObject, location, type) || '@';
+
+    debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
+
+    const zone = await getZone(domainConfig, zoneName);
+    const records = await getZoneRecords(domainConfig, zone, name, type);
+
+    // used to track available records to update instead of create
+    let i = 0;
+
+    for (let value of values) {
+        const data = {
+            type,
+            name,
+            value,
+            ttl: 60,
+            zone_id: zone.id
+        };
+
+        if (i >= records.length) {
+            const [error, response] = await safe(superagent.post(`${ENDPOINT}/records`)
+                .set('Auth-API-Token', domainConfig.token)
+                .send(data)
+                .timeout(30 * 1000)
+                .retry(5)
+                .ok(() => true));
+
+            if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+            if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
+            if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
+            if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+        } else {
+            const [error, response] = await safe(superagent.put(`${ENDPOINT}/records/${records[i].id}`)
+                .set('Auth-API-Token', domainConfig.token)
+                .send(data)
+                .timeout(30 * 1000)
+                .retry(5)
+                .ok(() => true));
+
+            ++i;
+
+            if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+            if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
+            if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
+            if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+        }
+    }
+
+    for (let j = values.length + 1; j < records.length; j++) {
+        const [error] = await safe(superagent.del(`${ENDPOINT}/records/${records[j].id}`)
+            .set('Auth-API-Token', domainConfig.token)
+            .timeout(30 * 1000)
+            .retry(5)
+            .ok(() => true));
+
+        if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
+    }
+
+    debug('upsert: completed');
+}
+
+async function get(domainObject, location, type) {
+    assert.strictEqual(typeof domainObject, 'object');
+    assert.strictEqual(typeof location, 'string');
+    assert.strictEqual(typeof type, 'string');
+
+    const domainConfig = domainObject.config,
+        zoneName = domainObject.zoneName,
+        name = dns.getName(domainObject, location, type) || '@';
+
+    const zone = await getZone(domainConfig, zoneName);
+    const result = await getZoneRecords(domainConfig, zone, name, type);
+
+    return result.map(function (record) { return record.value; });
+}
+
+async function del(domainObject, location, type, values) {
+    assert.strictEqual(typeof domainObject, 'object');
+    assert.strictEqual(typeof location, 'string');
+    assert.strictEqual(typeof type, 'string');
+    assert(Array.isArray(values));
+
+    const domainConfig = domainObject.config,
+        zoneName = domainObject.zoneName,
+        name = dns.getName(domainObject, location, type) || '@';
+
+    const zone = await getZone(domainConfig, zoneName);
+    const records = await getZoneRecords(domainConfig, zone, name, type);
+    if (records.length === 0) return;
+
+    const matchingRecords = records.filter(function (record) { return values.some(function (value) { return value === record.value; }); });
+    if (matchingRecords.length === 0) return;
+
+    for (const r of matchingRecords) {
+        const [error, response] = await safe(superagent.del(`${ENDPOINT}/records/${r.id}`)
+            .set('Auth-API-Token', domainConfig.token)
+            .timeout(30 * 1000)
+            .retry(5)
+            .ok(() => true));

+        if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
+        if (response.statusCode === 404) return;
+        if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
+        if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
+    }
+}
+
+async function wait(domainObject, subdomain, type, value, options) {
+    assert.strictEqual(typeof domainObject, 'object');
+    assert.strictEqual(typeof subdomain, 'string');
+    assert.strictEqual(typeof type, 'string');
+    assert.strictEqual(typeof value, 'string');
+    assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
+
+    const fqdn = dns.fqdn(subdomain, domainObject);
+
+    await waitForDns(fqdn, domainObject.zoneName, type, value, options);
+}
+
+async function verifyDomainConfig(domainObject) {
+    assert.strictEqual(typeof domainObject, 'object');
+
+    const domainConfig = domainObject.config,
+        zoneName = domainObject.zoneName;
+
+    if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
+
+    const ip = '127.0.0.1';
+
+    const credentials = {
+        token: domainConfig.token
+    };
+
+    if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
+
+    const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
+    if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
+    if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
+
+    // https://docs.hetzner.com/dns-console/dns/general/dns-overview#the-hetzner-online-name-servers-are
+    if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('oxygen.ns.hetzner.com') === -1) {
+        debug('verifyDomainConfig: %j does not contain Hetzner NS', nameservers);
+        throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Hetzner');
+    }
+
+    const location = 'cloudrontestdns';
+
+    await upsert(domainObject, location, 'A', [ ip ]);
+    debug('verifyDomainConfig: Test A record added');
+
+    await del(domainObject, location, 'A', [ ip ]);
+    debug('verifyDomainConfig: Test A record removed again');
+
+    return credentials;
+}
@@ -90,7 +90,7 @@ async function setZone(domainConfig, zoneName, hosts) {

    // Map to query params https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
    hosts.forEach(function (host, i) {
-        var n = i+1; // api starts with 1 not 0
+        const n = i+1; // api starts with 1 not 0
        query['TTL' + n] = '300'; // keep it low
        query['HostName' + n] = host.HostName || host.Name;
        query['RecordType' + n] = host.RecordType || host.Type;
@@ -255,6 +255,9 @@ async function verifyDomainConfig(domainObject) {
    await upsert(newDomainObject, location, 'A', [ ip ]);
    debug('verifyDomainConfig: Test A record added');

+    await get(newDomainObject, location, 'A');
+    debug('verifyDomainConfig: Can list record sets');
+
    await del(newDomainObject, location, 'A', [ ip ]);
    debug('verifyDomainConfig: Test A record removed again');
+1 -1

@@ -99,7 +99,7 @@ async function upsert(domainObject, location, type, values) {
    for (const value of values) {
        const data = {
            type,
-            ttl: 300 // lowest
+            ttl: 120 // lowest
        };

        if (type === 'MX') {
@@ -25,8 +25,8 @@ async function resolveIp(hostname, type, options) {
    if (cnameResults.length === 0) return cnameResults;

    // recurse lookup the CNAME record
-    debug(`resolveIp: Resolving ${hostname}'s CNAME record ${results[0]}`);
-    await dig.resolve(results[0], type, options);
+    debug(`resolveIp: Resolving ${hostname}'s CNAME record ${cnameResults[0]}`);
+    return await dig.resolve(cnameResults[0], type, options);
}

async function isChangeSynced(hostname, type, value, nameserver) {
@@ -83,15 +83,16 @@ async function waitForDns(hostname, zoneName, type, value, options) {
    assert.strictEqual(typeof value, 'string');
    assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }

-    debug(`waitForDns: ${hostname} to be ${value} in zone ${zoneName}`);
+    debug(`waitForDns: waiting for ${hostname} to be ${value} in zone ${zoneName}`);

    await promiseRetry(Object.assign({ debug }, options), async function () {
        const nameservers = await dig.resolve(zoneName, 'NS', { timeout: 5000 });
        if (!nameservers) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Unable to get nameservers');
+        debug(`waitForDns: nameservers are ${JSON.stringify(nameservers)}`);

        for (const nameserver of nameservers) {
            const synced = await isChangeSynced(hostname, type, value, nameserver);
-            debug('waitForDns: %s %s ns: %j', hostname, synced ? 'done' : 'not done', nameservers);
+            debug(`waitForDns: ${hostname} at ns ${nameserver}: ${synced ? 'done' : 'not done'} `);
            if (!synced) throw new BoxError(BoxError.EXTERNAL_ERROR, 'ETRYAGAIN');
        }
    });
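Note: this follows the same promiseRetry pattern used in registerLocations above: pass { times, interval, debug } (optionally a retry predicate) plus an async function that throws to request another attempt. A sketch with made-up values:

    await promiseRetry({ times: 10, interval: 5000, debug }, async function () {
        const synced = await isChangeSynced('my.example.com', 'A', '203.0.113.10', 'ns1.example.net');
        if (!synced) throw new BoxError(BoxError.EXTERNAL_ERROR, 'ETRYAGAIN'); // triggers the next attempt
    });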
+6 -4

@@ -80,8 +80,9 @@ async function verifyDomainConfig(domainObject) {
    const fqdn = dns.fqdn(location, domainObject);

    const [ipv4Error, ipv4Result] = await safe(dig.resolve(fqdn, 'A', { server: '127.0.0.1', timeout: 5000 }));
-    if (ipv4Error && ipv4Error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}. Please check if you have set up *.${domainObject.domain} to point to this server's IP`);
-    if (ipv4Error || !ipv4Result) throw new BoxError(BoxError.BAD_FIELD, ipv4Error ? ipv4Error.message : `Unable to resolve IPv4 of ${fqdn}`);
+    if (ipv4Error && (ipv4Error.code === 'ENOTFOUND' || ipv4Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}. Please check if you have set up *.${domainObject.domain} to point to this server's IP`);
+    if (ipv4Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}: ${ipv4Error.message}`);
+    if (!ipv4Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}`);

    const ipv4 = await sysinfo.getServerIPv4();
    if (ipv4Result.length !== 1 || ipv4 !== ipv4Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv4Result)} instead of IPv4 ${ipv4}`);
@@ -89,8 +90,9 @@ async function verifyDomainConfig(domainObject) {
    const ipv6 = await sysinfo.getServerIPv6(); // both should be RFC 5952 format
    if (ipv6) {
        const [ipv6Error, ipv6Result] = await safe(dig.resolve(fqdn, 'AAAA', { server: '127.0.0.1', timeout: 5000 }));
-        if (ipv6Error && ipv6Error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
-        if (ipv6Error || !ipv6Result) throw new BoxError(BoxError.BAD_FIELD, ipv6Error ? ipv6Error.message : `Unable to resolve IPv6 of ${fqdn}`);
+        if (ipv6Error && (ipv6Error.code === 'ENOTFOUND' || ipv6Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
+        if (ipv6Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}: ${ipv6Error.message}`);
+        if (!ipv6Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);

        if (ipv6Result.length !== 1 || ipv6 !== ipv6Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv6Result)} instead of IPv6 ${ipv6}`);
    }
+61
-80
@@ -20,13 +20,15 @@ exports = module.exports = {
|
||||
createSubcontainer,
|
||||
inspect,
|
||||
getContainerIp,
|
||||
execContainer,
|
||||
getEvents,
|
||||
memoryUsage,
|
||||
createVolume,
|
||||
removeVolume,
|
||||
clearVolume,
|
||||
|
||||
update,
|
||||
|
||||
createExec,
|
||||
startExec,
|
||||
getExec,
|
||||
resizeExec
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
@@ -34,9 +36,8 @@ const apps = require('./apps.js'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:docker'),
|
||||
delay = require('delay'),
|
||||
delay = require('./delay.js'),
|
||||
Docker = require('dockerode'),
|
||||
path = require('path'),
|
||||
reverseProxy = require('./reverseproxy.js'),
|
||||
services = require('./services.js'),
|
||||
settings = require('./settings.js'),
|
||||
@@ -46,9 +47,6 @@ const apps = require('./apps.js'),
|
||||
volumes = require('./volumes.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh'),
|
||||
MKDIRVOLUME_CMD = path.join(__dirname, 'scripts/mkdirvolume.sh');
|
||||
|
||||
const DOCKER_SOCKET_PATH = '/var/run/docker.sock';
|
||||
const gConnection = new Docker({ socketPath: DOCKER_SOCKET_PATH });
|
||||
|
||||
@@ -117,7 +115,7 @@ async function pullImage(manifest) {
|
||||
// https://github.com/dotcloud/docker/issues/1074 says each status message
|
||||
// is emitted as a chunk
|
||||
stream.on('data', function (chunk) {
|
||||
var data = safe.JSON.parse(chunk) || { };
|
||||
const data = safe.JSON.parse(chunk) || { };
|
||||
debug('pullImage: %j', data);
|
||||
|
||||
// The data.status here is useless because this is per layer as opposed to per image
|
||||
@@ -194,28 +192,30 @@ async function getAddonMounts(app) {
|
||||
|
||||
for (const addon of Object.keys(addons)) {
|
||||
switch (addon) {
|
||||
case 'localstorage':
|
||||
case 'localstorage': {
|
||||
const storageDir = await apps.getStorageDir(app);
|
||||
mounts.push({
|
||||
Target: '/app/data',
|
||||
Source: `${app.id}-localstorage`,
|
||||
Type: 'volume',
|
||||
Source: storageDir,
|
||||
Type: 'bind',
|
||||
ReadOnly: false
|
||||
});
|
||||
|
||||
break;
|
||||
}
|
||||
case 'tls': {
|
||||
const bundle = await reverseProxy.getCertificatePath(app.fqdn, app.domain);
|
||||
const certificatePath = await reverseProxy.getCertificatePath(app.fqdn, app.domain);
|
||||
|
||||
mounts.push({
|
||||
Target: '/etc/certs/tls_cert.pem',
|
||||
Source: bundle.certFilePath,
|
||||
Source: certificatePath.certFilePath,
|
||||
Type: 'bind',
|
||||
ReadOnly: true
|
||||
});
|
||||
|
||||
mounts.push({
|
||||
Target: '/etc/certs/tls_key.pem',
|
||||
Source: bundle.keyFilePath,
|
||||
Source: certificatePath.keyFilePath,
|
||||
Type: 'bind',
|
||||
ReadOnly: true
|
||||
});
|
||||
@@ -247,12 +247,12 @@ function getAddresses() {
|
||||
|
||||
const addresses = [];
|
||||
for (const phy of physicalDevices) {
|
||||
const inet = safe.JSON.parse(safe.child_process.execSync(`ip -f inet -j addr show ${phy.name}`, { encoding: 'utf8' }));
|
||||
const inet = safe.JSON.parse(safe.child_process.execSync(`ip -f inet -j addr show dev ${phy.name} scope global`, { encoding: 'utf8' }));
|
||||
for (const r of inet) {
|
||||
const address = safe.query(r, 'addr_info[0].local');
|
||||
if (address) addresses.push(address);
|
||||
}
|
||||
const inet6 = safe.JSON.parse(safe.child_process.execSync(`ip -f inet6 -j addr show ${phy.name}`, { encoding: 'utf8' }));
|
||||
const inet6 = safe.JSON.parse(safe.child_process.execSync(`ip -f inet6 -j addr show dev ${phy.name} scope global`, { encoding: 'utf8' }));
|
||||
for (const r of inet6) {
|
||||
const address = safe.query(r, 'addr_info[0].local');
|
||||
if (address) addresses.push(address);
|
||||
@@ -355,7 +355,8 @@ async function createSubcontainer(app, name, cmd, options) {
        VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
        SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
        CapAdd: [],
-       CapDrop: []
+       CapDrop: [],
+       Sysctls: {}
    }
};

@@ -388,7 +389,13 @@ async function createSubcontainer(app, name, cmd, options) {
    const capabilities = manifest.capabilities || [];

    // https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
-   if (capabilities.includes('net_admin')) containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
+   if (capabilities.includes('net_admin')) {
+       containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
+       // ipv6 is disabled for new interfaces in the container by default, which prevents the openvpn tun device from getting ipv6
+       // See https://github.com/moby/moby/issues/20569 and https://github.com/moby/moby/issues/33099
+       containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.disable_ipv6'] = '0';
+       containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.forwarding'] = '1';
+   }
    if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping
    if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker
@@ -506,7 +513,7 @@ async function deleteImage(manifest) {

    const dockerImage = manifest ? manifest.dockerImage : null;
    if (!dockerImage) return;
-   if (dockerImage.includes('//')) return; // a common mistake is to paste an https:// url as the docker image. this results in a crash at runtime in the dockerode module
+   if (dockerImage.includes('//') || dockerImage.startsWith('/')) return; // a common mistake is to paste an https:// url as the docker image. this results in a crash at runtime in the dockerode module (https://github.com/apocas/dockerode/issues/548)

    const removeOptions = {
        force: false, // might be shared with another instance of this app
@@ -552,30 +559,51 @@ async function getContainerIp(containerId) {
    return ip;
}

-async function execContainer(containerId, options) {
+async function createExec(containerId, options) {
    assert.strictEqual(typeof containerId, 'string');
    assert.strictEqual(typeof options, 'object');

    const container = gConnection.getContainer(containerId);

-   const [error, exec] = await safe(container.exec(options.execOptions));
+   const [error, exec] = await safe(container.exec(options));
    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
    if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);

-   const [startError, stream] = await safe(exec.start(options.startOptions)); /* in hijacked mode, stream is a net.socket */
-   if (startError) throw new BoxError(BoxError.DOCKER_ERROR, startError);
+   return exec.id;
}

-   if (options.rows && options.columns) {
-       // there is a race where resizing too early results in a 404 "no such exec"
-       // https://git.cloudron.io/cloudron/box/issues/549
-       setTimeout(function () {
-           exec.resize({ h: options.rows, w: options.columns }, function (error) { if (error) debug('Error resizing console', error); });
-       }, 2000);
-   }
+async function startExec(execId, options) {
+    assert.strictEqual(typeof execId, 'string');
+    assert.strictEqual(typeof options, 'object');
+
+    const exec = gConnection.getExec(execId);
+    const [error, stream] = await safe(exec.start(options)); /* in hijacked mode, stream is a net.socket */
+    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
+    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
+    return stream;
+}
+
+async function getExec(execId) {
+    assert.strictEqual(typeof execId, 'string');
+
+    const exec = gConnection.getExec(execId);
+    const [error, result] = await safe(exec.inspect());
+    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`);
+    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
+
+    return { exitCode: result.ExitCode, running: result.Running };
+}
+
+async function resizeExec(execId, options) {
+    assert.strictEqual(typeof execId, 'string');
+    assert.strictEqual(typeof options, 'object');
+
+    const exec = gConnection.getExec(execId);
+    const [error] = await safe(exec.resize(options)); // { h, w }
+    if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
+    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
+}

async function getEvents(options) {
    assert.strictEqual(typeof options, 'object');
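Editor's note: the split lets callers defer start and resize, which sidesteps the resize-too-early race the old setTimeout worked around. A hedged usage sketch of the new helpers (the wiring and option values here are illustrative, not from this commit):

// Illustrative flow for the split exec API above (error handling elided).
async function runInteractive(containerId) {
    const execId = await createExec(containerId, {
        Cmd: [ '/bin/bash' ],
        AttachStdin: true, AttachStdout: true, AttachStderr: true, Tty: true
    });
    const stream = await startExec(execId, { hijack: true, stdin: true, Tty: true });
    await resizeExec(execId, { h: 40, w: 120 }); // safe now; the exec has started
    stream.pipe(process.stdout);
    process.stdin.pipe(stream);
    return getExec(execId); // { exitCode, running }
}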
@@ -596,53 +624,6 @@ async function memoryUsage(containerId) {
    return result;
}

-async function createVolume(name, volumeDataDir, labels) {
-    assert.strictEqual(typeof name, 'string');
-    assert.strictEqual(typeof volumeDataDir, 'string');
-    assert.strictEqual(typeof labels, 'object');
-
-    const volumeOptions = {
-        Name: name,
-        Driver: 'local',
-        DriverOpts: { // https://github.com/moby/moby/issues/19990#issuecomment-248955005
-            type: 'none',
-            device: volumeDataDir,
-            o: 'bind'
-        },
-        Labels: labels
-    };
-
-    // requires sudo because the path can be outside appsdata
-    let [error] = await safe(shell.promises.sudo('createVolume', [ MKDIRVOLUME_CMD, volumeDataDir ], {}));
-    if (error) throw new BoxError(BoxError.FS_ERROR, `Error creating app data dir: ${error.message}`);
-
-    [error] = await safe(gConnection.createVolume(volumeOptions));
-    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
-}
-
-async function clearVolume(name, options) {
-    assert.strictEqual(typeof name, 'string');
-    assert.strictEqual(typeof options, 'object');
-
-    let volume = gConnection.getVolume(name);
-    let [error, v] = await safe(volume.inspect());
-    if (error && error.statusCode === 404) return;
-    if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
-
-    const volumeDataDir = v.Options.device;
-    [error] = await shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, options.removeDirectory ? 'rmdir' : 'clear', volumeDataDir ], {});
-    if (error) throw new BoxError(BoxError.FS_ERROR, error);
-}
-
-// this only removes the volume and not the data
-async function removeVolume(name) {
-    assert.strictEqual(typeof name, 'string');
-
-    let volume = gConnection.getVolume(name);
-    const [error] = await safe(volume.remove());
-    if (error && error.statusCode !== 404) throw new BoxError(BoxError.DOCKER_ERROR, `removeVolume: Error removing volume: ${error.message}`);
-}

async function info() {
    const [error, result] = await safe(gConnection.info());
    if (error) throw new BoxError(BoxError.DOCKER_ERROR, 'Error connecting to docker');
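Editor's note: the removed createVolume relied on the local-driver bind trick from moby#19990; as a general illustration of that trick (volume name and path are made up, dockerode assumed):

// A named volume whose data lives at an arbitrary host path, via the
// 'local' driver with bind options (see moby#19990).
const Docker = require('dockerode');
const docker = new Docker({ socketPath: '/var/run/docker.sock' });

async function createBindVolume() {
    await docker.createVolume({
        Name: 'myapp-data', // hypothetical
        Driver: 'local',
        DriverOpts: { type: 'none', device: '/mnt/data/myapp', o: 'bind' },
        Labels: { appId: 'myapp' }
    });
}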
+3
-3
@@ -41,7 +41,7 @@ async function authorizeApp(req, res, next) {
}

function attachDockerRequest(req, res, next) {
-   var options = {
+   const options = {
        socketPath: '/var/run/docker.sock',
        method: req.method,
        path: req.url,

@@ -143,8 +143,8 @@ async function start() {
    // eslint-disable-next-line no-unused-vars
    gHttpServer.on('upgrade', function (req, client, head) {
        // Create a new tcp connection to the TCP server
-       var remote = net.connect('/var/run/docker.sock', function () {
-           var upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
+       const remote = net.connect('/var/run/docker.sock', function () {
+           let upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
                `Host: ${req.headers.host}\r\n` +
                'Connection: Upgrade\r\n' +
                'Upgrade: tcp\r\n';
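Editor's note: the upgrade handler above replays the client's handshake against the docker unix socket and then splices the sockets; a minimal self-contained sketch of the same pattern (port and header set are illustrative):

// Proxying an HTTP Upgrade request to a unix socket with Node built-ins.
const http = require('http');
const net = require('net');

const server = http.createServer();
server.on('upgrade', function (req, client, head) {
    const remote = net.connect('/var/run/docker.sock', function () {
        let upgradeMessage = `${req.method} ${req.url} HTTP/1.1\r\n` +
            `Host: ${req.headers.host}\r\n` +
            'Connection: Upgrade\r\n' +
            'Upgrade: tcp\r\n';
        upgradeMessage += '\r\n'; // end of headers
        remote.write(upgradeMessage);
        if (head && head.length) remote.write(head); // bytes already read past the headers
        client.pipe(remote).pipe(client); // splice the two sockets together
    });
});
server.listen(3003); // hypothetical port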
+27
-31
@@ -54,6 +54,7 @@ function api(provider) {
    case 'digitalocean': return require('./dns/digitalocean.js');
    case 'gandi': return require('./dns/gandi.js');
    case 'godaddy': return require('./dns/godaddy.js');
+   case 'hetzner': return require('./dns/hetzner.js');
    case 'linode': return require('./dns/linode.js');
    case 'vultr': return require('./dns/vultr.js');
    case 'namecom': return require('./dns/namecom.js');

@@ -76,13 +77,13 @@ async function verifyDomainConfig(domainConfig, domain, zoneName, provider) {
    if (!backend) throw new BoxError(BoxError.BAD_FIELD, 'Invalid provider');

    const domainObject = { config: domainConfig, domain: domain, zoneName: zoneName };
-   const [error, result] = await safe(api(provider).verifyDomainConfig(domainObject));
-   if (error && error.reason === BoxError.ACCESS_DENIED) return { error: new BoxError(BoxError.BAD_FIELD, `Access denied: ${error.message}`) };
-   if (error && error.reason === BoxError.NOT_FOUND) return { error: new BoxError(BoxError.BAD_FIELD, `Zone not found: ${error.message}`) };
-   if (error && error.reason === BoxError.EXTERNAL_ERROR) return { error: new BoxError(BoxError.BAD_FIELD, `Configuration error: ${error.message}`) };
-   if (error) return { error };
+   const [error, sanitizedConfig] = await safe(api(provider).verifyDomainConfig(domainObject));
+   if (error && error.reason === BoxError.ACCESS_DENIED) throw new BoxError(BoxError.BAD_FIELD, `Access denied: ${error.message}`);
+   if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.BAD_FIELD, `Zone not found: ${error.message}`);
+   if (error && error.reason === BoxError.EXTERNAL_ERROR) throw new BoxError(BoxError.BAD_FIELD, `Configuration error: ${error.message}`);
+   if (error) throw error;

-   return { error: null, sanitizedConfig: result };
+   return sanitizedConfig;
}
function validateTlsConfig(tlsConfig, dnsProvider) {

@@ -150,12 +151,11 @@ async function add(domain, data, auditSource) {
    dkimSelector = `cloudron-${suffix}`;
}

-   const result = await verifyDomainConfig(config, domain, zoneName, provider);
-   if (result.error) throw result.error;
+   const sanitizedConfig = await verifyDomainConfig(config, domain, zoneName, provider);

-   let queries = [
+   const queries = [
        { query: 'INSERT INTO domains (domain, zoneName, provider, configJson, tlsConfigJson, fallbackCertificateJson) VALUES (?, ?, ?, ?, ?, ?)',
-         args: [ domain, zoneName, provider, JSON.stringify(result.sanitizedConfig), JSON.stringify(tlsConfig), JSON.stringify(fallbackCertificate) ] },
+         args: [ domain, zoneName, provider, JSON.stringify(sanitizedConfig), JSON.stringify(tlsConfig), JSON.stringify(fallbackCertificate) ] },
        { query: 'INSERT INTO mail (domain, dkimKeyJson, dkimSelector) VALUES (?, ?, ?)', args: [ domain, JSON.stringify(dkimKey), dkimSelector || 'cloudron' ] },
    ];

@@ -165,7 +165,7 @@ async function add(domain, data, auditSource) {

    await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);

-   eventlog.add(eventlog.ACTION_DOMAIN_ADD, auditSource, { domain, zoneName, provider });
+   await eventlog.add(eventlog.ACTION_DOMAIN_ADD, auditSource, { domain, zoneName, provider });

    safe(mail.onDomainAdded(domain)); // background
}

@@ -194,7 +194,6 @@ async function setConfig(domain, data, auditSource) {
    assert.strictEqual(typeof auditSource, 'object');

    let { zoneName, provider, config, fallbackCertificate, tlsConfig } = data;
-   let error;

    if (settings.isDemo() && (domain === settings.dashboardDomain())) throw new BoxError(BoxError.CONFLICT, 'Not allowed in demo mode');

@@ -210,16 +209,15 @@ async function setConfig(domain, data, auditSource) {
    if (error) throw error;
}

-   error = validateTlsConfig(tlsConfig, provider);
-   if (error) throw error;
+   const tlsConfigError = validateTlsConfig(tlsConfig, provider);
+   if (tlsConfigError) throw tlsConfigError;

    if (provider === domainObject.provider) api(provider).injectPrivateFields(config, domainObject.config);

-   const result = await verifyDomainConfig(config, domain, zoneName, provider);
-   if (result.error) throw result.error;
+   const sanitizedConfig = await verifyDomainConfig(config, domain, zoneName, provider);

    const newData = {
-       config: result.sanitizedConfig,
+       config: sanitizedConfig,
        zoneName,
        provider,
        tlsConfig,

@@ -227,7 +225,7 @@ async function setConfig(domain, data, auditSource) {

    if (fallbackCertificate) newData.fallbackCertificate = fallbackCertificate;

-   let args = [ ], fields = [ ];
+   const args = [], fields = [];
    for (const k in newData) {
        if (k === 'config' || k === 'tlsConfig' || k === 'fallbackCertificate') { // json fields
            fields.push(`${k}Json = ?`);

@@ -239,15 +237,14 @@ async function setConfig(domain, data, auditSource) {
    }
    args.push(domain);

-   [error] = await safe(database.query('UPDATE domains SET ' + fields.join(', ') + ' WHERE domain=?', args));
-   if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
-   if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
+   const result = await database.query('UPDATE domains SET ' + fields.join(', ') + ' WHERE domain=?', args);
+   if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

    if (!fallbackCertificate) return;

    await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);

-   eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, zoneName, provider });
+   await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, zoneName, provider });
}

async function setWellKnown(domain, wellKnown, auditSource) {

@@ -255,14 +252,13 @@ async function setWellKnown(domain, wellKnown, auditSource) {
    assert.strictEqual(typeof wellKnown, 'object');
    assert.strictEqual(typeof auditSource, 'object');

-   let error = validateWellKnown(wellKnown);
-   if (error) throw error;
+   const wellKnownError = validateWellKnown(wellKnown);
+   if (wellKnownError) throw wellKnownError;

-   [error] = await safe(database.query('UPDATE domains SET wellKnownJson = ? WHERE domain=?', [ JSON.stringify(wellKnown), domain ]));
-   if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
-   if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
+   const result = await database.query('UPDATE domains SET wellKnownJson = ? WHERE domain=?', [ JSON.stringify(wellKnown), domain ]);
+   if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

-   eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, wellKnown });
+   await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, wellKnown });
}
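Editor's note: both update paths above now detect a missing row from affectedRows instead of sniffing error codes; in general terms (table and column names here are illustrative, result shape follows mysql-style drivers), the pattern is:

// An UPDATE that matches no rows succeeds at the SQL level, so existence
// is detected from the driver's result object rather than from an error.
async function updateOrNotFound(database, domain, zoneName) {
    const result = await database.query('UPDATE domains SET zoneName = ? WHERE domain = ?', [ zoneName, domain ]);
    if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
}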
async function del(domain, auditSource) {

@@ -287,7 +283,7 @@ async function del(domain, auditSource) {
    if (error) throw error;
    if (results[1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');

-   eventlog.add(eventlog.ACTION_DOMAIN_REMOVE, auditSource, { domain });
+   await eventlog.add(eventlog.ACTION_DOMAIN_REMOVE, auditSource, { domain });

    safe(mail.onDomainRemoved(domain));
}

@@ -298,13 +294,13 @@ async function clear() {

// removes all fields that are strictly private and should never be returned by API calls
function removePrivateFields(domain) {
-   var result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
+   const result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
    return api(result.provider).removePrivateFields(result);
}

// removes all fields that are not accessible by a normal user
function removeRestrictedFields(domain) {
-   var result = _.pick(domain, 'domain', 'zoneName', 'provider');
+   const result = _.pick(domain, 'domain', 'zoneName', 'provider');

    result.config = {}; // always ensure config object
+4
-4
@@ -27,7 +27,7 @@ async function sync(auditSource) {
    info.ipv4 = info.ip;
    delete info.ip;
}
-   const ipv4Changed = info.ip !== ipv4;
+   const ipv4Changed = info.ipv4 !== ipv4;
    const ipv6Changed = ipv6 && info.ipv6 !== ipv6; // both should be RFC 5952 format

    if (!ipv4Changed && !ipv6Changed) {

@@ -35,9 +35,9 @@ async function sync(auditSource) {
    return;
}

-   debug(`refreshDNS: updating IP from ${info.ip} to ipv4: ${ipv4} (changed: ${ipv4Changed}) ipv6: ${ipv6} (changed: ${ipv6Changed})`);
-   if (ipv4Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'A', [ ipv4 ]);
-   if (ipv6Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'AAAA', [ ipv6 ]);
+   debug(`refreshDNS: updating IP from ${info.ipv4} to ipv4: ${ipv4} (changed: ${ipv4Changed}) ipv6: ${ipv6} (changed: ${ipv6Changed})`);
+   if (ipv4Changed) await dns.upsertDnsRecords(constants.DASHBOARD_SUBDOMAIN, settings.dashboardDomain(), 'A', [ ipv4 ]);
+   if (ipv6Changed) await dns.upsertDnsRecords(constants.DASHBOARD_SUBDOMAIN, settings.dashboardDomain(), 'AAAA', [ ipv6 ]);

    const result = await apps.list();
    for (const app of result) {
@@ -36,6 +36,7 @@ exports = module.exports = {

    ACTION_CERTIFICATE_NEW: 'certificate.new',
    ACTION_CERTIFICATE_RENEWAL: 'certificate.renew',
+   ACTION_CERTIFICATE_CLEANUP: 'certificate.cleanup',

    ACTION_DASHBOARD_DOMAIN_UPDATE: 'dashboard.domain.update',

@@ -43,6 +44,8 @@ exports = module.exports = {
    ACTION_DOMAIN_UPDATE: 'domain.update',
    ACTION_DOMAIN_REMOVE: 'domain.remove',

+   ACTION_INSTALL_FINISH: 'cloudron.install.finish',
+
    ACTION_MAIL_LOCATION: 'mail.location',
    ACTION_MAIL_ENABLED: 'mail.enabled',
    ACTION_MAIL_DISABLED: 'mail.disabled',

@@ -66,6 +69,7 @@ exports = module.exports = {

    ACTION_USER_ADD: 'user.add',
    ACTION_USER_LOGIN: 'user.login',
+   ACTION_USER_LOGIN_GHOST: 'user.login.ghost',
    ACTION_USER_LOGOUT: 'user.logout',
    ACTION_USER_REMOVE: 'user.remove',
    ACTION_USER_UPDATE: 'user.update',
+36
-5
@@ -2,6 +2,7 @@

exports = module.exports = {
    verifyPassword,
+   verifyPasswordAndTotpToken,
    maybeCreateUser,

    testConfig,

@@ -20,7 +21,7 @@ const assert = require('assert'),
    debug = require('debug')('box:externalldap'),
    groups = require('./groups.js'),
    ldap = require('ldapjs'),
-   once = require('once'),
+   once = require('./once.js'),
    safe = require('safetydance'),
    settings = require('./settings.js'),
    tasks = require('./tasks.js'),

@@ -42,8 +43,9 @@ function translateUser(ldapConfig, ldapUser) {

    // RFC: https://datatracker.ietf.org/doc/html/rfc2798
    return {
-       username: ldapUser[ldapConfig.usernameField],
+       username: ldapUser[ldapConfig.usernameField].toLowerCase(),
        email: ldapUser.mail || ldapUser.mailPrimaryAddress,
+       twoFactorAuthenticationEnabled: !!ldapUser.twoFactorAuthenticationEnabled,
        displayName: ldapUser.displayName || ldapUser.cn // user.givenName + ' ' + user.sn
    };
}

@@ -254,8 +256,11 @@ async function maybeCreateUser(identifier) {
        throw error;
    }

-   // fetch the full record
-   return await users.get(userId);
+   // fetch the full record and amend potential twoFA settings
+   const newUser = await users.get(userId);
+   if (user.twoFactorAuthenticationEnabled) newUser.twoFactorAuthenticationEnabled = true;
+
+   return newUser;
}

async function verifyPassword(user, password) {

@@ -279,6 +284,32 @@ async function verifyPassword(user, password) {
    return translateUser(externalLdapConfig, ldapUsers[0]);
}

+async function verifyPasswordAndTotpToken(user, password, totpToken) {
+    assert.strictEqual(typeof user, 'object');
+    assert.strictEqual(typeof password, 'string');
+    assert.strictEqual(typeof totpToken, 'string');
+
+    const externalLdapConfig = await settings.getExternalLdapConfig();
+    if (externalLdapConfig.provider === 'noop') throw new BoxError(BoxError.BAD_STATE, 'not enabled');
+
+    const ldapUsers = await ldapUserSearch(externalLdapConfig, { filter: `${externalLdapConfig.usernameField}=${user.username}` });
+    if (ldapUsers.length === 0) throw new BoxError(BoxError.NOT_FOUND);
+    if (ldapUsers.length > 1) throw new BoxError(BoxError.CONFLICT);
+
+    const client = await getClient(externalLdapConfig, { bind: false });
+
+    // inject totptoken into the first attribute
+    const rdns = ldapUsers[0].dn.split(',');
+    const totpTokenDn = `${rdns[0]}+totptoken=${totpToken},` + rdns.slice(1).join(',');
+
+    const [error] = await safe(util.promisify(client.bind.bind(client))(totpTokenDn, password));
+    client.unbind();
+    if (error instanceof ldap.InvalidCredentialsError) throw new BoxError(BoxError.INVALID_CREDENTIALS);
+    if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error);
+
+    return translateUser(externalLdapConfig, ldapUsers[0]);
+}
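Editor's note: the multi-valued RDN trick above smuggles the TOTP token through a standard LDAP simple bind; schematically (server URL, DN, and credentials are placeholders):

// Binding with a multi-valued RDN that carries the TOTP token. The LDAP
// server side must know to split the extra 'totptoken' attribute off the RDN.
const ldap = require('ldapjs');
const util = require('util');

async function bindWithTotp(userDn, password, totpToken) {
    const client = ldap.createClient({ url: 'ldap://ldap.example.com' });
    const rdns = userDn.split(',');
    const totpTokenDn = `${rdns[0]}+totptoken=${totpToken},` + rdns.slice(1).join(',');
    // e.g. 'uid=alice+totptoken=123456,ou=users,dc=example,dc=com'
    await util.promisify(client.bind.bind(client))(totpTokenDn, password);
    client.unbind();
}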
async function startSyncer() {
    const externalLdapConfig = await settings.getExternalLdapConfig();
    if (externalLdapConfig.provider === 'noop') throw new BoxError(BoxError.BAD_STATE, 'not enabled');

@@ -432,7 +463,7 @@ async function syncGroupUsers(externalLdapConfig, progressCallback) {

    debug(`syncGroupUsers: Found member object at ${memberDn} adding to group ${group.name}`);

-   const username = result[externalLdapConfig.usernameField];
+   const username = result[externalLdapConfig.usernameField].toLowerCase();
    if (!username) continue;

    const [getError, userObject] = await safe(users.getByUsername(username));
+184
@@ -0,0 +1,184 @@
'use strict';

exports = module.exports = {
    getSystem,
    getByApp
};

const apps = require('./apps.js'),
    assert = require('assert'),
    BoxError = require('./boxerror.js'),
    fs = require('fs'),
    safe = require('safetydance'),
    superagent = require('superagent'),
    system = require('./system.js');

// for testing locally: curl 'http://127.0.0.1:8417/graphite-web/render?format=json&from=-1min&target=absolute(collectd.localhost.du-docker.capacity-usage)'
// the datapoint is (value, timestamp) https://buildmedia.readthedocs.org/media/pdf/graphite/0.9.16/graphite.pdf
const GRAPHITE_RENDER_URL = 'http://127.0.0.1:8417/graphite-web/render';

// https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled
const CGROUP_VERSION = fs.existsSync('/sys/fs/cgroup/cgroup.controllers') ? '2' : '1';

async function getByApp(app, fromMinutes, noNullPoints) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof fromMinutes, 'number');
    assert.strictEqual(typeof noNullPoints, 'boolean');

    const timeBucketSize = fromMinutes > (24 * 60) ? (6*60) : 5;

    const memoryQuery = {
        target: null, // filled below
        format: 'json',
        from: `-${fromMinutes}min`,
        until: 'now'
    };
    if (CGROUP_VERSION === '1') {
        memoryQuery.target = `summarize(collectd.localhost.table-${app.id}-memory.gauge-memsw_usage_in_bytes, "${timeBucketSize}min", "avg")`;
    } else {
        memoryQuery.target = `summarize(sum(collectd.localhost.table-${app.id}-memory.gauge-memory_current, collectd.localhost.table-${app.id}-memory.gauge-memory_swap_current), "${timeBucketSize}min", "avg")`;
    }
    if (noNullPoints) memoryQuery.noNullPoints = true;

    const [memoryError, memoryResponse] = await safe(superagent.get(GRAPHITE_RENDER_URL)
        .query(memoryQuery)
        .timeout(30 * 1000)
        .ok(() => true));

    if (memoryError) throw new BoxError(BoxError.NETWORK_ERROR, memoryError.message);
    if (memoryResponse.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${memoryResponse.status} ${memoryResponse.text}`);

    let diskDataPoints;
    if (app.manifest.addons.localstorage) {
        const diskQuery = {
            target: `summarize(collectd.localhost.du-${app.id}.capacity-usage, "${timeBucketSize}min", "avg")`,
            format: 'json',
            from: `-${fromMinutes}min`,
            until: 'now'
        };
        if (noNullPoints) diskQuery.noNullPoints = true;

        const [diskError, diskResponse] = await safe(superagent.get(GRAPHITE_RENDER_URL)
            .query(diskQuery)
            .timeout(30 * 1000)
            .ok(() => true));

        if (diskError) throw new BoxError(BoxError.NETWORK_ERROR, diskError.message);
        if (diskResponse.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${diskResponse.status} ${diskResponse.text}`);

        // we may not have any datapoints
        if (diskResponse.body.length === 0) diskDataPoints = [];
        else diskDataPoints = diskResponse.body[0].datapoints;
    } else {
        diskDataPoints = [];
    }

    // app proxy instances have no container and thus no datapoints
    return { memory: memoryResponse.body[0] || { datapoints: [] }, disk: { datapoints: diskDataPoints } };
}
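Editor's note: the render endpoint above is plain HTTP; a hedged sketch of querying it directly (the target mirrors the cgroup-v2 metric name used in the file, the host follows the local-testing comment):

// Querying graphite-web's render API with superagent; summarize() averages
// raw datapoints into 5-minute buckets.
const superagent = require('superagent');

async function appMemorySeries(appId) {
    const response = await superagent.get('http://127.0.0.1:8417/graphite-web/render').query({
        target: `summarize(collectd.localhost.table-${appId}-memory.gauge-memory_current, "5min", "avg")`,
        format: 'json',
        from: '-60min',
        until: 'now'
    });
    // response.body: [ { target, datapoints: [ [value, timestamp], ... ] } ]
    return response.body[0] ? response.body[0].datapoints : [];
}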
async function getSystem(fromMinutes, noNullPoints) {
    assert.strictEqual(typeof fromMinutes, 'number');
    assert.strictEqual(typeof noNullPoints, 'boolean');

    const timeBucketSize = fromMinutes > (24 * 60) ? (6*60) : 5;

    const cpuQuery = `summarize(sum(collectd.localhost.aggregation-cpu-average.cpu-system, collectd.localhost.aggregation-cpu-average.cpu-user), "${timeBucketSize}min", "avg")`;
    const memoryQuery = `summarize(sum(collectd.localhost.memory.memory-used, collectd.localhost.swap.swap-used), "${timeBucketSize}min", "avg")`;

    const query = {
        target: [ cpuQuery, memoryQuery ],
        format: 'json',
        from: `-${fromMinutes}min`,
        until: 'now'
    };

    const [memCpuError, memCpuResponse] = await safe(superagent.get(GRAPHITE_RENDER_URL)
        .query(query)
        .timeout(30 * 1000)
        .ok(() => true));

    if (memCpuError) throw new BoxError(BoxError.NETWORK_ERROR, memCpuError.message);
    if (memCpuResponse.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${memCpuResponse.status} ${memCpuResponse.text}`);

    const allApps = await apps.list();
    const appResponses = {};
    for (const app of allApps) {
        appResponses[app.id] = await getByApp(app, fromMinutes, noNullPoints);
    }

    const diskInfo = await system.getDisks();

    // segregate locations into the correct disks based on 'filesystem'
    diskInfo.disks.forEach(function (disk, index) {
        disk.id = index;
        disk.contains = [];

        if (disk.filesystem === diskInfo.platformDataDisk) disk.contains.push({ type: 'standard', label: 'Platform data', id: 'platformdata', usage: 0 });
        if (disk.filesystem === diskInfo.boxDataDisk) disk.contains.push({ type: 'standard', label: 'Box data', id: 'boxdata', usage: 0 });
        if (disk.filesystem === diskInfo.dockerDataDisk) disk.contains.push({ type: 'standard', label: 'Docker images', id: 'docker', usage: 0 });
        if (disk.filesystem === diskInfo.mailDataDisk) disk.contains.push({ type: 'standard', label: 'Email data', id: 'maildata', usage: 0 });
        if (disk.filesystem === diskInfo.backupsDisk) disk.contains.push({ type: 'standard', label: 'Backup data', id: 'cloudron-backup', usage: 0 });

        // attach appIds which reside on this disk
        const apps = Object.keys(diskInfo.apps).filter(function (appId) { return diskInfo.apps[appId] === disk.filesystem; });
        apps.forEach(function (appId) {
            disk.contains.push({ type: 'app', id: appId, label: '', usage: 0 });
        });

        // attach volumeIds which reside on this disk
        const volumes = Object.keys(diskInfo.volumes).filter(function (volumeId) { return diskInfo.volumes[volumeId] === disk.filesystem; });
        volumes.forEach(function (volumeId) {
            disk.contains.push({ type: 'volume', id: volumeId, label: '', usage: 0 });
        });
    });

    for (const disk of diskInfo.disks) {
        // /dev/sda1 -> sda1
        // /dev/mapper/foo.com -> mapper_foo_com (see #348)
        let diskName = disk.filesystem.slice(disk.filesystem.indexOf('/', 1) + 1);
        diskName = diskName.replace(/\/|\./g, '_');

        const target = [
            `absolute(collectd.localhost.df-${diskName}.df_complex-free)`,
            `absolute(collectd.localhost.df-${diskName}.df_complex-reserved)`, // reserved for root (default: 5%) tune2fs -l/m
            `absolute(collectd.localhost.df-${diskName}.df_complex-used)`
        ];

        const diskQuery = {
            target: target,
            format: 'json',
            from: '-1day',
            until: 'now'
        };

        const [diskError, diskResponse] = await safe(superagent.get(GRAPHITE_RENDER_URL).query(diskQuery).timeout(30 * 1000).ok(() => true));
        if (diskError) throw new BoxError(BoxError.NETWORK_ERROR, diskError.message);
        if (diskResponse.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${diskResponse.status} ${diskResponse.text}`);

        disk.size = diskResponse.body[2].datapoints[0][0] + diskResponse.body[1].datapoints[0][0] + diskResponse.body[0].datapoints[0][0];
        disk.free = diskResponse.body[0].datapoints[0][0];
        disk.occupied = diskResponse.body[2].datapoints[0][0];

        for (const content of disk.contains) {
            const query = {
                target: `absolute(collectd.localhost.du-${content.id}.capacity-usage)`,
                format: 'json',
                from: '-1day',
                until: 'now'
            };

            const [error, response] = await safe(superagent.get(GRAPHITE_RENDER_URL).query(query).timeout(30 * 1000).ok(() => true));
            if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
            if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${response.status} ${response.text}`);

            // we may not have any datapoints
            if (response.body.length === 0) content.usage = null;
            else content.usage = response.body[0].datapoints[0][0];
        }
    }

    return { cpu: memCpuResponse.body[0], memory: memCpuResponse.body[1], apps: appResponses, disks: diskInfo.disks };
}
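Editor's note: the df metric-name mangling above is worth a worked example (the device paths are illustrative):

// Worked example of the collectd df metric-name mangling: strip the
// leading '/dev/' prefix, then map '/' and '.' to '_'.
function collectdDiskName(filesystem) {
    const diskName = filesystem.slice(filesystem.indexOf('/', 1) + 1);
    return diskName.replace(/\/|\./g, '_');
}

console.log(collectdDiskName('/dev/sda1'));           // 'sda1'
console.log(collectdDiskName('/dev/mapper/foo.com')); // 'mapper_foo_com'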
+225
@@ -0,0 +1,225 @@
'use strict';

const assert = require('assert'),
    BoxError = require('./boxerror.js'),
    crypto = require('crypto'),
    debug = require('debug')('box:hush'),
    fs = require('fs'),
    progressStream = require('progress-stream'),
    TransformStream = require('stream').Transform;

class EncryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._headerPushed = false;
        this._iv = crypto.randomBytes(16);
        this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
    }

    pushHeaderIfNeeded() {
        if (!this._headerPushed) {
            const magic = Buffer.from('CBV2');
            this.push(magic);
            this._hmac.update(magic);
            this.push(this._iv);
            this._hmac.update(this._iv);
            this._headerPushed = true;
        }
    }

    _transform(chunk, ignoredEncoding, callback) {
        this.pushHeaderIfNeeded();

        try {
            const crypt = this._cipher.update(chunk);
            this._hmac.update(crypt);
            callback(null, crypt);
        } catch (error) {
            callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when updating: ${error.message}`));
        }
    }

    _flush(callback) {
        try {
            this.pushHeaderIfNeeded(); // for 0-length files
            const crypt = this._cipher.final();
            this.push(crypt);
            this._hmac.update(crypt);
            callback(null, this._hmac.digest()); // +32 bytes
        } catch (error) {
            callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when flushing: ${error.message}`));
        }
    }
}

class DecryptStream extends TransformStream {
    constructor(encryption) {
        super();
        this._key = Buffer.from(encryption.dataKey, 'hex');
        this._header = Buffer.alloc(0);
        this._decipher = null;
        this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
        this._buffer = Buffer.alloc(0);
    }

    _transform(chunk, ignoredEncoding, callback) {
        const needed = 20 - this._header.length; // 4 for magic, 16 for iv

        if (this._header.length !== 20) { // not gotten header yet
            this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
            if (this._header.length !== 20) return callback();

            if (!this._header.slice(0, 4).equals(Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));

            const iv = this._header.slice(4);
            this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
            this._hmac.update(this._header);
        }

        this._buffer = Buffer.concat([ this._buffer, chunk.slice(needed) ]);
        if (this._buffer.length < 32) return callback(); // hmac trailer length is 32

        try {
            const cipherText = this._buffer.slice(0, -32);
            this._hmac.update(cipherText);
            const plainText = this._decipher.update(cipherText);
            this._buffer = this._buffer.slice(-32);
            callback(null, plainText);
        } catch (error) {
            callback(new BoxError(BoxError.CRYPTO_ERROR, `Decryption error: ${error.message}`));
        }
    }

    _flush (callback) {
        if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));

        try {
            if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));

            const plainText = this._decipher.final();
            callback(null, plainText);
        } catch (error) {
            callback(new BoxError(BoxError.CRYPTO_ERROR, `Invalid password or tampered file: ${error.message}`));
        }
    }
}
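Editor's note: the on-disk framing these streams produce is 'CBV2' magic, a 16-byte IV, the ciphertext, then a 32-byte HMAC-SHA256 trailer over everything preceding it. A hedged round-trip sketch of the classes above (keys are throwaway values generated for illustration):

// Round-trip through EncryptStream/DecryptStream defined above.
const crypto = require('crypto');

const encryption = {
    dataKey: crypto.randomBytes(32).toString('hex'),
    dataHmacKey: crypto.randomBytes(32).toString('hex')
};

const enc = new EncryptStream(encryption);
const dec = new DecryptStream(encryption);
enc.pipe(dec).on('data', (d) => process.stdout.write(d)); // prints 'hello'
enc.end('hello');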
function encryptFilePath(filePath, encryption) {
    assert.strictEqual(typeof filePath, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const encryptedParts = filePath.split('/').map(function (part) {
        let hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
        const iv = hmac.update(part).digest().slice(0, 16); // iv has to be deterministic, for our sync (copy) logic to work
        const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
        let crypt = cipher.update(part);
        crypt = Buffer.concat([ iv, crypt, cipher.final() ]);

        return crypt.toString('base64') // ensures path is valid
            .replace(/\//g, '-') // replace '/' of base64 since it conflicts with path separator
            .replace(/=/g,''); // strip trailing = padding. this is only needed if we concat base64 strings, which we don't
    });

    return encryptedParts.join('/');
}

function decryptFilePath(filePath, encryption) {
    assert.strictEqual(typeof filePath, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const decryptedParts = [];
    for (let part of filePath.split('/')) {
        part = part + Array(part.length % 4).join('='); // add back = padding
        part = part.replace(/-/g, '/'); // replace with '/'

        try {
            const buffer = Buffer.from(part, 'base64');
            const iv = buffer.slice(0, 16);
            let decrypt = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
            const plainText = decrypt.update(buffer.slice(16));
            const plainTextString = Buffer.concat([ plainText, decrypt.final() ]).toString('utf8');
            const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
            if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };

            decryptedParts.push(plainTextString);
        } catch (error) {
            debug(`Error decrypting part ${part} of path ${filePath}:`, error);
            return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
        }
    }

    return { result: decryptedParts.join('/') };
}
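Editor's note: the deterministic IV (the first 16 HMAC bytes of the plaintext part) makes equal names encrypt identically, which is what lets the sync/copy logic compare encrypted paths. A quick round-trip sketch with throwaway keys:

// Round-trip of the path helpers above; identical input parts always
// produce identical output, by design.
const crypto = require('crypto');

const encryption = {
    filenameKey: crypto.randomBytes(32).toString('hex'),
    filenameHmacKey: crypto.randomBytes(32).toString('hex')
};

const enc = encryptFilePath('backup/app/data.json', encryption);
console.log(enc);                                      // opaque, but stable per part
console.log(decryptFilePath(enc, encryption).result);  // 'backup/app/data.json'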
function createReadStream(sourceFile, encryption) {
    assert.strictEqual(typeof sourceFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const stream = fs.createReadStream(sourceFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    stream.on('error', function (error) {
        debug(`createReadStream: read stream error at ${sourceFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
    });

    stream.on('open', () => ps.emit('open'));

    if (encryption) {
        let encryptStream = new EncryptStream(encryption);

        encryptStream.on('error', function (error) {
            debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
            ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
        });

        return stream.pipe(encryptStream).pipe(ps);
    } else {
        return stream.pipe(ps);
    }
}

function createWriteStream(destFile, encryption) {
    assert.strictEqual(typeof destFile, 'string');
    assert.strictEqual(typeof encryption, 'object');

    const stream = fs.createWriteStream(destFile);
    const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds

    stream.on('error', function (error) {
        debug(`createWriteStream: write stream error ${destFile}`, error);
        ps.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
    });

    stream.on('finish', function () {
        debug('createWriteStream: done.');
        // we use a separate event because ps is a through2 stream which emits 'finish' on the end of inStream and not the write
        ps.emit('done');
    });

    if (encryption) {
        let decrypt = new DecryptStream(encryption);
        decrypt.on('error', function (error) {
            debug(`createWriteStream: decrypt stream error ${destFile}`, error);
            ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
        });

        ps.pipe(decrypt).pipe(stream);
    } else {
        ps.pipe(stream);
    }

    return ps;
}

exports = module.exports = {
    EncryptStream,
    DecryptStream,

    encryptFilePath,
    decryptFilePath,

    createReadStream,
    createWriteStream
};
@@ -6,7 +6,7 @@

exports = module.exports = {
    // a version change recreates all containers with latest docker config
-   'version': '49.0.0',
+   'version': '49.1.0',

    'baseImages': [
        { repo: 'cloudron/base', tag: 'cloudron/base:3.2.0@sha256:ba1d566164a67c266782545ea9809dc611c4152e27686fd14060332dd88263ea' }

@@ -16,12 +16,12 @@ exports = module.exports = {
    // docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
    'images': {
        'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.4.0@sha256:45817f1631992391d585f171498d257487d872480fd5646723a2b956cc4ef15d' },
-       'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.2.0@sha256:c70d0a1ff7ce8a27dcf7d6f8d1718ddba82ef254079fee5d20fdf074f28cd009' },
-       'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.3.0@sha256:9f0cfe83310a6f67885c21804c01e73c8d256606217f44dcefb3e05a5402b2c9' },
-       'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.2.0@sha256:c8ebdbe2663b26fcd58b1e6b97906b62565adbe4a06256ba0f86114f78b37e6b' },
-       'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.2.1@sha256:30a5550c41c3be3a01dbf457497ba2f3c05f3121c595a17ef1aacbef931b6114' },
-       'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.6.0@sha256:67541d29f1ce3ace245b4acdaac28acde3cc15f4f83b98e9b7315930aeb5084c' },
+       'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.2.1@sha256:75cef64ba4917ba9ec68bc0c9d9ba3a9eeae00a70173cd6d81cc6118038737d9' },
+       'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.3.1@sha256:b0c564d097b765d4a639330843e2e813d2c87fc8ed34b7df7550bf2c6df0012c' },
+       'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.2.1@sha256:f7f689beea07b1c6a9503a48f6fb38ef66e5b22f59fc585a92842a6578b33d46' },
+       'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.3.0@sha256:89c4e8083631b6d16b5d630d9b27f8ecf301c62f81219d77bd5948a1f4a4375c' },
+       'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.7.0@sha256:a41a52ba45bea0b2f14be82f8480d5f4583d806dc1f9c99c3bce858d2c9f27d7' },
        'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.1.0@sha256:30ec3a01964a1e01396acf265183997c3e17fb07eac1a82b979292cc7719ff4b' },
-       'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.6.0@sha256:9c686b10c1a3ba344a743f399d08b4da5426e111f455114980f0ae0229c1ab23' }
+       'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.6.1@sha256:ba4b9a1fe274c0ef0a900e5d0deeb8f3da08e118798d1d90fbf995cc0cf6e3a3' }
    }
};
+69
-64
@@ -21,13 +21,10 @@ const addonConfigs = require('./addonconfigs.js'),
    users = require('./users.js'),
    util = require('util');

-var gServer = null;
+let gServer = null;

const NOOP = function () {};

-const GROUP_USERS_DN = 'cn=users,ou=groups,dc=cloudron';
-const GROUP_ADMINS_DN = 'cn=admins,ou=groups,dc=cloudron';
-
// Will attach req.app if successful
async function authenticateApp(req, res, next) {
    const sourceIp = req.connection.ldap.id.split(':')[0];

@@ -113,13 +110,13 @@ function finalSend(results, req, res, next) {

    if (cookie && Buffer.isBuffer(cookie)) {
        // we have pagination
-       var first = min;
+       let first = min;
        if (cookie.length !== 0) {
            first = parseInt(cookie.toString(), 10);
        }
-       var last = sendPagedResults(first, first + pageSize);
+       const last = sendPagedResults(first, first + pageSize);

-       var resultCookie;
+       let resultCookie;
        if (last < max) {
            resultCookie = Buffer.from(last.toString());
        } else {

@@ -150,6 +147,9 @@ async function userSearch(req, res, next) {
    const [error, result] = await safe(getUsersWithAccessToApp(req));
    if (error) return next(new ldap.OperationsError(error.toString()));

+   const [groupsError, allGroups] = await safe(groups.listWithMembers());
+   if (groupsError) return next(new ldap.OperationsError(groupsError.toString()));
+
    let results = [];

    // send user objects

@@ -159,9 +159,6 @@ async function userSearch(req, res, next) {

    const dn = ldap.parseDN('cn=' + user.id + ',ou=users,dc=cloudron');

-   const memberof = [ GROUP_USERS_DN ];
-   if (users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) memberof.push(GROUP_ADMINS_DN);
-
    const displayName = user.displayName || user.username || ''; // displayName can be empty and username can be null
    const nameParts = displayName.split(' ');
    const firstName = nameParts[0];

@@ -172,7 +169,7 @@ async function userSearch(req, res, next) {
    attributes: {
        objectclass: ['user', 'inetorgperson', 'person', 'organizationalperson', 'top' ],
        objectcategory: 'person',
-       cn: user.id,
+       cn: displayName,
        uid: user.id,
        entryuuid: user.id, // to support OpenLDAP clients
        mail: user.email,

@@ -181,7 +178,7 @@ async function userSearch(req, res, next) {
        givenName: firstName,
        username: user.username,
        samaccountname: user.username, // to support ActiveDirectory clients
-       memberof: memberof
+       memberof: allGroups.filter(function (g) { return g.userIds.indexOf(user.id) !== -1; }).map(function (g) { return g.name; })
    }
};

@@ -204,42 +201,8 @@ async function userSearch(req, res, next) {
async function groupSearch(req, res, next) {
    debug('group search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);

    const [error, usersWithAccess] = await safe(getUsersWithAccessToApp(req));
    if (error) return next(new ldap.OperationsError(error.toString()));

    const results = [];

-   // those are the old virtual groups for backwards compat
-   const virtualGroups = [{
-       name: 'users',
-       admin: false
-   }, {
-       name: 'admins',
-       admin: true
-   }];
-
-   virtualGroups.forEach(function (group) {
-       const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
-       const members = group.admin ? usersWithAccess.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : usersWithAccess;
-
-       const obj = {
-           dn: dn.toString(),
-           attributes: {
-               objectclass: ['group'],
-               cn: group.name,
-               memberuid: members.map(function(entry) { return entry.id; }).sort()
-           }
-       };
-
-       // ensure all filter values are also lowercase
-       const lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
-       if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
-
-       if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
-           results.push(obj);
-       }
-   });
-
    let [errorGroups, resultGroups] = await safe(groups.listWithMembers());
    if (errorGroups) return next(new ldap.OperationsError(errorGroups.toString()));

@@ -248,15 +211,15 @@ async function groupSearch(req, res, next) {
}

resultGroups.forEach(function (group) {
-   const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
-   const members = group.userIds.filter(function (uid) { return usersWithAccess.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
+   const dn = ldap.parseDN(`cn=${group.name},ou=groups,dc=cloudron`);

    const obj = {
        dn: dn.toString(),
        attributes: {
            objectclass: ['group'],
            cn: group.name,
-           memberuid: members
+           gidnumber: group.id,
+           memberuid: group.userIds
        }
    };

@@ -295,7 +258,7 @@ async function groupAdminsCompare(req, res, next) {

    // we only support memberuid here, if we add new group attributes later add them here
    if (req.attribute === 'memberuid') {
-       var user = result.find(function (u) { return u.id === req.value; });
+       const user = result.find(function (u) { return u.id === req.value; });
        if (user && users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) return res.end(true);
    }

@@ -305,25 +268,35 @@ async function mailboxSearch(req, res, next) {
    debug('mailbox search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);

-   // if cn is set we only search for one mailbox specifically
+   // if cn is set OR the filter is mail= we only search for one mailbox specifically
+   let email, dn;
    if (req.dn.rdns[0].attrs.cn) {
-       const email = req.dn.rdns[0].attrs.cn.value.toLowerCase();
+       email = req.dn.rdns[0].attrs.cn.value.toLowerCase();
+       dn = req.dn.toString();
+   } else if (req.filter instanceof ldap.EqualityFilter && req.filter.attribute === 'mail') {
+       email = req.filter.value.toLowerCase();
+       dn = `cn=${email},${req.dn.toString()}`;
+   }
+
+   if (email) {
        const parts = email.split('@');
-       if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
+       if (parts.length !== 2) return next(new ldap.NoSuchObjectError(dn.toString()));

        const [error, mailbox] = await safe(mail.getMailbox(parts[0], parts[1]));
        if (error) return next(new ldap.OperationsError(error.toString()));
-       if (!mailbox) return next(new ldap.NoSuchObjectError(req.dn.toString()));
-       if (!mailbox.active) return next(new ldap.NoSuchObjectError(req.dn.toString()));
+       if (!mailbox) return next(new ldap.NoSuchObjectError(dn.toString()));
+       if (!mailbox.active) return next(new ldap.NoSuchObjectError(dn.toString()));

        const obj = {
-           dn: req.dn.toString(),
+           dn: dn.toString(),
            attributes: {
                objectclass: ['mailbox'],
                objectcategory: 'mailbox',
                cn: `${mailbox.name}@${mailbox.domain}`,
                uid: `${mailbox.name}@${mailbox.domain}`,
-               mail: `${mailbox.name}@${mailbox.domain}`
+               mail: `${mailbox.name}@${mailbox.domain}`,
+               storagequota: mailbox.storageQuota,
+               messagesquota: mailbox.messagesQuota,
            }
        };
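Editor's note: the new mail= branch above relies on ldapjs filter objects; a small sketch of how such a filter is detected and matched (the address is a placeholder):

// ldapjs parses filter strings into typed objects that expose attribute,
// value, and a matches() predicate over an attribute set.
const ldap = require('ldapjs');

const filter = ldap.parseFilter('(mail=alice@example.com)');
console.log(filter instanceof ldap.EqualityFilter);       // true
console.log(filter.attribute, filter.value);              // 'mail' 'alice@example.com'
console.log(filter.matches({ mail: 'alice@example.com' })); // true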
@@ -336,7 +309,7 @@ async function mailboxSearch(req, res, next) {
        } else {
            res.end();
        }
-   } else { // new sogo
+   } else { // new sogo and dovecot listing (doveadm -A)
        // TODO figure out how proper pagination here could work
        let [error, mailboxes] = await safe(mail.listAllMailboxes(1, 100000));
        if (error) return next(new ldap.OperationsError(error.toString()));

@@ -360,7 +333,9 @@ async function mailboxSearch(req, res, next) {
        displayname: mailbox.ownerType === mail.OWNERTYPE_USER ? ownerObject.displayName : ownerObject.name,
        cn: `${mailbox.name}@${mailbox.domain}`,
        uid: `${mailbox.name}@${mailbox.domain}`,
-       mail: `${mailbox.name}@${mailbox.domain}`
+       mail: `${mailbox.name}@${mailbox.domain}`,
+       storagequota: mailbox.storageQuota,
+       messagesquota: mailbox.messagesQuota,
    }
};

@@ -390,7 +365,7 @@ async function mailAliasSearch(req, res, next) {
    const parts = email.split('@');
    if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));

-   const [error, alias] = await safe(mail.getAlias(parts[0], parts[1]));
+   const [error, alias] = await safe(mail.searchAlias(parts[0], parts[1]));
    if (error) return next(new ldap.OperationsError(error.toString()));
    if (!alias) return next(new ldap.NoSuchObjectError(req.dn.toString()));

@@ -404,7 +379,7 @@ async function mailAliasSearch(req, res, next) {
    attributes: {
        objectclass: ['nisMailAlias'],
        objectcategory: 'nisMailAlias',
-       cn: `${alias.name}@${alias.domain}`,
+       cn: `${parts[0]}@${alias.domain}`, // alias.name can contain the wildcard character
        rfc822MailMember: `${alias.aliasName}@${alias.aliasDomain}`
    }
};

@@ -480,7 +455,7 @@ async function authorizeUserForApp(req, res, next) {
    // we return no such object, to avoid leaking a user's existence
    if (!canAccess) return next(new ldap.NoSuchObjectError(req.dn.toString()));

-   await eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', appId: req.app.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
+   await eventlog.upsertLoginEvent(req.user.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'ldap', appId: req.app.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });

    res.end();
}

@@ -558,7 +533,7 @@ async function userSearchSftp(req, res, next) {
    const obj = {
        dn: ldap.parseDN(`cn=${username}@${appFqdn},ou=sftp,dc=cloudron`).toString(),
        attributes: {
-           homeDirectory: app.dataDir ? `/mnt/app-${app.id}` : `/mnt/appsdata/${app.id}/data`, // see also sftp.js
+           homeDirectory: app.storageVolumeId ? `/mnt/app-${app.id}` : `/mnt/appsdata/${app.id}/data`, // see also sftp.js
            objectclass: ['user'],
            objectcategory: 'person',
            cn: user.id,

@@ -625,7 +600,7 @@ async function authenticateService(serviceId, dn, req, res, next) {
    if (verifyError && verifyError.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(dn.toString()));
    if (verifyError) return next(new ldap.OperationsError(verifyError.message));

-   eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: email }, { userId: result.id, user: users.removePrivateFields(result) });
+   eventlog.upsertLoginEvent(result.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: email }, { userId: result.id, user: users.removePrivateFields(result) });

    res.end();
}

@@ -635,6 +610,26 @@ async function authenticateMail(req, res, next) {
    await authenticateService(req.dn.rdns[1].attrs.ou.value.toLowerCase(), req.dn, req, res, next);
}

+// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base
+// ldapjs seems to call this handler for everything when search === ''
+async function maybeRootDSE(req, res, next) {
+    debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn.toString()}`);
+
+    if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, rootDSE searches require base scope
+    if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError());
+
+    res.send({
+        dn: '',
+        attributes: {
+            objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ],
+            supportedLDAPVersion: '3',
+            vendorName: 'Cloudron LDAP',
+            vendorVersion: '1.0.0'
+        }
+    });
+    res.end();
+}
+
async function start() {
    const logger = {
        trace: NOOP,

@@ -687,6 +682,16 @@ async function start() {
        res.end();
    });

+   // directus looks for the "DN" of the bind user
+   gServer.search('ou=apps,dc=cloudron', function(req, res, next) {
+       const obj = {
+           dn: req.dn.toString(),
+       };
+       finalSend([obj], req, res, next);
+   });
+
+   gServer.search('', maybeRootDSE); // when '', it seems the callback is called for everything else
+
    // just log that an attempt was made to an unknown route, this helps a lot during app packaging
    gServer.use(function(req, res, next) {
        debug('not handled: dn %s, scope %s, filter %s (from %s)', req.dn ? req.dn.toString() : '-', req.scope, req.filter ? req.filter.toString() : '-', req.connection.ldap.id);
+2
-3
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
var assert = require('assert'),
|
||||
const assert = require('assert'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
debug = require('debug')('box:locker'),
|
||||
EventEmitter = require('events').EventEmitter,
|
||||
@@ -32,8 +32,7 @@ Locker.prototype.lock = function (operation) {
|
||||
this._operation = operation;
|
||||
++this._lockDepth;
|
||||
this._timestamp = new Date();
|
||||
var that = this;
|
||||
this._watcherId = setInterval(function () { debug('Lock unreleased %s', that._operation); }, 1000 * 60 * 5);
|
||||
this._watcherId = setInterval(() => { debug('Lock unreleased %s', this._operation); }, 1000 * 60 * 5);
|
||||
|
||||
debug('Acquired : %s', this._operation);
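Note: the watcher change replaces the `var that = this` idiom with an arrow function, which inherits `this` lexically from the enclosing method. A minimal standalone illustration (not Cloudron code):

    function Locker() { this._operation = null; }
    Locker.prototype.lock = function (operation) {
        this._operation = operation;
        // arrow callback: `this` is still the Locker instance when the timer fires
        this._watcherId = setInterval(() => console.log('Lock unreleased %s', this._operation), 1000);
    };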

+89
-38
@@ -22,6 +22,7 @@ exports = module.exports = {
setDnsRecords,

validateName,
validateDisplayName,

setMailFromValidation,
setCatchAllAddress,
@@ -47,6 +48,7 @@ exports = module.exports = {
getAlias,
getAliases,
setAliases,
+searchAlias,

getLists,
getList,
@@ -65,7 +67,6 @@ exports = module.exports = {
TYPE_LIST: 'list',
TYPE_ALIAS: 'alias',

_validateName: validateName,
_delByDomain: delByDomain,
_updateDomain: updateDomain
};
@@ -96,13 +97,11 @@ const assert = require('assert'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
-smtpTransport = require('nodemailer-smtp-transport'),
superagent = require('superagent'),
sysinfo = require('./sysinfo.js'),
system = require('./system.js'),
tasks = require('./tasks.js'),
users = require('./users.js'),
util = require('util'),
validator = require('validator'),
_ = require('underscore');

@@ -112,7 +111,7 @@ const REMOVE_MAILBOX_CMD = path.join(__dirname, 'scripts/rmmailbox.sh');
const OWNERTYPES = [ exports.OWNERTYPE_USER, exports.OWNERTYPE_GROUP, exports.OWNERTYPE_APP ];

// if you add a field here, listMailboxes has to be updated
-const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active', 'enablePop3' ].join(',');
+const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active', 'enablePop3', 'storageQuota', 'messagesQuota' ].join(',');
const MAILDB_FIELDS = [ 'domain', 'enabled', 'mailFromValidation', 'catchAllJson', 'relayJson', 'dkimKeyJson', 'dkimSelector', 'bannerJson' ].join(',');

function postProcessMailbox(data) {
@@ -169,6 +168,28 @@ function validateName(name) {
return null;
}

+function validateAlias(name) {
+assert.strictEqual(typeof name, 'string');

+if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'mailbox name must be at least 1 char');
+if (name.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'mailbox name too long');

+// also need to consider valid LDAP characters here (e.g. '+' is reserved). keep hyphen at the end so it doesn't become a range.
+if (/[^a-zA-Z0-9._*-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox name can only contain alphanumerals, dot, hyphen, asterisk or underscore');

+return null;
+}
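Note: unlike validateName, the character class above admits `*`, which is what makes wildcard aliases possible. A hypothetical standalone check mirroring the same regex:

    const aliasNameOk = (name) => !/[^a-zA-Z0-9._*-]/.test(name);
    aliasNameOk('team-*');    // true  - wildcards are allowed in alias names
    aliasNameOk('a_b.c-d');   // true
    aliasNameOk('info+spam'); // false - '+' is reserved in LDAP DNs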

function validateDisplayName(name) {
assert.strictEqual(typeof name, 'string');

if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name must be at least 1 char');
if (name.length >= 100) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name too long');
if (/["<>@]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name is not valid');

return null;
}

async function checkOutboundPort25() {
const relay = {
value: 'OK',
@@ -197,7 +218,7 @@ async function checkOutboundPort25() {
relay.status = false;
relay.value = `Connect to ${constants.PORT25_CHECK_SERVER} failed: ${error.message}. Check if port 25 (outbound) is blocked`;
client.destroy();
-relay.errorMessage = `Connect to ${constants.PORT25_CHECK_SERVER} failed.`;
+relay.errorMessage = `Connect to ${constants.PORT25_CHECK_SERVER} failed: ${error.message}`;
resolve(relay);
});
});
@@ -214,7 +235,8 @@ async function checkSmtpRelay(relay) {
connectionTimeout: 5000,
greetingTimeout: 5000,
host: relay.host,
-port: relay.port
+port: relay.port,
+secure: false // haraka relay only supports STARTTLS
};

// only set auth if either username or password is provided, some relays auth based on IP (range)
@@ -227,9 +249,9 @@ async function checkSmtpRelay(relay) {

if (relay.acceptSelfSignedCerts) options.tls = { rejectUnauthorized: false };

-const transporter = nodemailer.createTransport(smtpTransport(options));
+const transporter = nodemailer.createTransport(options);

-const [error] = await safe(util.promisify(transporter.verify)());
+const [error] = await safe(transporter.verify());
result.status = !error;
if (error) {
result.value = result.errorMessage = error.message;
@@ -360,9 +382,9 @@ async function checkMx(domain, mailFqdn) {
}

function txtToDict(txt) {
-var dict = {};
+const dict = {};
txt.split(';').forEach(function(v) {
-var p = v.trim().split('=');
+const p = v.trim().split('=');
dict[p[0]]=p[1];
});
return dict;
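Note: txtToDict turns a semicolon-separated TXT payload of key=value pairs into an object; `trim()` runs per segment, so the usual `; ` separators in SPF/DMARC-style records are handled. An illustrative call (not from the source):

    txtToDict('v=DMARC1; p=quarantine; pct=100');
    // => { v: 'DMARC1', p: 'quarantine', pct: '100' }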
@@ -516,7 +538,7 @@ async function checkRblStatus(domain) {
blacklistedServers.push(result);
}

-debug(`checkRblStatus: ${domain} (ip: ${ip}) servers: ${JSON.stringify(blacklistedServers)})`);
+debug(`checkRblStatus: ${domain} (ip: ${ip}) blacklistedServers: ${JSON.stringify(blacklistedServers)})`);

return { status: blacklistedServers.length === 0, ip, servers: blacklistedServers };
}
@@ -632,7 +654,7 @@ async function createMailConfig(mailFqdn) {

// create sections for per-domain configuration
for (const domain of mailDomains) {
-const catchAll = domain.catchAll.map(function (c) { return `${c}@${domain.domain}`; }).join(',');
+const catchAll = domain.catchAll.join(',');
const mailFromValidation = domain.mailFromValidation;

if (!safe.fs.appendFileSync(`${paths.MAIL_CONFIG_DIR}/mail.ini`,
@@ -688,15 +710,15 @@ async function configureMail(mailFqdn, mailDomain, serviceConfig) {
const memory = system.getMemoryAllocation(memoryLimit);
const cloudronToken = hat(8 * 128), relayToken = hat(8 * 128);

-const bundle = await reverseProxy.getCertificatePath(mailFqdn, mailDomain);
+const certificatePath = await reverseProxy.getCertificatePath(mailFqdn, mailDomain);

const dhparamsFilePath = `${paths.MAIL_CONFIG_DIR}/dhparams.pem`;
const mailCertFilePath = `${paths.MAIL_CONFIG_DIR}/tls_cert.pem`;
const mailKeyFilePath = `${paths.MAIL_CONFIG_DIR}/tls_key.pem`;

if (!safe.child_process.execSync(`cp ${paths.DHPARAMS_FILE} ${dhparamsFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not copy dhparams:' + safe.error.message);
-if (!safe.child_process.execSync(`cp ${bundle.certFilePath} ${mailCertFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create cert file:' + safe.error.message);
-if (!safe.child_process.execSync(`cp ${bundle.keyFilePath} ${mailKeyFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create key file:' + safe.error.message);
+if (!safe.child_process.execSync(`cp ${certificatePath.certFilePath} ${mailCertFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create cert file:' + safe.error.message);
+if (!safe.child_process.execSync(`cp ${certificatePath.keyFilePath} ${mailKeyFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create key file:' + safe.error.message);

// if the 'yellowtent' user of OS and the 'cloudron' user of mail container don't match, the keys become inaccessible by mail code
if (!safe.fs.chmodSync(mailKeyFilePath, 0o644)) throw new BoxError(BoxError.FS_ERROR, `Could not chmod key file: ${safe.error.message}`);
@@ -1019,6 +1041,10 @@ async function setCatchAllAddress(domain, addresses) {
assert.strictEqual(typeof domain, 'string');
assert(Array.isArray(addresses));

+for (const address of addresses) {
+if (!validator.isEmail(address)) throw new BoxError(BoxError.BAD_FIELD, `Invalid catch all address: ${address}`);
+}

await updateDomain(domain, { catchAll: addresses });

safe(restartMail(), { debug }); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
@@ -1079,7 +1105,7 @@ async function listMailboxes(domain, search, page, perPage) {
const escapedSearch = mysql.escape('%' + search + '%'); // this also quotes the string
const searchQuery = search ? ` HAVING (name LIKE ${escapedSearch} OR aliasNames LIKE ${escapedSearch} OR aliasDomains LIKE ${escapedSearch})` : ''; // having instead of where because aggregated columns are used

-const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3 '
+const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3, m1.storageQuota AS storageQuota, m1.messagesQuota AS messagesQuota '
+ ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1`
+ ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2`
+ ' ON m1.name=m2.aliasName AND m1.domain=m2.aliasDomain AND m1.ownerId=m2.ownerId'
@@ -1100,7 +1126,7 @@ async function listAllMailboxes(page, perPage) {
assert.strictEqual(typeof page, 'number');
assert.strictEqual(typeof perPage, 'number');

-const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3 '
+const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3, m1.storageQuota AS storageQuota, m1.messagesQuota AS messagesQuota '
+ ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1`
+ ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2`
+ ' ON m1.name=m2.aliasName AND m1.domain=m2.aliasDomain AND m1.ownerId=m2.ownerId'
@@ -1154,10 +1180,12 @@ async function addMailbox(name, domain, data, auditSource) {
assert.strictEqual(typeof data, 'object');
assert.strictEqual(typeof auditSource, 'object');

-const { ownerId, ownerType, active } = data;
+const { ownerId, ownerType, active, storageQuota, messagesQuota } = data;
assert.strictEqual(typeof ownerId, 'string');
assert.strictEqual(typeof ownerType, 'string');
assert.strictEqual(typeof active, 'boolean');
+assert(Number.isInteger(storageQuota) && storageQuota >= 0);
+assert(Number.isInteger(messagesQuota) && messagesQuota >= 0);

name = name.toLowerCase();

@@ -1166,12 +1194,13 @@ async function addMailbox(name, domain, data, auditSource) {

if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');

-[error] = await safe(database.query('INSERT INTO mailboxes (name, type, domain, ownerId, ownerType, active) VALUES (?, ?, ?, ?, ?, ?)', [ name, exports.TYPE_MAILBOX, domain, ownerId, ownerType, active ]));
+[error] = await safe(database.query('INSERT INTO mailboxes (name, type, domain, ownerId, ownerType, active, storageQuota, messagesQuota) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
+[ name, exports.TYPE_MAILBOX, domain, ownerId, ownerType, active, storageQuota, messagesQuota ]));
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'mailbox already exists');
if (error && error.code === 'ER_NO_REFERENCED_ROW_2' && error.sqlMessage.includes('mailboxes_domain_constraint')) throw new BoxError(BoxError.NOT_FOUND, `no such domain '${domain}'`);
if (error) throw error;

-eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active });
+await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active, storageQuota, messageQuota: messagesQuota });
}

async function updateMailbox(name, domain, data, auditSource) {
@@ -1180,23 +1209,30 @@ async function updateMailbox(name, domain, data, auditSource) {
assert.strictEqual(typeof data, 'object');
assert.strictEqual(typeof auditSource, 'object');

-const { ownerId, ownerType, active, enablePop3 } = data;
-assert.strictEqual(typeof ownerId, 'string');
-assert.strictEqual(typeof ownerType, 'string');
-assert.strictEqual(typeof active, 'boolean');
-assert.strictEqual(typeof enablePop3, 'boolean');
+const args = [];
+const fields = [];
+for (const k in data) {
+if (k === 'enablePop3' || k === 'active') {
+fields.push(k + ' = ?');
+args.push(data[k] ? 1 : 0);
+continue;
+}

-name = name.toLowerCase();
+if (k === 'ownerType' && !OWNERTYPES.includes(data[k])) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');

-if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
+fields.push(k + ' = ?');
+args.push(data[k]);
+}
+args.push(name.toLowerCase());
+args.push(domain);

const mailbox = await getMailbox(name, domain);
if (!mailbox) throw new BoxError(BoxError.NOT_FOUND, 'No such mailbox');

-const result = await database.query('UPDATE mailboxes SET ownerId = ?, ownerType = ?, active = ?, enablePop3 = ? WHERE name = ? AND domain = ?', [ ownerId, ownerType, active, enablePop3, name, domain ]);
+const result = await safe(database.query('UPDATE mailboxes SET ' + fields.join(', ') + ' WHERE name = ? AND domain = ?', args));
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');

-eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldUserId: mailbox.userId, ownerId, ownerType, active });
+await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, Object.assign(data, { name, domain, oldUserId: mailbox.userId }) );
}
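Note: the rewritten updateMailbox builds the UPDATE statement from whichever keys are present in `data`, so callers can patch a single field. A hypothetical trace of the loop above:

    // given: updateMailbox('sales', 'example.com', { active: true, storageQuota: 104857600 }, auditSource)
    // fields -> [ 'active = ?', 'storageQuota = ?' ]
    // args   -> [ 1, 104857600, 'sales', 'example.com' ]
    // query  -> UPDATE mailboxes SET active = ?, storageQuota = ? WHERE name = ? AND domain = ?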

async function removeSolrIndex(mailbox) {
@@ -1233,7 +1269,7 @@ async function delMailbox(name, domain, options, auditSource) {
const [error] = await safe(removeSolrIndex(mailbox));
if (error) debug(`delMailbox: failed to remove solr index: ${error.message}`);

-eventlog.add(eventlog.ACTION_MAIL_MAILBOX_REMOVE, auditSource, { name, domain });
+await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_REMOVE, auditSource, { name, domain });
}

async function getAlias(name, domain) {
@@ -1248,6 +1284,18 @@ async function getAlias(name, domain) {
return results[0];
}

+async function searchAlias(name, domain) {
+assert.strictEqual(typeof name, 'string');
+assert.strictEqual(typeof domain, 'string');

+const results = await database.query(`SELECT ${MAILBOX_FIELDS} FROM mailboxes WHERE ? LIKE REPLACE(REPLACE(name, '*', '%'), '_', '\\_') AND type = ? AND domain = ?`, [ name, exports.TYPE_ALIAS, domain ]);
+if (results.length === 0) return null;

+results.forEach(function (result) { postProcessMailbox(result); });

+return results[0];
+}
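Note: the query above inverts the usual LIKE: the stored alias name is the pattern and the searched name is the literal. A `*` stored in an alias becomes the SQL wildcard `%`, while a literal `_` in the stored name is escaped so it cannot match an arbitrary character. A hypothetical illustration:

    // with an alias row named 'team-*' on example.com, this resolves it,
    // because the query evaluates 'team-sales' LIKE 'team-%'
    await searchAlias('team-sales', 'example.com');
    // an alias named 'weekly_report' is rewritten to 'weekly\_report',
    // so it matches only itself, not 'weeklyXreport'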

async function getAliases(name, domain) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof domain, 'string');
@@ -1257,16 +1305,17 @@ async function getAliases(name, domain) {
return await database.query('SELECT name, domain FROM mailboxes WHERE type = ? AND aliasName = ? AND aliasDomain = ? ORDER BY name', [ exports.TYPE_ALIAS, name, domain ]);
}

-async function setAliases(name, domain, aliases) {
+async function setAliases(name, domain, aliases, auditSource) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof domain, 'string');
assert(Array.isArray(aliases));
+assert.strictEqual(typeof auditSource, 'object');

for (let i = 0; i < aliases.length; i++) {
const name = aliases[i].name.toLowerCase();
const domain = aliases[i].domain.toLowerCase();

-const error = validateName(name);
+const error = validateAlias(name);
if (error) throw error;

const mailDomain = await getDomain(domain);
@@ -1278,13 +1327,13 @@ async function setAliases(name, domain, aliases) {
const results = await database.query('SELECT ' + MAILBOX_FIELDS + ' FROM mailboxes WHERE name = ? AND domain = ?', [ name, domain ]);
if (results.length === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');

-let queries = [];
+const queries = [];
// clear existing aliases
queries.push({ query: 'DELETE FROM mailboxes WHERE aliasName = ? AND aliasDomain = ? AND type = ?', args: [ name, domain, exports.TYPE_ALIAS ] });
-aliases.forEach(function (alias) {
+for (const alias of aliases) {
queries.push({ query: 'INSERT INTO mailboxes (name, domain, type, aliasName, aliasDomain, ownerId, ownerType) VALUES (?, ?, ?, ?, ?, ?, ?)',
args: [ alias.name, alias.domain, exports.TYPE_ALIAS, name, domain, results[0].ownerId, results[0].ownerType ] });
-});
+}

const [error] = await safe(database.transaction(queries));
if (error && error.code === 'ER_DUP_ENTRY' && error.message.indexOf('mailboxes_name_domain_unique_index') !== -1) {
@@ -1294,6 +1343,8 @@ async function setAliases(name, domain, aliases) {
throw new BoxError(BoxError.ALREADY_EXISTS, `Mailbox, mailinglist or alias for ${aliasMatch[1]} already exists`);
}
if (error) throw error;

+await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, aliases });
}

async function getLists(domain, search, page, perPage) {
@@ -1348,7 +1399,7 @@ async function addList(name, domain, data, auditSource) {
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'mailbox already exists');
if (error) throw error;

-eventlog.add(eventlog.ACTION_MAIL_LIST_ADD, auditSource, { name, domain, members, membersOnly, active });
+await eventlog.add(eventlog.ACTION_MAIL_LIST_ADD, auditSource, { name, domain, members, membersOnly, active });
}

async function updateList(name, domain, data, auditSource) {
@@ -1375,7 +1426,7 @@ async function updateList(name, domain, data, auditSource) {
[ JSON.stringify(members), membersOnly, active, name, domain ]);
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');

-eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldMembers: result.members, members, membersOnly, active });
+await eventlog.add(eventlog.ACTION_MAIL_LIST_UPDATE, auditSource, { name, domain, oldMembers: result.members, members, membersOnly, active });
}

async function delList(name, domain, auditSource) {
@@ -1387,7 +1438,7 @@ async function delList(name, domain, auditSource) {
const result = await database.query('DELETE FROM mailboxes WHERE ((name=? AND domain=?) OR (aliasName = ? AND aliasDomain=?))', [ name, domain, name, domain ]);
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');

-eventlog.add(eventlog.ACTION_MAIL_LIST_REMOVE, auditSource, { name, domain });
+await eventlog.add(eventlog.ACTION_MAIL_LIST_REMOVE, auditSource, { name, domain });
}

// resolves the members of a list, i.e. the lists and aliases

@@ -6,10 +6,11 @@

<p>{{ passwordResetEmail.description }}</p>

-<p>
-<a href="<%= resetLink %>">{{ passwordResetEmail.resetAction }}</a>
-</p>
+<br/>

+<a style="border-radius: 2px; background-color: #2196f3; color: white; padding: 6px 12px; text-decoration: none;" href="<%= resetLink %>">{{ passwordResetEmail.resetAction }}</a>

+<br/>
+<br/>

{{ passwordResetEmail.expireNote }}

@@ -5,9 +5,9 @@
<h3>{{ welcomeEmail.salutation }}</h3>
<h2>{{ welcomeEmail.welcomeTo }}</h2>

-<p>
-<a href="<%= inviteLink %>">{{ welcomeEmail.inviteLinkAction }}</a>
-</p>
+<br/>

+<a style="border-radius: 2px; background-color: #2196f3; color: white; padding: 6px 12px; text-decoration: none;" href="<%= inviteLink %>">{{ welcomeEmail.inviteLinkAction }}</a>

+<br/>
+<br/>

+2
-3
@@ -25,7 +25,6 @@ const assert = require('assert'),
safe = require('safetydance'),
settings = require('./settings.js'),
translation = require('./translation.js'),
-smtpTransport = require('nodemailer-smtp-transport'),
util = require('util');

const MAIL_TEMPLATES_DIR = path.join(__dirname, 'mail_templates');
@@ -52,14 +51,14 @@ async function sendMail(mailOptions) {

const data = await mail.getMailAuth();

-const transport = nodemailer.createTransport(smtpTransport({
+const transport = nodemailer.createTransport({
host: data.ip,
port: data.port,
auth: {
user: mailOptions.authUser || `no-reply@${settings.dashboardDomain()}`,
pass: data.relayToken
}
-}));
+});

const transportSendMail = util.promisify(transport.sendMail.bind(transport));
const [error] = await safe(transportSendMail(mailOptions));

@@ -2,7 +2,7 @@

'use strict';

-var url = require('url');
+const url = require('url');

/*
* CORS middleware
@@ -11,19 +11,19 @@ var url = require('url');
*/
module.exports = function cors(options) {
options = options || { };
-var maxAge = options.maxAge || 60 * 60 * 25 * 5; // 5 days
-var origins = options.origins || [ '*' ];
-var allowCredentials = options.allowCredentials || false; // cookies
+const maxAge = options.maxAge || 60 * 60 * 25 * 5; // 5 days
+const origins = options.origins || [ '*' ];
+const allowCredentials = options.allowCredentials || false; // cookies

return function (req, res, next) {
-var requestOrigin = req.headers.origin;
+let requestOrigin = req.headers.origin;
if (!requestOrigin) return next();

requestOrigin = url.parse(requestOrigin);
if (!requestOrigin.host) return res.status(405).send('CORS not allowed from this domain');

-var hostname = requestOrigin.host.split(':')[0]; // remove any port
-var originAllowed = origins.some(function (o) { return o === '*' || o === hostname; });
+const hostname = requestOrigin.host.split(':')[0]; // remove any port
+const originAllowed = origins.some(function (o) { return o === '*' || o === hostname; });
if (!originAllowed) {
return res.status(405).send('CORS not allowed from this domain');
}
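Note: the middleware is consumed express-style. A minimal usage sketch, assuming the file is required as ./cors.js (path is hypothetical):

    const express = require('express');
    const cors = require('./cors.js');

    const app = express();
    // only allow my.example.com (any port), with cookies, preflight cached for an hour
    app.use(cors({ origins: [ 'my.example.com' ], allowCredentials: true, maxAge: 60 * 60 }));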

@@ -2,19 +2,19 @@

'use strict';

-var multiparty = require('multiparty'),
+const multiparty = require('multiparty'),
timeout = require('connect-timeout');

function _mime(req) {
-var str = req.headers['content-type'] || '';
-return str.split(';')[0];
+const str = req.headers['content-type'] || '';
+return str.split(';')[0];
}

module.exports = function multipart(options) {
return function (req, res, next) {
if (_mime(req) !== 'multipart/form-data') return res.status(400).send('Invalid content-type. Expecting multipart');

-var form = new multiparty.Form({
+const form = new multiparty.Form({
uploadDir: '/tmp',
keepExtensions: true,
maxFieldsSize: options.maxFieldsSize || (2 * 1024), // only field size, not files
@@ -29,7 +29,7 @@ module.exports = function multipart(options) {
req.fields = { };
req.files = { };

-form.parse(req, function (err, fields, files) {
+form.parse(req, function (err /*, fields, files */) {
if (err) return res.status(400).send('Error parsing request');
next(null);
});

+23
-11
@@ -54,6 +54,7 @@ function validateMountOptions(type, options) {
if (typeof options.remoteDir !== 'string') return new BoxError(BoxError.BAD_FIELD, 'remoteDir is not a string');
return null;
case 'ext4':
+case 'xfs':
if (typeof options.diskPath !== 'string') return new BoxError(BoxError.BAD_FIELD, 'diskPath is not a string');
return null;
default:
@@ -62,7 +63,7 @@
}

function isManagedProvider(provider) {
-return provider === 'sshfs' || provider === 'cifs' || provider === 'nfs' || provider === 'ext4';
+return provider === 'sshfs' || provider === 'cifs' || provider === 'nfs' || provider === 'ext4' || provider === 'xfs';
}

function mountObjectFromBackupConfig(backupConfig) {
@@ -83,15 +84,21 @@ function mountObjectFromBackupConfig(backupConfig) {
function renderMountFile(mount) {
assert.strictEqual(typeof mount, 'object');

-const {name, hostPath, mountType, mountOptions} = mount;
+const { name, hostPath, mountType, mountOptions } = mount;

let options, what, type;
switch (mountType) {
-case 'cifs':
+case 'cifs': {
+const out = safe.child_process.execSync(`systemd-escape -p '${hostPath}'`, { encoding: 'utf8' }); // this ensures uniqueness of creds file
+if (!out) throw new BoxError(BoxError.FS_ERROR, `Could not determine credentials file name: ${safe.error.message}`);
+const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`);
+if (!safe.fs.writeFileSync(credentialsFilePath, `username=${mountOptions.username}\npassword=${mountOptions.password}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not write credentials file: ${safe.error.message}`);

type = 'cifs';
what = `//${mountOptions.host}` + path.join('/', mountOptions.remoteDir);
-options = `username=${mountOptions.username},password=${mountOptions.password},rw,${mountOptions.seal ? 'seal,' : ''}iocharset=utf8,file_mode=0666,dir_mode=0777,uid=yellowtent,gid=yellowtent`;
+options = `credentials=${credentialsFilePath},rw,${mountOptions.seal ? 'seal,' : ''}iocharset=utf8,file_mode=0666,dir_mode=0777,uid=yellowtent,gid=yellowtent`;
break;
+}
case 'nfs':
type = 'nfs';
what = `${mountOptions.host}:${mountOptions.remoteDir}`;
@@ -102,8 +109,15 @@ function renderMountFile(mount) {
what = mountOptions.diskPath; // like /dev/disk/by-uuid/uuid or /dev/disk/by-id/scsi-id
options = 'discard,defaults,noatime';
break;
+case 'xfs':
+type = 'xfs';
+what = mountOptions.diskPath; // like /dev/disk/by-uuid/uuid or /dev/disk/by-id/scsi-id
+options = 'discard,defaults,noatime';
+break;
case 'sshfs': {
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`);
+if (!safe.fs.writeFileSync(keyFilePath, `${mount.mountOptions.privateKey}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not write private key: ${safe.error.message}`);

type = 'fuse.sshfs';
what = `${mountOptions.user}@${mountOptions.host}:${mountOptions.remoteDir}`;
options = `allow_other,port=${mountOptions.port},IdentityFile=${keyFilePath},StrictHostKeyChecking=no,reconnect`; // allow_other means non-root users can access it
@@ -129,6 +143,11 @@ async function removeMount(mount) {
if (mountType === 'sshfs') {
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`);
safe.fs.unlinkSync(keyFilePath);
+} else if (mountType === 'cifs') {
+const out = safe.child_process.execSync(`systemd-escape -p '${hostPath}'`, { encoding: 'utf8' });
+if (!out) return;
+const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`);
+safe.fs.unlinkSync(credentialsFilePath);
}
}

@@ -181,13 +200,6 @@ async function tryAddMount(mount, options) {

if (constants.TEST) return;

-if (mount.mountType === 'sshfs') {
-const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mount.mountOptions.host}`);

-safe.fs.mkdirSync(paths.SSHFS_KEYS_DIR);
-if (!safe.fs.writeFileSync(keyFilePath, `${mount.mountOptions.privateKey}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, safe.error);
-}

const [error] = await safe(shell.promises.sudo('addMount', [ ADD_MOUNT_CMD, renderMountFile(mount), options.timeout ], {}));
if (error && error.code === 2) throw new BoxError(BoxError.MOUNT_ERROR, 'Failed to unmount existing mount'); // at this point, the old mount config is still there

+44
-15
@@ -45,7 +45,7 @@ server {
location / {
<% if ( endpoint === 'dashboard' || endpoint === 'setup' ) { %>
return 301 https://$host$request_uri;
-<% } else if ( endpoint === 'app' ) { %>
+<% } else if ( endpoint === 'app' || endpoint === 'external' ) { %>
return 301 https://$host$request_uri;
<% } else if ( endpoint === 'redirect' ) { %>
return 301 https://<%= redirectTo %>$request_uri;
@@ -88,6 +88,9 @@ server {
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256;
ssl_prefer_server_ciphers off;

+# some apps have underscores in headers. this is apparently disabled by default because of some legacy CGI compat
+underscores_in_headers on;

<% if (endpoint !== 'ip' && endpoint !== 'setup') { -%>
# dhparams is generated only after dns setup
ssl_dhparam /home/yellowtent/platformdata/dhparams.pem;
@@ -116,14 +119,6 @@ server {
add_header Referrer-Policy $hrp;
proxy_hide_header Referrer-Policy;

-# workaround caching issue after /logout. if max-age is set, browser uses cache and user thinks they have not logged out
-# have to keep all the add_header here to avoid repeating all add_header in location block
-<% if (proxyAuth.enabled) { %>
-proxy_hide_header Cache-Control;
-add_header Cache-Control no-cache;
-add_header Set-Cookie $auth_cookie;
-<% } %>

# gzip responses that are > 50k and not images
gzip on;
gzip_min_length 18k;
@@ -180,6 +175,12 @@ server {
proxy_pass http://127.0.0.1:3000;
<% } else if ( endpoint === 'app' ) { %>
proxy_pass http://<%= ip %>:<%= port %>;
+<% } else if ( endpoint === 'external' ) { %>
+# without a variable, nginx will not start if upstream is down or unavailable
+resolver 127.0.0.1 valid=30s;
+set $upstream <%= upstreamUri %>;
+proxy_ssl_verify off;
+proxy_pass $upstream;
<% } else if ( endpoint === 'redirect' ) { %>
return 302 https://<%= redirectTo %>$request_uri;
<% } %>
@@ -191,10 +192,6 @@ server {
proxy_pass http://127.0.0.1:3000/well-known-handler/;
}

-<% if (proxyAuth.enabled) { %>
-proxy_set_header X-App-ID "<%= proxyAuth.id %>";
-<% } %>

# increase the proxy buffer sizes to not run into buffer issues (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers)
proxy_buffer_size 128k;
proxy_buffers 4 256k;
@@ -215,7 +212,7 @@ server {
<% if ( endpoint === 'dashboard' || endpoint === 'setup' ) { %>
location /api/ {
proxy_pass http://127.0.0.1:3000;
-client_max_body_size 1m;
+client_max_body_size 2m;
}

location ~ ^/api/v1/cloudron/login$ {
@@ -225,7 +222,7 @@ server {
}

# the read timeout is between successive reads and not the whole connection
-location ~ ^/api/v1/apps/.*/exec$ {
+location ~ ^/api/v1/apps/.*/exec/.*/start$ {
proxy_pass http://127.0.0.1:3000;
proxy_read_timeout 30m;
}
@@ -245,6 +242,11 @@ server {
client_max_body_size 0;
}

+location ~ ^/api/v1/profile/backgroundImage {
+proxy_pass http://127.0.0.1:3000;
+client_max_body_size 0;
+}

# graphite paths (uncomment block below and visit /graphite-web/dashboard)
# remember to comment out the CSP policy as well to access the graphite dashboard
# location ~ ^/graphite-web/ {
@@ -288,6 +290,11 @@ server {
location <%= proxyAuth.location %> {
auth_request /proxy-auth;
auth_request_set $auth_cookie $upstream_http_set_cookie;

+auth_request_set $user $upstream_http_x_remote_user;
+auth_request_set $email $upstream_http_x_remote_email;
+auth_request_set $name $upstream_http_x_remote_name;

error_page 401 = @proxy-auth-login;

proxy_pass http://<%= ip %>:<%= port %>;
@@ -305,12 +312,34 @@ server {
}
<% } %>

+<% if (proxyAuth.enabled) { %>
+# workaround caching issue after /logout. if max-age is set, browser uses cache and user thinks they have not logged out
+# IMPORTANT: have to keep all the add_headers at top level here to avoid repeating all the add_headers and proxy_set_headers in location block
+proxy_hide_header Cache-Control;
+add_header Cache-Control no-cache;
+add_header Set-Cookie $auth_cookie;

+# To prevent header spoofing from a client, these variables must always be set (or removed with '') for all proxyAuth routes
+proxy_set_header X-App-ID "<%= proxyAuth.id %>";
+proxy_set_header X-Remote-User $user;
+proxy_set_header X-Remote-Email $email;
+proxy_set_header X-Remote-Name $name;
+<% } %>

<% } else if ( endpoint === 'redirect' ) { %>
location / {
# redirect everything to the app. this is temporary because there is no way
# to clear a permanent redirect on the browser
return 302 https://<%= redirectTo %>$request_uri;
}
+<% } else if ( endpoint === 'external' ) { %>
+location / {
+# without a variable, nginx will not start if upstream is down or unavailable
+resolver 127.0.0.1 valid=30s;
+set $upstream <%= upstreamUri %>;
+proxy_ssl_verify off;
+proxy_pass $upstream;
+}
<% } else if ( endpoint === 'ip' ) { %>
location /notfound.html {
root <%= sourceDir %>/dashboard/dist;

+19
-4
@@ -14,6 +14,7 @@ exports = module.exports = {
ALERT_REBOOT: 'reboot',
ALERT_BOX_UPDATE: 'boxUpdate',
ALERT_UPDATE_UBUNTU: 'ubuntuUpdate',
+ALERT_MANUAL_APP_UPDATE: 'manualAppUpdate',

alert,

@@ -25,6 +26,7 @@ const assert = require('assert'),
AuditSource = require('./auditsource.js'),
BoxError = require('./boxerror.js'),
+changelog = require('./changelog.js'),
constants = require('./constants.js'),
database = require('./database.js'),
eventlog = require('./eventlog.js'),
mailer = require('./mailer.js'),
@@ -160,6 +162,16 @@ async function appUpdated(eventId, app, fromManifest, toManifest) {
await add(eventId, title, `The application installed at https://${app.fqdn} was updated.\n\nChangelog:\n${toManifest.changelog}\n`);
}

+async function boxInstalled(eventId, version) {
+assert.strictEqual(typeof eventId, 'string');
+assert.strictEqual(typeof version, 'string');

+const changes = changelog.getChanges(version.replace(/\.([^.]*)$/, '.0')); // last .0 release
+const changelogMarkdown = changes.map((m) => `* ${m}\n`).join('');

+await add(eventId, `Cloudron v${version} installed`, `Cloudron v${version} was installed.\n\nPlease join our community at ${constants.FORUM_URL} .\n\nChangelog:\n${changelogMarkdown}\n`);
+}

async function boxUpdated(eventId, oldVersion, newVersion) {
assert.strictEqual(typeof eventId, 'string');
assert.strictEqual(typeof oldVersion, 'string');
@@ -178,16 +190,16 @@ async function boxUpdateError(eventId, errorMessage) {
await add(eventId, 'Cloudron update failed', `Failed to update Cloudron: ${errorMessage}.`);
}

-async function certificateRenewalError(eventId, vhost, errorMessage) {
+async function certificateRenewalError(eventId, fqdn, errorMessage) {
assert.strictEqual(typeof eventId, 'string');
-assert.strictEqual(typeof vhost, 'string');
+assert.strictEqual(typeof fqdn, 'string');
assert.strictEqual(typeof errorMessage, 'string');

-await add(eventId, `Certificate renewal of ${vhost} failed`, `Failed to renew certs of ${vhost}: ${errorMessage}. Renewal will be retried in 12 hours.`);
+await add(eventId, `Certificate renewal of ${fqdn} failed`, `Failed to renew certs of ${fqdn}: ${errorMessage}. Renewal will be retried in 12 hours.`);

const admins = await users.getAdmins();
for (const admin of admins) {
-await mailer.certificateRenewalError(admin.email, vhost, errorMessage);
+await mailer.certificateRenewalError(admin.email, fqdn, errorMessage);
}
}

@@ -267,6 +279,9 @@ async function onEvent(id, action, source, data) {

return await backupFailed(id, data.taskId, data.errorMessage); // only notify for automated backups or timedout

+case eventlog.ACTION_INSTALL_FINISH:
+return await boxInstalled(id, data.version);

case eventlog.ACTION_UPDATE_FINISH:
if (!data.errorMessage) return await boxUpdated(id, data.oldVersion, data.newVersion);
if (data.timedOut) return await boxUpdateError(id, data.errorMessage);

+14
@@ -0,0 +1,14 @@
+'use strict';

+exports = module.exports = once;

+// https://github.com/isaacs/once/blob/main/LICENSE (ISC)
+function once (fn) {
+const f = function () {
+if (f.called) return f.value;
+f.called = true;
+return f.value = fn.apply(this, arguments);
+};
+f.called = false;
+return f;
+}
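Note: the vendored once() memoizes the first call and replays its return value on every subsequent call. A small usage sketch (the require path is hypothetical):

    const once = require('./once.js');
    const initialize = once(function (name) { console.log('init', name); return 42; });
    initialize('db');  // logs 'init db', returns 42
    initialize('db');  // no log; returns the cached 42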

+3
-1
@@ -1,6 +1,6 @@
'use strict';

-var constants = require('./constants.js'),
+const constants = require('./constants.js'),
path = require('path');

function baseDir() {
@@ -16,6 +16,7 @@ exports = module.exports = {

CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'),
INFRA_VERSION_FILE: path.join(baseDir(), 'platformdata/INFRA_VERSION'),
CRON_SEED_FILE: path.join(baseDir(), 'platformdata/CRON_SEED'),
+DASHBOARD_DIR: constants.TEST ? path.join(__dirname, '../../dashboard/src') : path.join(baseDir(), 'box/dashboard/dist'),

PROVIDER_FILE: '/etc/cloudron/PROVIDER',
@@ -43,6 +44,7 @@ exports = module.exports = {
DHPARAMS_FILE: path.join(baseDir(), 'platformdata/dhparams.pem'),
FEATURES_INFO_FILE: path.join(baseDir(), 'platformdata/features-info.json'),
VERSION_FILE: path.join(baseDir(), 'platformdata/VERSION'),
+CIFS_CREDENTIALS_DIR: path.join(baseDir(), 'platformdata/cifs'),
SSHFS_KEYS_DIR: path.join(baseDir(), 'platformdata/sshfs'),
SFTP_KEYS_DIR: path.join(baseDir(), 'platformdata/sftp/ssh'),
SFTP_PUBLIC_KEY_FILE: path.join(baseDir(), 'platformdata/sftp/ssh/ssh_host_rsa_key.pub'),

+1
-1
@@ -13,7 +13,7 @@ const apps = require('./apps.js'),
AuditSource = require('./auditsource.js'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:platform'),
-delay = require('delay'),
+delay = require('./delay.js'),
fs = require('fs'),
infra = require('./infra_version.js'),
locker = require('./locker.js'),

@@ -3,7 +3,7 @@
exports = module.exports = promiseRetry;

const assert = require('assert'),
-delay = require('delay'),
+delay = require('./delay.js'),
util = require('util');

async function promiseRetry(options, asyncFunction) {

+13
-14
@@ -45,10 +45,9 @@ const gProvisionStatus = {
}
};

-function setProgress(task, message, callback) {
+function setProgress(task, message) {
debug(`setProgress: ${task} - ${message}`);
gProvisionStatus[task].message = message;
-if (callback) callback();
}

async function ensureDhparams() {
@@ -71,7 +70,7 @@ async function setupTask(domain, auditSource) {
assert.strictEqual(typeof auditSource, 'object');

try {
-await cloudron.setupDnsAndCert(constants.DASHBOARD_LOCATION, domain, auditSource, (progress) => setProgress('setup', progress.message));
+await cloudron.setupDnsAndCert(constants.DASHBOARD_SUBDOMAIN, domain, auditSource, (progress) => setProgress('setup', progress.message));
await ensureDhparams();
await cloudron.setDashboardDomain(domain, auditSource);
setProgress('setup', 'Done'),
@@ -112,7 +111,7 @@ async function setup(domainConfig, sysinfoConfig, auditSource) {
dkimSelector: 'cloudron'
};

-await settings.setMailLocation(domain, `${constants.DASHBOARD_LOCATION}.${domain}`); // default mail location. do this before we add the domain for upserting mail DNS
+await settings.setMailLocation(domain, `${constants.DASHBOARD_SUBDOMAIN}.${domain}`); // default mail location. do this before we add the domain for upserting mail DNS
await domains.add(domain, data, auditSource);
await settings.setSysinfoConfig(sysinfoConfig);

@@ -142,7 +141,7 @@ async function activate(username, password, email, displayName, ip, auditSource)
const token = { clientId: tokens.ID_WEBADMIN, identifier: ownerId, expires: Date.now() + constants.DEFAULT_TOKEN_EXPIRATION_MSECS };
const result = await tokens.add(token);

-eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, {});
+await eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, {});

setImmediate(() => safe(cloudron.onActivated({}), { debug }));

@@ -153,21 +152,21 @@ async function activate(username, password, email, displayName, ip, auditSource)
};
}

-async function restoreTask(backupConfig, backupId, sysinfoConfig, options, auditSource) {
+async function restoreTask(backupConfig, remotePath, sysinfoConfig, options, auditSource) {
assert.strictEqual(typeof backupConfig, 'object');
-assert.strictEqual(typeof backupId, 'string');
+assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof sysinfoConfig, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof auditSource, 'object');

try {
setProgress('restore', 'Downloading box backup');
-await backuptask.restore(backupConfig, backupId, (progress) => setProgress('restore', progress.message));
+await backuptask.restore(backupConfig, remotePath, (progress) => setProgress('restore', progress.message));

setProgress('restore', 'Downloading mail backup');
const mailBackups = await backups.getByIdentifierAndStatePaged(backups.BACKUP_IDENTIFIER_MAIL, backups.BACKUP_STATE_NORMAL, 1, 1);
if (mailBackups.length === 0) throw new BoxError(BoxError.NOT_FOUND, 'mail backup not found');
-const mailRestoreConfig = { backupConfig, backupId: mailBackups[0].id, backupFormat: mailBackups[0].format };
+const mailRestoreConfig = { backupConfig, remotePath: mailBackups[0].remotePath, backupFormat: mailBackups[0].format };
await backuptask.downloadMail(mailRestoreConfig, (progress) => setProgress('restore', progress.message));

await ensureDhparams();
@@ -175,10 +174,10 @@ async function restoreTask(backupConfig, backupId, sysinfoConfig, options, audit
await reverseProxy.restoreFallbackCertificates();

const dashboardDomain = settings.dashboardDomain(); // load this fresh from after the backup.restore
-if (!options.skipDnsSetup) await cloudron.setupDnsAndCert(constants.DASHBOARD_LOCATION, dashboardDomain, auditSource, (progress) => setProgress('restore', progress.message));
+if (!options.skipDnsSetup) await cloudron.setupDnsAndCert(constants.DASHBOARD_SUBDOMAIN, dashboardDomain, auditSource, (progress) => setProgress('restore', progress.message));
await cloudron.setDashboardDomain(dashboardDomain, auditSource);
await settings.setBackupCredentials(backupConfig); // update just the credentials and not the policy and flags
-await eventlog.add(eventlog.ACTION_RESTORE, auditSource, { backupId });
+await eventlog.add(eventlog.ACTION_RESTORE, auditSource, { remotePath });

setImmediate(() => safe(cloudron.onActivated(options), { debug }));
} catch (error) {
@@ -187,9 +186,9 @@ async function restoreTask(backupConfig, backupId, sysinfoConfig, options, audit
gProvisionStatus.restore.active = false;
}

-async function restore(backupConfig, backupId, version, sysinfoConfig, options, auditSource) {
+async function restore(backupConfig, remotePath, version, sysinfoConfig, options, auditSource) {
assert.strictEqual(typeof backupConfig, 'object');
-assert.strictEqual(typeof backupId, 'string');
+assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof version, 'string');
assert.strictEqual(typeof sysinfoConfig, 'object');
assert.strictEqual(typeof options, 'object');
@@ -233,7 +232,7 @@ async function restore(backupConfig, backupId, version, sysinfoConfig, options,
error = await sysinfo.testIPv4Config(sysinfoConfig);
if (error) throw error;

-safe(restoreTask(backupConfig, backupId, sysinfoConfig, options, auditSource), { debug }); // now that args are validated run the task in the background
+safe(restoreTask(backupConfig, remotePath, sysinfoConfig, options, auditSource), { debug }); // now that args are validated run the task in the background
} catch (error) {
gProvisionStatus.restore.active = false;
gProvisionStatus.restore.errorMessage = error ? error.message : '';

+24
-14
@@ -50,15 +50,23 @@ function jwtVerify(req, res, next) {
});
}

-async function basicAuthVerify(req, res, next) {
+async function authorizationHeader(req, res, next) {
const appId = req.headers['x-app-id'] || '';
-const credentials = basicAuth(req);
-if (!appId || !credentials) return next();
+if (!appId) return next();

+if (!req.headers.authorization) return next();

const [error, app] = await safe(apps.get(appId));
if (error) return next(new HttpError(503, error.message));
if (!app) return next(new HttpError(503, 'Error getting app'));

-if (!app.manifest.addons.proxyAuth.basicAuth) return next();
+// only if the app supports bearer auth, pass it through to the app. without this flag, anyone can access the app with Bearer auth!
+if (req.headers.authorization.startsWith('Bearer ') && app.manifest.addons.proxyAuth.supportsBearerAuth) return next(new HttpSuccess(200, {}));

+const credentials = basicAuth(req);
+if (!credentials) return next();

+if (!app.manifest.addons.proxyAuth.basicAuth) return next(); // this is a flag because this allows auth to bypass 2FA

const verifyFunc = credentials.name.indexOf('@') !== -1 ? users.verifyWithEmail : users.verifyWithUsername;
const [verifyError, user] = await safe(verifyFunc(credentials.name, credentials.pass, appId));
@@ -86,8 +94,12 @@ async function loginPage(req, res, next) {

const title = app.label || app.manifest.title;

-const [iconError, iconBuffer] = await safe(apps.getIcon(app, {}));
-if (iconError || !iconBuffer) return next(new HttpError(500, 'Icon rendering error'));
+let [iconError, iconBuffer] = await safe(apps.getIcon(app, {}));
+if (iconError) return next(new HttpError(500, `Error getting app icon: ${error.message}`));
+if (!iconBuffer) {
+iconBuffer = safe.fs.readFileSync(path.join(paths.DASHBOARD_DIR, 'img/appicon_fallback.png'));
+if (!iconBuffer) return next(new HttpError(500, 'App icon and fallback icon is missing'));
+}

const icon = 'data:image/png;base64,' + iconBuffer.toString('base64');
const dashboardOrigin = settings.dashboardOrigin();
@@ -131,8 +143,11 @@ function auth(req, res, next) {
maxAge: constants.DEFAULT_TOKEN_EXPIRATION_MSECS,
secure: true
});
+res.set('x-remote-user', req.user.username);
+res.set('x-remote-email', req.user.email);
+res.set('x-remote-name', req.user.displayName);

-return next(new HttpSuccess(200, {}));
+next(new HttpSuccess(200, {}));
}

// endpoint called by login page, username and password posted as JSON body
@@ -197,11 +212,6 @@ async function logoutPage(req, res, next) {
res.redirect(302, app.manifest.addons.proxyAuth.path ? '/' : '/login');
}

-function logout(req, res, next) {
-res.clearCookie('authToken');
-next(new HttpSuccess(200, {}));
-}

// provides webhooks for the auth wall
function initializeAuthwallExpressSync() {
const app = express();
@@ -241,10 +251,10 @@ function initializeAuthwallExpressSync() {
.use(middleware.lastMile());

router.get ('/login', loginPage);
-router.get ('/auth', jwtVerify, basicAuthVerify, auth); // called by nginx before accessing protected page
+router.get ('/auth', jwtVerify, authorizationHeader, auth); // called by nginx before accessing protected page
router.post('/login', json, passwordAuth, authorize);
router.get ('/logout', logoutPage);
-router.post('/logout', json, logout);
+router.post('/logout', json, logoutPage);

return httpServer;
}

+141
-130
@@ -1,26 +1,26 @@
'use strict';

exports = module.exports = {
-setAppCertificate,
-setFallbackCertificate,
+setUserCertificate, // per location certificate
+setFallbackCertificate, // per domain certificate

generateFallbackCertificate,

validateCertificate,

-getCertificatePath,
+getCertificatePath, // resolved cert path
ensureCertificate,

checkCerts,

-// the 'configure' ensure a certificate and generate nginx config
+// the 'configure' functions ensure a certificate and generate nginx config
configureApp,
unconfigureApp,

// these only generate nginx config
writeDefaultConfig,
writeDashboardConfig,
writeAppConfig,
writeAppConfigs,

removeAppConfigs,
restoreFallbackCertificates,
@@ -59,17 +59,14 @@ const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
function nginxLocation(s) {
if (!s.startsWith('!')) return s;

-let re = s.replace(/[\^$\\.*+?()[\]{}|]/g, '\\$&'); // https://github.com/es-shims/regexp.escape/blob/master/implementation.js

+const re = s.replace(/[\^$\\.*+?()[\]{}|]/g, '\\$&'); // https://github.com/es-shims/regexp.escape/blob/master/implementation.js
return `~ ^(?!(${re.slice(1)}))`; // negative regex assertion - https://stackoverflow.com/questions/16302897/nginx-location-not-equal-to-regex
}
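Note: nginxLocation() treats a leading `!` as "everything except this path": the remainder is regex-escaped and wrapped in a negative lookahead. A hypothetical call:

    nginxLocation('/admin');   // => '/admin' (plain prefix location, returned untouched)
    nginxLocation('!/admin');  // => '~ ^(?!(/admin))' - matches every URI not starting with /admin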
|
||||
|
||||
async function getAcmeApi(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
|
||||
const acmeApi = acme2;
|
||||
|
||||
let apiOptions = { prod: false, performHttpAuthorization: false, wildcard: false, email: '' };
|
||||
const apiOptions = { prod: false, performHttpAuthorization: false, wildcard: false, email: '' };
|
||||
apiOptions.prod = domainObject.tlsConfig.provider.match(/.*-prod/) !== null; // matches 'le-prod' or 'letsencrypt-prod'
|
||||
apiOptions.performHttpAuthorization = domainObject.provider.match(/noop|manual|wildcard/) !== null;
|
||||
apiOptions.wildcard = !!domainObject.tlsConfig.wildcard;
|
||||
@@ -81,7 +78,7 @@ async function getAcmeApi(domainObject) {
|
||||
const [error, owner] = await safe(users.getOwner());
|
||||
apiOptions.email = (error || !owner) ? 'webmaster@cloudron.io' : owner.email; // can error if not activated yet
|
||||
|
||||
return { acmeApi, apiOptions };
|
||||
return { acme2, apiOptions };
|
||||
}
|
||||
|
||||
function getExpiryDate(certFilePath) {
|
||||
@@ -144,19 +141,19 @@ function providerMatchesSync(domainObject, certFilePath, apiOptions) {
|
||||
|
||||
// note: https://tools.ietf.org/html/rfc4346#section-7.4.2 (certificate_list) requires that the
|
||||
// servers certificate appears first (and not the intermediate cert)
|
||||
function validateCertificate(location, domainObject, certificate) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
function validateCertificate(subdomain, domainObject, certificate) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert(certificate && typeof certificate, 'object');
|
||||
|
||||
const cert = certificate.cert, key = certificate.key;
|
||||
const { cert, key } = certificate;
|
||||
|
||||
// check for empty cert and key strings
|
||||
if (!cert && key) return new BoxError(BoxError.BAD_FIELD, 'missing cert');
|
||||
if (cert && !key) return new BoxError(BoxError.BAD_FIELD, 'missing key');
|
||||
|
||||
// -checkhost checks for SAN or CN exclusively. SAN takes precedence and if present, ignores the CN.
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
let result = safe.child_process.execSync(`openssl x509 -noout -checkhost "${fqdn}"`, { encoding: 'utf8', input: cert });
|
||||
if (result === null) return new BoxError(BoxError.BAD_FIELD, 'Unable to get certificate subject:' + safe.error.message);
|
||||
@@ -198,7 +195,7 @@ async function generateFallbackCertificate(domain) {
|
||||
let opensslConfWithSan;
|
||||
const cn = domain;
|
||||
|
||||
debug(`generateFallbackCertificateSync: domain=${domain} cn=${cn}`);
|
||||
debug(`generateFallbackCertificate: domain=${domain} cn=${cn}`);
|
||||
|
||||
opensslConfWithSan = `${opensslConf}\n[SAN]\nsubjectAltName=DNS:${domain},DNS:*.${cn}\n`;
|
||||
const configFile = path.join(os.tmpdir(), 'openssl-' + crypto.randomBytes(4).readUInt32LE(0) + '.conf');
|
||||
@@ -219,14 +216,13 @@ async function generateFallbackCertificate(domain) {
|
||||
return { cert, key };
|
||||
}
-async function setFallbackCertificate(domain, fallback) {
+async function setFallbackCertificate(domain, certificate) {
assert.strictEqual(typeof domain, 'string');
-assert(fallback && typeof fallback === 'object');
-assert.strictEqual(typeof fallback, 'object');
+assert(certificate && typeof certificate === 'object');

debug(`setFallbackCertificate: setting certs for domain ${domain}`);
-if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.cert`), fallback.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
-if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.key`), fallback.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
+if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.cert`), certificate.cert)) throw new BoxError(BoxError.FS_ERROR, safe.error.message);
+if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, `${domain}.host.key`), certificate.key)) throw new BoxError(BoxError.FS_ERROR, safe.error.message);

// TODO: maybe the cert is being used by the mail container
await reload();

@@ -250,55 +246,36 @@ function getFallbackCertificatePathSync(domain) {
return { certFilePath, keyFilePath };
}
-function getAppCertificatePathSync(vhost) {
-assert.strictEqual(typeof vhost, 'string');
+function getUserCertificatePathSync(fqdn) {
+assert.strictEqual(typeof fqdn, 'string');

-const certFilePath = path.join(paths.NGINX_CERT_DIR, `${vhost}.user.cert`);
-const keyFilePath = path.join(paths.NGINX_CERT_DIR, `${vhost}.user.key`);
+const certFilePath = path.join(paths.NGINX_CERT_DIR, `${fqdn}.user.cert`);
+const keyFilePath = path.join(paths.NGINX_CERT_DIR, `${fqdn}.user.key`);

return { certFilePath, keyFilePath };
}
-function getAcmeCertificatePathSync(vhost, domainObject) {
-assert.strictEqual(typeof vhost, 'string'); // this can contain wildcard domain (for alias domains)
+function getAcmeCertificatePathSync(fqdn, domainObject) {
+assert.strictEqual(typeof fqdn, 'string'); // this can contain wildcard domain (for alias domains)
assert.strictEqual(typeof domainObject, 'object');

let certName, certFilePath, keyFilePath, csrFilePath, acmeChallengesDir = paths.ACME_CHALLENGES_DIR;

-if (vhost !== domainObject.domain && domainObject.tlsConfig.wildcard) { // bare domain is not part of wildcard SAN
-certName = dns.makeWildcard(vhost).replace('*.', '_.');
+if (fqdn !== domainObject.domain && domainObject.tlsConfig.wildcard) { // bare domain is not part of wildcard SAN
+certName = dns.makeWildcard(fqdn).replace('*.', '_.');
certFilePath = path.join(paths.NGINX_CERT_DIR, `${certName}.cert`);
keyFilePath = path.join(paths.NGINX_CERT_DIR, `${certName}.key`);
csrFilePath = path.join(paths.NGINX_CERT_DIR, `${certName}.csr`);
} else {
-certName = vhost;
-certFilePath = path.join(paths.NGINX_CERT_DIR, `${vhost}.cert`);
-keyFilePath = path.join(paths.NGINX_CERT_DIR, `${vhost}.key`);
-csrFilePath = path.join(paths.NGINX_CERT_DIR, `${vhost}.csr`);
+certName = fqdn;
+certFilePath = path.join(paths.NGINX_CERT_DIR, `${fqdn}.cert`);
+keyFilePath = path.join(paths.NGINX_CERT_DIR, `${fqdn}.key`);
+csrFilePath = path.join(paths.NGINX_CERT_DIR, `${fqdn}.csr`);
}

return { certName, certFilePath, keyFilePath, csrFilePath, acmeChallengesDir };
}
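The `_.` prefix in the wildcard branch is just a filesystem-safe spelling of `*.`. Assuming `dns.makeWildcard('blog.example.com')` returns `'*.example.com'` (the name strongly suggests this, but that function is not part of this diff), the resulting file names look like:

```js
// Sketch of the wildcard naming above; makeWildcard's behavior is assumed.
const certName = '*.example.com'.replace('*.', '_.'); // '_.example.com'

console.log(`${certName}.cert`); // '_.example.com.cert'
console.log(`${certName}.key`);  // '_.example.com.key'
console.log(`${certName}.csr`);  // '_.example.com.csr'
```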
-async function setAppCertificate(location, domainObject, certificate) {
-assert.strictEqual(typeof location, 'string');
-assert.strictEqual(typeof domainObject, 'object');
-assert.strictEqual(typeof certificate, 'object');
-
-const fqdn = dns.fqdn(location, domainObject);
-const { certFilePath, keyFilePath } = getAppCertificatePathSync(fqdn);
-
-if (certificate.cert && certificate.key) {
-if (!safe.fs.writeFileSync(certFilePath, certificate.cert)) throw safe.error;
-if (!safe.fs.writeFileSync(keyFilePath, certificate.key)) throw safe.error;
-} else { // remove existing cert/key
-if (!safe.fs.unlinkSync(certFilePath)) debug(`Error removing cert: ${safe.error.message}`);
-if (!safe.fs.unlinkSync(keyFilePath)) debug(`Error removing key: ${safe.error.message}`);
-}
-
-await reload();
-}

async function getCertificatePath(fqdn, domain) {
assert.strictEqual(typeof fqdn, 'string');
assert.strictEqual(typeof domain, 'string');
@@ -309,38 +286,38 @@ async function getCertificatePath(fqdn, domain) {

const domainObject = await domains.get(domain);

-const appCertPath = getAppCertificatePathSync(fqdn); // user cert always wins
-if (fs.existsSync(appCertPath.certFilePath) && fs.existsSync(appCertPath.keyFilePath)) return appCertPath;
+const userPath = getUserCertificatePathSync(fqdn); // user cert always wins
+if (fs.existsSync(userPath.certFilePath) && fs.existsSync(userPath.keyFilePath)) return userPath;

if (domainObject.tlsConfig.provider === 'fallback') return getFallbackCertificatePathSync(domain);

-const acmeCertPath = getAcmeCertificatePathSync(fqdn, domainObject);
-if (fs.existsSync(acmeCertPath.certFilePath) && fs.existsSync(acmeCertPath.keyFilePath)) return acmeCertPath;
+const acmePath = getAcmeCertificatePathSync(fqdn, domainObject);
+if (fs.existsSync(acmePath.certFilePath) && fs.existsSync(acmePath.keyFilePath)) return acmePath;

return getFallbackCertificatePathSync(domain);
}
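Three certificate sources compete in `getCertificatePath`, so the precedence is worth restating. A compact sketch (not the actual implementation; the boolean names stand for the `fs.existsSync` checks above):

```js
// First match wins.
function pickCertificatePath({ userExists, providerIsFallback, acmeExists }) {
    if (userExists) return 'user';          // <fqdn>.user.cert / .user.key always win
    if (providerIsFallback) return 'fallback';
    if (acmeExists) return 'acme';          // <fqdn>.cert, or _.<domain>.cert for wildcard
    return 'fallback';                      // last resort when nothing else is on disk
}
```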
-async function checkAppCertificate(vhost, domainObject) {
-assert.strictEqual(typeof vhost, 'string'); // this can contain wildcard domain (for alias domains)
+async function syncUserCertificate(fqdn, domainObject) {
+assert.strictEqual(typeof fqdn, 'string'); // this can contain wildcard domain (for alias domains)
assert.strictEqual(typeof domainObject, 'object');

-const subdomain = vhost.substr(0, vhost.length - domainObject.domain.length - 1);
-const certificate = await apps.getCertificate(subdomain, domainObject.domain);
-if (!certificate) return null;
+const subdomain = fqdn.substr(0, fqdn.length - domainObject.domain.length - 1);
+const userCertificate = await apps.getCertificate(subdomain, domainObject.domain);
+if (!userCertificate) return null;

-const { certFilePath, keyFilePath } = getAppCertificatePathSync(vhost);
+const { certFilePath, keyFilePath } = getUserCertificatePathSync(fqdn);

-if (!safe.fs.writeFileSync(certFilePath, certificate.cert)) throw new BoxError(BoxError.FS_ERROR, `Failed to write certificate: ${safe.error.message}`);
-if (!safe.fs.writeFileSync(keyFilePath, certificate.key)) throw new BoxError(BoxError.FS_ERROR, `Failed to write key: ${safe.error.message}`);
+if (!safe.fs.writeFileSync(certFilePath, userCertificate.cert)) throw new BoxError(BoxError.FS_ERROR, `Failed to write certificate: ${safe.error.message}`);
+if (!safe.fs.writeFileSync(keyFilePath, userCertificate.key)) throw new BoxError(BoxError.FS_ERROR, `Failed to write key: ${safe.error.message}`);

return { certFilePath, keyFilePath };
}
-async function checkAcmeCertificate(vhost, domainObject) {
-assert.strictEqual(typeof vhost, 'string'); // this can contain wildcard domain (for alias domains)
+async function syncAcmeCertificate(fqdn, domainObject) {
+assert.strictEqual(typeof fqdn, 'string'); // this can contain wildcard domain (for alias domains)
assert.strictEqual(typeof domainObject, 'object');

-const { certName, certFilePath, keyFilePath, csrFilePath } = getAcmeCertificatePathSync(vhost, domainObject);
+const { certName, certFilePath, keyFilePath, csrFilePath } = getAcmeCertificatePathSync(fqdn, domainObject);

const privateKey = await blobs.get(`${blobs.CERT_PREFIX}-${certName}.key`);
const cert = await blobs.get(`${blobs.CERT_PREFIX}-${certName}.cert`);

@@ -356,11 +333,11 @@ async function checkAcmeCertificate(vhost, domainObject) {
return { certFilePath, keyFilePath };
}

-async function updateCertBlobs(vhost, domainObject) {
-assert.strictEqual(typeof vhost, 'string'); // this can contain wildcard domain (for alias domains)
+async function updateCertBlobs(fqdn, domainObject) {
+assert.strictEqual(typeof fqdn, 'string'); // this can contain wildcard domain (for alias domains)
assert.strictEqual(typeof domainObject, 'object');

-const { certName, certFilePath, keyFilePath, csrFilePath } = getAcmeCertificatePathSync(vhost, domainObject);
+const { certName, certFilePath, keyFilePath, csrFilePath } = getAcmeCertificatePathSync(fqdn, domainObject);

const privateKey = safe.fs.readFileSync(keyFilePath);
if (!privateKey) throw new BoxError(BoxError.FS_ERROR, `Failed to read private key: ${safe.error.message}`);
@@ -376,76 +353,76 @@ async function updateCertBlobs(vhost, domainObject) {
await blobs.set(`${blobs.CERT_PREFIX}-${certName}.csr`, csr);
}

-async function ensureCertificate(vhost, domain, auditSource) {
-assert.strictEqual(typeof vhost, 'string');
+async function ensureCertificate(subdomain, domain, auditSource) {
+assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof auditSource, 'object');

const domainObject = await domains.get(domain);

-let bundle = await checkAppCertificate(vhost, domainObject);
-if (bundle) return { bundle, renewed: false };
+const userCertificatePath = await syncUserCertificate(subdomain, domainObject);
+if (userCertificatePath) return { certificatePath: userCertificatePath, renewed: false };

if (domainObject.tlsConfig.provider === 'fallback') {
-debug(`ensureCertificate: ${vhost} will use fallback certs`);
+debug(`ensureCertificate: ${subdomain} will use fallback certs`);

-return { bundle: getFallbackCertificatePathSync(domain), renewed: false };
+return { certificatePath: getFallbackCertificatePathSync(domain), renewed: false };
}

-const { acmeApi, apiOptions } = await getAcmeApi(domainObject);
+const { acme2, apiOptions } = await getAcmeApi(domainObject);
let notAfter = null;

-const [, currentBundle] = await safe(checkAcmeCertificate(vhost, domainObject));
-if (currentBundle) {
-debug(`ensureCertificate: ${vhost} certificate already exists at ${currentBundle.keyFilePath}`);
-notAfter = getExpiryDate(currentBundle.certFilePath);
+const [, acmeCertificatePath] = await safe(syncAcmeCertificate(subdomain, domainObject));
+if (acmeCertificatePath) {
+debug(`ensureCertificate: ${subdomain} certificate already exists at ${acmeCertificatePath.keyFilePath}`);
+notAfter = getExpiryDate(acmeCertificatePath.certFilePath);
const isExpiring = (notAfter - new Date()) <= (30 * 24 * 60 * 60 * 1000); // expiring in a month
-if (!isExpiring && providerMatchesSync(domainObject, currentBundle.certFilePath, apiOptions)) return { bundle: currentBundle, renewed: false };
-debug(`ensureCertificate: ${vhost} cert requires renewal`);
+if (!isExpiring && providerMatchesSync(domainObject, acmeCertificatePath.certFilePath, apiOptions)) return { certificatePath: acmeCertificatePath, renewed: false };
+debug(`ensureCertificate: ${subdomain} cert requires renewal`);
} else {
-debug(`ensureCertificate: ${vhost} cert does not exist`);
+debug(`ensureCertificate: ${subdomain} cert does not exist`);
}

-debug('ensureCertificate: getting certificate for %s with options %j', vhost, apiOptions);
+debug(`ensureCertificate: getting certificate for ${subdomain} with options ${JSON.stringify(apiOptions)}`);

-const acmePaths = getAcmeCertificatePathSync(vhost, domainObject);
-let [error] = await safe(acmeApi.getCertificate(vhost, domain, acmePaths, apiOptions));
+const acmePaths = getAcmeCertificatePathSync(subdomain, domainObject);
+const [error] = await safe(acme2.getCertificate(subdomain, domain, acmePaths, apiOptions));
debug(`ensureCertificate: error: ${error ? error.message : 'null'} cert: ${acmePaths.certFilePath || 'null'}`);

-await safe(eventlog.add(currentBundle ? eventlog.ACTION_CERTIFICATE_RENEWAL : eventlog.ACTION_CERTIFICATE_NEW, auditSource, { domain: vhost, errorMessage: error ? error.message : '', notAfter }));
+await safe(eventlog.add(acmeCertificatePath ? eventlog.ACTION_CERTIFICATE_RENEWAL : eventlog.ACTION_CERTIFICATE_NEW, auditSource, { domain: subdomain, errorMessage: error ? error.message : '', notAfter }));

-if (error && currentBundle && (notAfter - new Date() > 0)) { // still some life left in this certificate
-debug('ensureCertificate: continue using existing bundle since renewal failed');
-return { bundle: currentBundle, renewed: false };
+if (error && acmeCertificatePath && (notAfter - new Date() > 0)) { // still some life left in this certificate
+debug('ensureCertificate: continue using existing certificate since renewal failed');
+return { certificatePath: acmeCertificatePath, renewed: false };
}

if (!error) {
-[error] = await safe(updateCertBlobs(vhost, domainObject));
-if (!error) return { bundle: { certFilePath: acmePaths.certFilePath, keyFilePath: acmePaths.keyFilePath }, renewed: true };
+const [updateCertError] = await safe(updateCertBlobs(subdomain, domainObject));
+if (!updateCertError) return { certificatePath: { certFilePath: acmePaths.certFilePath, keyFilePath: acmePaths.keyFilePath }, renewed: true };
}

-debug(`ensureCertificate: renewal of ${vhost} failed. using fallback certificates for ${domain}`);
+debug(`ensureCertificate: renewal of ${subdomain} failed. using fallback certificates for ${domain}`);

-return { bundle: getFallbackCertificatePathSync(domain), renewed: false };
+return { certificatePath: getFallbackCertificatePathSync(domain), renewed: false };
}
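The renewal trigger inside `ensureCertificate` is the 30-day comparison: subtracting two Date objects yields milliseconds, so the constant is 30 days expressed in ms. A quick worked example:

```js
const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000; // 2,592,000,000 ms

const notAfter = new Date('2024-07-01T00:00:00Z');
const now = new Date('2024-06-10T00:00:00Z'); // 21 days before expiry

console.log((notAfter - now) <= THIRTY_DAYS_MS); // true -> cert counts as expiring
```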
-async function writeDashboardNginxConfig(vhost, bundle) {
-assert.strictEqual(typeof vhost, 'string');
-assert.strictEqual(typeof bundle, 'object');
+async function writeDashboardNginxConfig(fqdn, certificatePath) {
+assert.strictEqual(typeof fqdn, 'string');
+assert.strictEqual(typeof certificatePath, 'object');

const data = {
sourceDir: path.resolve(__dirname, '..'),
-vhost: vhost,
+vhost: fqdn,
hasIPv6: sysinfo.hasIPv6(),
endpoint: 'dashboard',
-certFilePath: bundle.certFilePath,
-keyFilePath: bundle.keyFilePath,
+certFilePath: certificatePath.certFilePath,
+keyFilePath: certificatePath.keyFilePath,
robotsTxtQuoted: JSON.stringify('User-agent: *\nDisallow: /\n'),
proxyAuth: { enabled: false, id: null, location: nginxLocation('/') },
-ocsp: await isOcspEnabled(bundle.certFilePath)
+ocsp: await isOcspEnabled(certificatePath.certFilePath)
};
const nginxConf = ejs.render(NGINX_APPCONFIG_EJS, data);
-const nginxConfigFilename = path.join(paths.NGINX_APPCONFIG_DIR, `${vhost}.conf`);
+const nginxConfigFilename = path.join(paths.NGINX_APPCONFIG_DIR, `${fqdn}.conf`);

if (!safe.fs.writeFileSync(nginxConfigFilename, nginxConf)) throw new BoxError(BoxError.FS_ERROR, safe.error);

@@ -457,10 +434,10 @@ async function writeDashboardConfig(domainObject) {

debug(`writeDashboardConfig: writing admin config for ${domainObject.domain}`);

-const dashboardFqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
-const bundle = await getCertificatePath(dashboardFqdn, domainObject.domain);
+const dashboardFqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domainObject);
+const certificatePath = await getCertificatePath(dashboardFqdn, domainObject.domain);

-await writeDashboardNginxConfig(dashboardFqdn, bundle);
+await writeDashboardNginxConfig(dashboardFqdn, certificatePath);
}

function getNginxConfigFilename(app, fqdn, type) {

@@ -481,11 +458,11 @@ function getNginxConfigFilename(app, fqdn, type) {
return path.join(paths.NGINX_APPCONFIG_DIR, `${app.id}${nginxConfigFilenameSuffix}.conf`);
}
-async function writeAppNginxConfig(app, fqdn, type, bundle) {
+async function writeAppNginxConfig(app, fqdn, type, certificatePath) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof fqdn, 'string');
assert.strictEqual(typeof type, 'string');
-assert.strictEqual(typeof bundle, 'object');
+assert.strictEqual(typeof certificatePath, 'object');

const data = {
sourceDir: path.resolve(__dirname, '..'),

@@ -495,17 +472,28 @@ async function writeAppNginxConfig(app, fqdn, type, bundle) {
port: null,
endpoint: null,
redirectTo: null,
-certFilePath: bundle.certFilePath,
-keyFilePath: bundle.keyFilePath,
+certFilePath: certificatePath.certFilePath,
+keyFilePath: certificatePath.keyFilePath,
robotsTxtQuoted: null,
cspQuoted: null,
hideHeaders: [],
proxyAuth: { enabled: false },
-ocsp: await isOcspEnabled(bundle.certFilePath)
+upstreamUri: '', // only for endpoint === external
+ocsp: await isOcspEnabled(certificatePath.certFilePath)
};

if (type === apps.LOCATION_TYPE_PRIMARY || type === apps.LOCATION_TYPE_ALIAS || type === apps.LOCATION_TYPE_SECONDARY) {
data.endpoint = 'app';

+if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) {
+data.endpoint = 'external';
+
+// prevent generating invalid nginx configs
+if (!app.upstreamUri) throw new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot be empty');
+
+data.upstreamUri = app.upstreamUri;
+}
+
// maybe these should become per domain at some point
const reverseProxyConfig = app.reverseProxyConfig || {}; // some of our code uses fake app objects
if (reverseProxyConfig.robotsTxt) data.robotsTxtQuoted = JSON.stringify(app.reverseProxyConfig.robotsTxt);

@@ -544,20 +532,34 @@ async function writeAppNginxConfig(app, fqdn, type, bundle) {
await reload();
}
-async function writeAppConfig(app) {
+async function writeAppConfigs(app) {
assert.strictEqual(typeof app, 'object');

-const appDomains = [{ domain: app.domain, fqdn: app.fqdn, type: apps.LOCATION_TYPE_PRIMARY }]
-.concat(app.secondaryDomains.map(sd => { return { domain: sd.domain, fqdn: sd.fqdn, type: apps.LOCATION_TYPE_SECONDARY }; }))
-.concat(app.redirectDomains.map(rd => { return { domain: rd.domain, fqdn: rd.fqdn, type: apps.LOCATION_TYPE_REDIRECT }; }))
-.concat(app.aliasDomains.map(ad => { return { domain: ad.domain, fqdn: ad.fqdn, type: apps.LOCATION_TYPE_ALIAS }; }));
+const appDomains = [{ domain: app.domain, fqdn: app.fqdn, certificate: app.certificate, type: apps.LOCATION_TYPE_PRIMARY }]
+.concat(app.secondaryDomains.map(sd => { return { domain: sd.domain, certificate: sd.certificate, fqdn: sd.fqdn, type: apps.LOCATION_TYPE_SECONDARY }; }))
+.concat(app.redirectDomains.map(rd => { return { domain: rd.domain, certificate: rd.certificate, fqdn: rd.fqdn, type: apps.LOCATION_TYPE_REDIRECT }; }))
+.concat(app.aliasDomains.map(ad => { return { domain: ad.domain, certificate: ad.certificate, fqdn: ad.fqdn, type: apps.LOCATION_TYPE_ALIAS }; }));

for (const appDomain of appDomains) {
-const bundle = await getCertificatePath(appDomain.fqdn, appDomain.domain);
-await writeAppNginxConfig(app, appDomain.fqdn, appDomain.type, bundle);
+const certificatePath = await getCertificatePath(appDomain.fqdn, appDomain.domain);
+await writeAppNginxConfig(app, appDomain.fqdn, appDomain.type, certificatePath);
}
}
+async function setUserCertificate(app, fqdn, certificate) {
+const { certFilePath, keyFilePath } = getUserCertificatePathSync(fqdn);
+
+if (certificate !== null) {
+if (!safe.fs.writeFileSync(certFilePath, certificate.cert)) throw safe.error;
+if (!safe.fs.writeFileSync(keyFilePath, certificate.key)) throw safe.error;
+} else { // remove existing cert/key
+if (!safe.fs.unlinkSync(certFilePath)) debug(`Error removing cert: ${safe.error.message}`);
+if (!safe.fs.unlinkSync(keyFilePath)) debug(`Error removing key: ${safe.error.message}`);
+}
+
+await writeAppConfigs(app);
+}
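Both the new `setUserCertificate` and the rest of this file lean on the `safetydance` wrapper, whose two flavors this diff itself demonstrates: wrapped sync calls return a falsy value and stash the exception in `safe.error` instead of throwing, while `await safe(promise)` resolves to an `[error, result]` pair. A small illustration (the file path is made up):

```js
const safe = require('safetydance');

// sync flavor: falsy return + safe.error instead of a throw
if (!safe.fs.writeFileSync('/tmp/example.cert', 'PEM DATA')) {
    console.error(`write failed: ${safe.error.message}`);
}

// async flavor: an [error, result] tuple instead of a rejection
async function demo() {
    const [error, result] = await safe(Promise.reject(new Error('boom')));
    console.log(error.message, result); // 'boom' undefined
}
demo();
```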
async function configureApp(app, auditSource) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof auditSource, 'object');

@@ -571,7 +573,7 @@ async function configureApp(app, auditSource) {
await ensureCertificate(appDomain.fqdn, appDomain.domain, auditSource);
}

-await writeAppConfig(app);
+await writeAppConfigs(app);
}

async function unconfigureApp(app) {

@@ -609,7 +611,7 @@ async function renewCerts(options, auditSource, progressCallback) {
for (const app of allApps) {
if (app.runState === apps.RSTATE_STOPPED) continue; // do not renew certs of stopped apps

-appDomains = appDomains.concat([{ app, domain: app.domain, fqdn: app.fqdn, type: apps.LOCATION_TYPE_PRIMARY }])
+appDomains = appDomains.concat([{ app, domain: app.domain, fqdn: app.fqdn, type: apps.LOCATION_TYPE_PRIMARY, nginxConfigFilename: getNginxConfigFilename(app, app.fqdn, apps.LOCATION_TYPE_PRIMARY) }])
.concat(app.secondaryDomains.map(sd => { return { app, domain: sd.domain, fqdn: sd.fqdn, type: apps.LOCATION_TYPE_SECONDARY, nginxConfigFilename: getNginxConfigFilename(app, sd.fqdn, apps.LOCATION_TYPE_SECONDARY) }; }))
.concat(app.redirectDomains.map(rd => { return { app, domain: rd.domain, fqdn: rd.fqdn, type: apps.LOCATION_TYPE_REDIRECT, nginxConfigFilename: getNginxConfigFilename(app, rd.fqdn, apps.LOCATION_TYPE_REDIRECT) }; }))
.concat(app.aliasDomains.map(ad => { return { app, domain: ad.domain, fqdn: ad.fqdn, type: apps.LOCATION_TYPE_ALIAS, nginxConfigFilename: getNginxConfigFilename(app, ad.fqdn, apps.LOCATION_TYPE_ALIAS) }; }));

@@ -623,7 +625,7 @@ async function renewCerts(options, auditSource, progressCallback) {
progressCallback({ percent: progress, message: `Ensuring certs of ${appDomain.fqdn}` });
progress += Math.round(100/appDomains.length);

-const { bundle, renewed } = await ensureCertificate(appDomain.fqdn, appDomain.domain, auditSource);
+const { certificatePath, renewed } = await ensureCertificate(appDomain.fqdn, appDomain.domain, auditSource);

if (renewed) renewedCerts.push(appDomain.fqdn);

@@ -631,15 +633,15 @@ async function renewCerts(options, auditSource, progressCallback) {

// hack to check if the app's cert changed or not. this doesn't handle prod/staging le change since they use same file name
let currentNginxConfig = safe.fs.readFileSync(appDomain.nginxConfigFilename, 'utf8') || '';
-if (currentNginxConfig.includes(bundle.certFilePath)) continue;
+if (currentNginxConfig.includes(certificatePath.certFilePath)) continue;

-debug(`renewCerts: creating new nginx config since ${appDomain.nginxConfigFilename} does not have ${bundle.certFilePath}`);
+debug(`renewCerts: creating new nginx config since ${appDomain.nginxConfigFilename} does not have ${certificatePath.certFilePath}`);

// reconfigure since the cert changed
if (appDomain.type === 'webadmin' || appDomain.type === 'webadmin+mail') {
-await writeDashboardNginxConfig(settings.dashboardFqdn(), bundle);
+await writeDashboardNginxConfig(settings.dashboardFqdn(), certificatePath);
} else {
-await writeAppNginxConfig(appDomain.app, appDomain.fqdn, appDomain.type, bundle);
+await writeAppNginxConfig(appDomain.app, appDomain.fqdn, appDomain.type, certificatePath);
}
}

@@ -657,12 +659,17 @@ async function renewCerts(options, auditSource, progressCallback) {
}
}
-async function cleanupCerts() {
+async function cleanupCerts(auditSource, progressCallback) {
+assert.strictEqual(typeof auditSource, 'object');
+assert.strictEqual(typeof progressCallback, 'function');

const filenames = await fs.promises.readdir(paths.NGINX_CERT_DIR);
const certFilenames = filenames.filter(f => f.endsWith('.cert'));
const now = new Date();

-debug('cleanupCerts: start');
+progressCallback({ message: 'Checking expired certs for removal' });

+const fqdns = [];

for (const certFilename of certFilenames) {
const certFilePath = path.join(paths.NGINX_CERT_DIR, certFilename);

@@ -671,7 +678,7 @@ async function cleanupCerts() {

if (now - notAfter >= (60 * 60 * 24 * 30 * 6 * 1000)) { // expired 6 months ago
const fqdn = certFilename.replace(/\.cert$/, '');
-debug(`cleanupCerts: deleting certs of ${fqdn}`);
+progressCallback({ message: `deleting certs of ${fqdn}` });

// it is safe to delete the certs of stopped apps because their nginx configs are removed
safe.fs.unlinkSync(certFilePath);

@@ -681,9 +688,13 @@ async function cleanupCerts() {
await blobs.del(`${blobs.CERT_PREFIX}-${fqdn}.key`);
await blobs.del(`${blobs.CERT_PREFIX}-${fqdn}.cert`);
await blobs.del(`${blobs.CERT_PREFIX}-${fqdn}.csr`);

+fqdns.push(fqdn);
}
}

+if (fqdns.length) await safe(eventlog.add(eventlog.ACTION_CERTIFICATE_CLEANUP, auditSource, { domains: fqdns }));

debug('cleanupCerts: done');
}
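The cleanup threshold is written as seconds-per-month math times 1000; unpacked, `60 * 60 * 24 * 30 * 6 * 1000` is 15,552,000,000 ms, i.e. roughly 180 days:

```js
const SIX_MONTHS_MS = 60 * 60 * 24 * 30 * 6 * 1000; // 15,552,000,000 ms (~180 days)

const notAfter = new Date('2023-01-01T00:00:00Z');
const now = new Date('2023-12-01T00:00:00Z'); // expired ~11 months ago

console.log(now - notAfter >= SIX_MONTHS_MS); // true -> cert files and blobs get deleted
```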
@@ -693,7 +704,7 @@ async function checkCerts(options, auditSource, progressCallback) {
assert.strictEqual(typeof progressCallback, 'function');

await renewCerts(options, auditSource, progressCallback);
-await cleanupCerts();
+await cleanupCerts(auditSource, progressCallback);
}

function removeAppConfigs() {

@@ -702,7 +713,7 @@ function removeAppConfigs() {
debug('removeAppConfigs: reomving nginx configs of apps');

// remove all configs which are not the default or current dashboard
-for (let appConfigFile of fs.readdirSync(paths.NGINX_APPCONFIG_DIR)) {
+for (const appConfigFile of fs.readdirSync(paths.NGINX_APPCONFIG_DIR)) {
if (appConfigFile !== constants.NGINX_DEFAULT_CONFIG_FILE_NAME && appConfigFile !== dashboardConfigFilename) {
fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR, appConfigFile));
}

+27 -13
@@ -8,8 +8,8 @@ exports = module.exports = {
authorizeOperator,
};

-const accesscontrol = require('../accesscontrol.js'),
-apps = require('../apps.js'),
+const apps = require('../apps.js'),
+tokens = require('../tokens.js'),
assert = require('assert'),
BoxError = require('../boxerror.js'),
externalLdap = require('../externalldap.js'),

@@ -43,8 +43,13 @@ async function passwordAuth(req, res, next) {
if (!user.ghost && !user.appPassword && user.twoFactorAuthenticationEnabled) {
if (!totpToken) return next(new HttpError(401, 'A totpToken must be provided'));

-const verified = speakeasy.totp.verify({ secret: user.twoFactorAuthenticationSecret, encoding: 'base32', token: totpToken, window: 2 });
-if (!verified) return next(new HttpError(401, 'Invalid totpToken'));
+if (user.source === 'ldap') {
+const [error] = await safe(externalLdap.verifyPasswordAndTotpToken(user, password, totpToken));
+if (error) return next(new HttpError(401, 'Invalid totpToken'));
+} else {
+const verified = speakeasy.totp.verify({ secret: user.twoFactorAuthenticationSecret, encoding: 'base32', token: totpToken, window: 2 });
+if (!verified) return next(new HttpError(401, 'Invalid totpToken'));
+}
}

req.user = user;

@@ -53,27 +58,32 @@ async function passwordAuth(req, res, next) {
}
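The non-LDAP branch keeps using `speakeasy` for TOTP; `window: 2` accepts codes up to two 30-second steps away from the current time, which tolerates clock drift between server and authenticator app. A self-contained round trip looks like this (a sketch, not Cloudron code):

```js
const speakeasy = require('speakeasy');

const secret = speakeasy.generateSecret().base32;             // what would be stored per user
const token = speakeasy.totp({ secret, encoding: 'base32' }); // what the authenticator app shows

const verified = speakeasy.totp.verify({ secret, encoding: 'base32', token, window: 2 });
console.log(verified); // true
```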
async function tokenAuth(req, res, next) {
-let token;
+let accessToken;

// this determines the priority
-if (req.body && req.body.access_token) token = req.body.access_token;
-if (req.query && req.query.access_token) token = req.query.access_token;
+if (req.body && req.body.access_token) accessToken = req.body.access_token;
+if (req.query && req.query.access_token) accessToken = req.query.access_token;
if (req.headers && req.headers.authorization) {
const parts = req.headers.authorization.split(' ');
if (parts.length == 2) {
const [scheme, credentials] = parts;

-if (/^Bearer$/i.test(scheme)) token = credentials;
+if (/^Bearer$/i.test(scheme)) accessToken = credentials;
}
}

-if (!token) return next(new HttpError(401, 'Token required'));
+if (!accessToken) return next(new HttpError(401, 'Token required'));

-const [error, user] = await safe(accesscontrol.verifyToken(token));
-if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new HttpError(401, error.message));
-if (error) return next(new HttpError(500, error.message));
+const token = await tokens.getByAccessToken(accessToken);
+if (!token) return next(new HttpError(401, 'No such token'));

-req.access_token = token; // used in logout route
+const user = await users.get(token.identifier);
+if (!user) return next(new HttpError(401,'User not found'));
+if (!user.active) return next(new HttpError(401,'User not active'));
+
+await safe(tokens.update(token.id, { lastUsedTime: new Date() })); // ignore any error
+
+req.token = token;
req.user = user;

next();
@@ -84,8 +94,10 @@ function authorize(requiredRole) {

return function (req, res, next) {
assert.strictEqual(typeof req.user, 'object');
+assert.strictEqual(typeof req.token, 'object');

if (users.compareRoles(req.user.role, requiredRole) < 0) return next(new HttpError(403, `role '${requiredRole}' is required but user has only '${req.user.role}'`));
+if (!tokens.hasScope(req.token, req.method, req.path)) return next(new HttpError(403, 'access token does not have this scope'));

next();
};

@@ -95,7 +107,9 @@ async function authorizeOperator(req, res, next) {
assert.strictEqual(typeof req.params.id, 'string');
assert.strictEqual(typeof req.user, 'object');
assert.strictEqual(typeof req.app, 'object');
+assert.strictEqual(typeof req.token, 'object');

+if (!tokens.hasScope(req.token, req.method, req.path)) return next(new HttpError(403, 'access token does not have this scope'));
if (apps.isOperator(req.app, req.user)) return next();

return next(new HttpError(403, 'user is not an operator'));
@@ -0,0 +1,90 @@
+'use strict';
+
+exports = module.exports = {
+listByUser,
+add,
+get,
+update,
+remove,
+getIcon
+};
+
+const assert = require('assert'),
+applinks = require('../applinks.js'),
+BoxError = require('../boxerror.js'),
+safe = require('safetydance'),
+HttpError = require('connect-lastmile').HttpError,
+HttpSuccess = require('connect-lastmile').HttpSuccess;
+
+async function listByUser(req, res, next) {
+assert.strictEqual(typeof req.user, 'object');
+
+const [error, result] = await safe(applinks.listByUser(req.user));
+if (error) return next(BoxError.toHttpError(error));
+
+// we have a separate route for this
+result.forEach(function (a) { delete a.icon; });
+
+next(new HttpSuccess(200, { applinks: result }));
+}
+
+async function add(req, res, next) {
+assert.strictEqual(typeof req.body, 'object');
+
+if (!req.body.upstreamUri || typeof req.body.upstreamUri !== 'string') return next(new HttpError(400, 'upstreamUri must be a non-empty string'));
+if ('label' in req.body && typeof req.body.label !== 'string') return next(new HttpError(400, 'label must be a string'));
+if ('tags' in req.body && !Array.isArray(req.body.tags)) return next(new HttpError(400, 'tags must be an array with strings'));
+if ('accessRestriction' in req.body && typeof req.body.accessRestriction !== 'object') return next(new HttpError(400, 'accessRestriction must be an object'));
+
+const [error] = await safe(applinks.add(req.body));
+if (error) return next(BoxError.toHttpError(error));
+
+next(new HttpSuccess(201, {}));
+}
+
+async function get(req, res, next) {
+assert.strictEqual(typeof req.params.id, 'string');
+
+const [error, result] = await safe(applinks.get(req.params.id));
+if (error) return next(BoxError.toHttpError(error));
+
+// we have a separate route for this
+delete result.icon;
+
+next(new HttpSuccess(200, result));
+}
+
+async function update(req, res, next) {
+assert.strictEqual(typeof req.params.id, 'string');
+assert.strictEqual(typeof req.body, 'object');
+
+if (!req.body.upstreamUri || typeof req.body.upstreamUri !== 'string') return next(new HttpError(400, 'upstreamUri must be a non-empty string'));
+if ('label' in req.body && typeof req.body.label !== 'string') return next(new HttpError(400, 'label must be a string'));
+if ('tags' in req.body && !Array.isArray(req.body.tags)) return next(new HttpError(400, 'tags must be an array with strings'));
+if ('accessRestriction' in req.body && typeof req.body.accessRestriction !== 'object') return next(new HttpError(400, 'accessRestriction must be an object'));
+if ('icon' in req.body && typeof req.body.icon !== 'string') return next(new HttpError(400, 'icon must be a string'));
+
+const [error] = await safe(applinks.update(req.params.id, req.body));
+if (error) return next(BoxError.toHttpError(error));
+
+next(new HttpSuccess(202, {}));
+}
+
+async function remove(req, res, next) {
+assert.strictEqual(typeof req.params.id, 'string');
+
+const [error] = await safe(applinks.remove(req.params.id));
+if (error) return next(BoxError.toHttpError(error));
+
+next(new HttpSuccess(204));
+}
+
+async function getIcon(req, res, next) {
+assert.strictEqual(typeof req.params.id, 'string');
+
+const [error, icon] = await safe(applinks.getIcon(req.params.id, { original: req.query.original }));
+if (error) return next(BoxError.toHttpError(error));
+if (!icon) return next(new HttpError(404, 'no such icon'));
+
+res.send(icon);
+}
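The new route handlers above map onto the usual REST verbs (201 create, 200 read, 202 update, 204 delete). A hypothetical client session, assuming the router mounts them at `/api/v1/applinks` (the mount point is not part of this diff):

```js
async function demo() {
    const headers = { 'Content-Type': 'application/json', Authorization: 'Bearer <access token>' };
    const base = 'https://my.example.com/api/v1/applinks'; // hypothetical mount point

    // create -> 201
    await fetch(base, { method: 'POST', headers, body: JSON.stringify({
        upstreamUri: 'https://wiki.internal.example.com', label: 'Wiki', tags: ['docs']
    }) });

    // list -> 200; note the handlers strip the icon field, it has its own route
    const { applinks } = await (await fetch(base, { headers })).json();
    console.log(applinks);
}
demo();
```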
Some files were not shown because too many files have changed in this diff