Compare commits
445 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c9916c4107 | ||
|
|
c7956872cb | ||
|
|
3adf8b5176 | ||
|
|
40eae601da | ||
|
|
3eead2fdbe | ||
|
|
9fcd6f9c0a | ||
|
|
17910584ca | ||
|
|
d9a02faf7a | ||
|
|
d366f3107d | ||
|
|
2596afa7b3 | ||
|
|
aa1e8dc930 | ||
|
|
f3c66056b5 | ||
|
|
93bacd00da | ||
|
|
b5c2a0ff44 | ||
|
|
6bd478b8b0 | ||
|
|
c5c62ff294 | ||
|
|
7ed8678d50 | ||
|
|
e19e5423f0 | ||
|
|
622ba01c7a | ||
|
|
935da3ed15 | ||
|
|
ce054820a6 | ||
|
|
a7668624b4 | ||
|
|
01b36bb37e | ||
|
|
5d1aaf6bc6 | ||
|
|
7ceb307110 | ||
|
|
6371b7c20d | ||
|
|
7ec648164e | ||
|
|
6e98f5f36c | ||
|
|
a098c6da34 | ||
|
|
94e70aca33 | ||
|
|
ea01586b52 | ||
|
|
8ceb80dc44 | ||
|
|
2280b7eaf5 | ||
|
|
1c1d247a24 | ||
|
|
90a6ad8cf5 | ||
|
|
80d91e5540 | ||
|
|
26cf084e1c | ||
|
|
8ef730ad9c | ||
|
|
7123ec433c | ||
|
|
c67d9fd082 | ||
|
|
dd8f710605 | ||
|
|
e097b79f65 | ||
|
|
765f6d1b12 | ||
|
|
7cf80ebf69 | ||
|
|
cc328f3a6e | ||
|
|
045c3917c9 | ||
|
|
ac2186ccf6 | ||
|
|
a57fe36643 | ||
|
|
1e711f7928 | ||
|
|
eafccde6cb | ||
|
|
6b85e11a22 | ||
|
|
a74de3811b | ||
|
|
070a425c85 | ||
|
|
32153ed47d | ||
|
|
454f9c4a79 | ||
|
|
3d28833c35 | ||
|
|
be458020dd | ||
|
|
9b6733fd88 | ||
|
|
1b34a3e599 | ||
|
|
67d29dbad8 | ||
|
|
28b0043541 | ||
|
|
78824b059e | ||
|
|
c63709312d | ||
|
|
11cf24075b | ||
|
|
5d440d55c3 | ||
|
|
4c3b81d29c | ||
|
|
032218c0fd | ||
|
|
0cd48bd239 | ||
|
|
f5a2e8545b | ||
|
|
4306e20a8e | ||
|
|
635dd5f10d | ||
|
|
7f89dfd261 | ||
|
|
e878e71b20 | ||
|
|
64a2493ca2 | ||
|
|
26f9635a38 | ||
|
|
5f2492558d | ||
|
|
c83c151e10 | ||
|
|
801dddc269 | ||
|
|
9a886111ad | ||
|
|
bdc9a0cbe3 | ||
|
|
555f914537 | ||
|
|
43f86674b4 | ||
|
|
f7ed044a40 | ||
|
|
72408f2542 | ||
|
|
0abc6c8844 | ||
|
|
d46de32ffb | ||
|
|
185d5d66ad | ||
|
|
01ce251596 | ||
|
|
05d7a7f496 | ||
|
|
685bda35b9 | ||
|
|
8d8cdd38a9 | ||
|
|
d54c03f0a0 | ||
|
|
11f7be2065 | ||
|
|
a39e0ab934 | ||
|
|
b51082f7e4 | ||
|
|
9ec76c69ec | ||
|
|
b0a09a8a00 | ||
|
|
5870f949a3 | ||
|
|
87cb90c9b6 | ||
|
|
21b900258a | ||
|
|
de9f3c10f4 | ||
|
|
47e45808a3 | ||
|
|
0280c2baba | ||
|
|
2f8f5fcb7d | ||
|
|
709d4041b2 | ||
|
|
b4b999bd74 | ||
|
|
ea3fd27123 | ||
|
|
452a4d9a75 | ||
|
|
54934c41a7 | ||
|
|
a05e564ae6 | ||
|
|
57ac94bab6 | ||
|
|
6839ff4cf6 | ||
|
|
993dda9121 | ||
|
|
70695b1b0f | ||
|
|
d47b39d90b | ||
|
|
574d3b120f | ||
|
|
3d1f2bf716 | ||
|
|
bac5edc188 | ||
|
|
7700c56d3e | ||
|
|
9f395f64da | ||
|
|
73d029ba4b | ||
|
|
a292393a43 | ||
|
|
37a4e8d5c5 | ||
|
|
81728f4202 | ||
|
|
2d2ddd1c49 | ||
|
|
bc49f64a0c | ||
|
|
52fc031516 | ||
|
|
cae528158c | ||
|
|
566a03cd59 | ||
|
|
ad2221350f | ||
|
|
656dca7c66 | ||
|
|
638fe2e6c8 | ||
|
|
3295d2b727 | ||
|
|
c4689a8385 | ||
|
|
d09d6c21fa | ||
|
|
7ec1594428 | ||
|
|
529f6fb2cd | ||
|
|
724f5643bc | ||
|
|
74e849e2a1 | ||
|
|
bfb233eca1 | ||
|
|
5b27eb9c54 | ||
|
|
faf91d4d00 | ||
|
|
dbb803ff5e | ||
|
|
0dea2d283b | ||
|
|
cbc44da102 | ||
|
|
3f633c9779 | ||
|
|
6933ccefe2 | ||
|
|
54aeff1419 | ||
|
|
14f9d7fe25 | ||
|
|
144e98abab | ||
|
|
e0e0c049c8 | ||
|
|
ef0f9c5298 | ||
|
|
d13905377c | ||
|
|
6f1023e0cd | ||
|
|
eeddc233dd | ||
|
|
f48690ee11 | ||
|
|
3b0bdd9807 | ||
|
|
6dc5c4f13b | ||
|
|
9bb5096f1c | ||
|
|
af42008fd3 | ||
|
|
d6875d4949 | ||
|
|
4396bd3ea7 | ||
|
|
db03053e05 | ||
|
|
193dff8c30 | ||
|
|
59582d081a | ||
|
|
ef684d32a2 | ||
|
|
fc2a326332 | ||
|
|
e66a804012 | ||
|
|
5afa7345a5 | ||
|
|
c100be4131 | ||
|
|
d326d05ad6 | ||
|
|
eb0662b245 | ||
|
|
b92641d1b8 | ||
|
|
7912d521ca | ||
|
|
71dac64c4c | ||
|
|
aab6f222b3 | ||
|
|
1cb1be321c | ||
|
|
2434e81383 | ||
|
|
62142c42ea | ||
|
|
0ae30e6447 | ||
|
|
1a87856655 | ||
|
|
a3e097d541 | ||
|
|
9a6694286a | ||
|
|
a662a60332 | ||
|
|
69f3b4e987 | ||
|
|
481586d7b7 | ||
|
|
34c3a2b42d | ||
|
|
c4a9295d3e | ||
|
|
993ff50681 | ||
|
|
ba5c2f623c | ||
|
|
24a16cf8b4 | ||
|
|
5d34460e7f | ||
|
|
64b6187a26 | ||
|
|
c15913a1b2 | ||
|
|
8ef5e35677 | ||
|
|
c55d1f6a22 | ||
|
|
8b5b13af4d | ||
|
|
dfd51aad62 | ||
|
|
2b81120d43 | ||
|
|
91dc91a390 | ||
|
|
b886a35cff | ||
|
|
e59efc7e34 | ||
|
|
2160644124 | ||
|
|
b54c4bb399 | ||
|
|
feaa5585e1 | ||
|
|
6f7bede7bd | ||
|
|
eb3e87c340 | ||
|
|
26a8738b21 | ||
|
|
012a3e2984 | ||
|
|
dfebda7170 | ||
|
|
149f778652 | ||
|
|
773dfd9a7b | ||
|
|
426ed435a4 | ||
|
|
2ed770affd | ||
|
|
9d2d5d16f3 | ||
|
|
9dbb299bb9 | ||
|
|
661799cd54 | ||
|
|
0f25458914 | ||
|
|
d0c59c1f75 | ||
|
|
c6da8c8167 | ||
|
|
0dbe8ee8f2 | ||
|
|
f8b124caa6 | ||
|
|
125325721f | ||
|
|
ac57e433b1 | ||
|
|
de84cbc977 | ||
|
|
d6d7bc93e8 | ||
|
|
8f4779ad2f | ||
|
|
6aa034ea41 | ||
|
|
ca83deb761 | ||
|
|
ff664486ff | ||
|
|
c5f9c80f89 | ||
|
|
852eebac4d | ||
|
|
f0f9ade972 | ||
|
|
f3ba1a9702 | ||
|
|
c2f2a70d7f | ||
|
|
f18d108467 | ||
|
|
566def2b64 | ||
|
|
c9e3da22ab | ||
|
|
430f5e939b | ||
|
|
7bfa237d26 | ||
|
|
85964676fa | ||
|
|
68c2f6e2bd | ||
|
|
75c0caaa3d | ||
|
|
46b497d87e | ||
|
|
964c1a5f5a | ||
|
|
d5481342ed | ||
|
|
e3a0a9e5dc | ||
|
|
23b3070c52 | ||
|
|
5048f455a3 | ||
|
|
e27bad4bdd | ||
|
|
4273c56b44 | ||
|
|
0af9069f23 | ||
|
|
e1db45ef81 | ||
|
|
59b2bf72f7 | ||
|
|
8802b3bb14 | ||
|
|
ee0cbb0e42 | ||
|
|
5d415d4d7d | ||
|
|
3b3b510343 | ||
|
|
5c56cdfbc7 | ||
|
|
7601b4919a | ||
|
|
856b23d940 | ||
|
|
bd4097098d | ||
|
|
1441c59589 | ||
|
|
0373fb70d5 | ||
|
|
da5b5aadbc | ||
|
|
b75afaf5d5 | ||
|
|
26bfa32c7b | ||
|
|
67fe17d20c | ||
|
|
150f89ae43 | ||
|
|
944d364e1a | ||
|
|
aeef815bf7 | ||
|
|
46144ae07a | ||
|
|
8f08ed1aed | ||
|
|
73f637be26 | ||
|
|
37c8ca7617 | ||
|
|
c4bcbb8074 | ||
|
|
19ddff058e | ||
|
|
5382e3d832 | ||
|
|
ee3d1b3697 | ||
|
|
a786fad3ee | ||
|
|
8b9d821905 | ||
|
|
04b7c14fd7 | ||
|
|
5517d09e45 | ||
|
|
50adac3d99 | ||
|
|
8f8a59bd87 | ||
|
|
8e15f27080 | ||
|
|
e7977525a0 | ||
|
|
be9830d0d4 | ||
|
|
8958b154e9 | ||
|
|
d21d13afb0 | ||
|
|
43759061a4 | ||
|
|
a3efa8db54 | ||
|
|
f017e297f7 | ||
|
|
e8577d4d85 | ||
|
|
e8d08968a1 | ||
|
|
1e2f01cc69 | ||
|
|
b34f66b115 | ||
|
|
d18977ccad | ||
|
|
89c3847fb0 | ||
|
|
aeeeaae62a | ||
|
|
1e98a2affb | ||
|
|
3da19d5fa6 | ||
|
|
d7d46a5a81 | ||
|
|
d4369851bf | ||
|
|
97e439f8a3 | ||
|
|
e9945d8010 | ||
|
|
d35f948157 | ||
|
|
09d3d258b6 | ||
|
|
4513b6de70 | ||
|
|
ded5db20e6 | ||
|
|
6cf7ae4788 | ||
|
|
0508a04bab | ||
|
|
e7983f03d8 | ||
|
|
eada292ef3 | ||
|
|
3a19be5a2e | ||
|
|
52385fcc9c | ||
|
|
cc998ba805 | ||
|
|
37d641ec76 | ||
|
|
3fd45f8537 | ||
|
|
f4a21bdeb4 | ||
|
|
d65ac353fe | ||
|
|
7d7539f931 | ||
|
|
ac19921ca1 | ||
|
|
0654d549db | ||
|
|
91b1265833 | ||
|
|
2bc5c3cb6e | ||
|
|
cc61ee00be | ||
|
|
c74556fa3b | ||
|
|
bf51bc25e9 | ||
|
|
bbf1a5af3d | ||
|
|
235d18cbb1 | ||
|
|
32668b04c6 | ||
|
|
9ccf46dc8b | ||
|
|
d049aa1b57 | ||
|
|
44a149d1d9 | ||
|
|
38dd7e7414 | ||
|
|
fb5d726d42 | ||
|
|
531a6fe0dc | ||
|
|
15d0dd93f4 | ||
|
|
d8314d335a | ||
|
|
b18626c75c | ||
|
|
a04abf25f4 | ||
|
|
ebb6a246cb | ||
|
|
e672514ec7 | ||
|
|
b531a10392 | ||
|
|
9a71360346 | ||
|
|
5e9a46d71e | ||
|
|
66fd05ce47 | ||
|
|
7117c17777 | ||
|
|
9ad7123da4 | ||
|
|
98fd78159e | ||
|
|
3d57b2b47c | ||
|
|
2bc49682c4 | ||
|
|
bb2d9fca9b | ||
|
|
be8ab3578b | ||
|
|
43af0e1e3c | ||
|
|
43f33a34b8 | ||
|
|
7aded4aed7 | ||
|
|
d37652d362 | ||
|
|
9590a60c47 | ||
|
|
54bb7edf3b | ||
|
|
34d11f7f6e | ||
|
|
3a956857d2 | ||
|
|
08d41f4302 | ||
|
|
219fafc8e4 | ||
|
|
53593a10a9 | ||
|
|
26dc63553e | ||
|
|
83fd3d9ab4 | ||
|
|
d69758e559 | ||
|
|
d6fbe2a1bb | ||
|
|
a3280a0e30 | ||
|
|
e7f94b6748 | ||
|
|
6492c9b71f | ||
|
|
438bd36267 | ||
|
|
1c7eeb6ac6 | ||
|
|
86d642c8a3 | ||
|
|
d02d2dcb80 | ||
|
|
b5695c98af | ||
|
|
fcdc53f7bd | ||
|
|
5d85fe2577 | ||
|
|
013f5d359d | ||
|
|
ae0e572593 | ||
|
|
b4ed05c911 | ||
|
|
683ac9b16e | ||
|
|
2415e1ca4b | ||
|
|
cefbe7064f | ||
|
|
a687b7da26 | ||
|
|
ea2b11e448 | ||
|
|
39807e6ba4 | ||
|
|
5592dc8a42 | ||
|
|
aab69772e6 | ||
|
|
a5a9fce1eb | ||
|
|
e5fecdaabf | ||
|
|
412bb406c0 | ||
|
|
98b28db092 | ||
|
|
63fe75ecd2 | ||
|
|
c51a4514f4 | ||
|
|
3dcbeb11b8 | ||
|
|
e5301fead5 | ||
|
|
4a467c4dce | ||
|
|
3a8aaf72ba | ||
|
|
735737b513 | ||
|
|
37f066f2b0 | ||
|
|
1a9cfd046a | ||
|
|
31523af5e1 | ||
|
|
e71d932de0 | ||
|
|
7f45e1db06 | ||
|
|
2ab2255115 | ||
|
|
515b1db9d0 | ||
|
|
a7fe7b0aa3 | ||
|
|
89389258d7 | ||
|
|
1aacf65372 | ||
|
|
7ffcfc5206 | ||
|
|
5ab2d9da8a | ||
|
|
cd302a7621 | ||
|
|
1c8e699a71 | ||
|
|
c4db0d746d | ||
|
|
b7c5c99301 | ||
|
|
132c1872f4 | ||
|
|
0f04933dbf | ||
|
|
6d864d3621 | ||
|
|
b6ee1fb662 | ||
|
|
649cd896fc | ||
|
|
39be267805 | ||
|
|
f6356b2dff | ||
|
|
48574ce350 | ||
|
|
40a3145d92 | ||
|
|
f42430b7c4 | ||
|
|
178d93033f | ||
|
|
01a1803625 | ||
|
|
42eef42cf3 | ||
|
|
9c096b18e1 | ||
|
|
aa3ee2e180 | ||
|
|
fdefc780b4 | ||
|
|
3826ae64c6 | ||
|
|
dcdafda124 | ||
|
|
fc2cc25861 | ||
|
|
68db4524f1 | ||
|
|
48b75accdd | ||
|
|
0313a60f44 | ||
|
|
9897b5d18a | ||
|
|
e4cc431d35 | ||
|
|
535a755e74 | ||
|
|
2ae77a5ab7 | ||
|
|
e36d7665fa |
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,5 +1,6 @@
|
||||
node_modules/
|
||||
coverage/
|
||||
.nyc_output/
|
||||
webadmin/dist/
|
||||
installer/src/certs/server.key
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"node": true,
|
||||
"unused": true,
|
||||
"esversion": 8
|
||||
"esversion": 11
|
||||
}
|
||||
|
||||
102
CHANGES
102
CHANGES
@@ -2387,3 +2387,105 @@
|
||||
* restore: do not redirect until mail data has been restored
|
||||
* proxyauth: set viewport meta tag in login view
|
||||
|
||||
[7.0.4]
|
||||
* Add password reveal button to login pages
|
||||
* appstore: fix crash if account already registered
|
||||
* Do not nuke all the logrotate configs on update
|
||||
* Remove unused httpPaths from manifest
|
||||
* cloudron-support: add option to reset cloudron.io account
|
||||
* Fix flicker in login page
|
||||
* Fix LE account key re-use issue in DO 1-click image
|
||||
* mail: add non-tls ports for recvmail addon
|
||||
* backups: fix issue where mail backups where not cleaned up
|
||||
* notifications: fix automatic app update notifications
|
||||
|
||||
[7.1.0]
|
||||
* Add mail manager role
|
||||
* mailbox: app can be set as owner when recvmail addon enabled
|
||||
* domains: add well known config UI (for jitsi configuration)
|
||||
* Prefix email addon variables with CLOUDRON_EMAIL instead of CLOUDRON_MAIL
|
||||
* remove support for manifest version 1
|
||||
* Add option to enable/disable mailbox sharing
|
||||
* base image 3.2.0
|
||||
* Update node to 16.13.1
|
||||
* mongodb: update to 4.4
|
||||
* Add `upstreamVersion` to manifest
|
||||
* Add `logPaths` to manifest
|
||||
* Add cifs seal support for backup and volume mounts
|
||||
* add a way for admins to set username when profiles are locked
|
||||
* Add support for secondary domains
|
||||
* postgresql: enable postgis
|
||||
* remove nginx config of stopped apps
|
||||
* mail: use port25check.cloudron.io to check outbound port 25 connectivity
|
||||
* Add import/export of mailboxes and users
|
||||
* LDAP server can now be exposed
|
||||
* Update monaco-editor to 0.32.1
|
||||
* Update xterm.js to 4.17.0
|
||||
* Update docker to 20.10.12
|
||||
* IPv6 support
|
||||
|
||||
[7.1.1]
|
||||
* Fix issue where dkimKey of a mail domain is sometimes null
|
||||
* firewall: add retry for xtables lock
|
||||
* redis: fix issue where protected mode was enabled with no password
|
||||
|
||||
[7.1.2]
|
||||
* Fix crash in cloudron-firewall when ports are whitelisted
|
||||
* eventlog: add event for certificate cleanup
|
||||
* eventlog: log event for mailbox alias update
|
||||
* backups: fix incorrect mountpoint check with managed mounts
|
||||
|
||||
[7.1.3]
|
||||
* Fix security issue where an admin can impersonate an owner
|
||||
* block list: can upload up to 2MB
|
||||
* dns: fix issue where link local address was picked up for ipv6
|
||||
* setup: ufw may not be installed
|
||||
* mysql: fix default collation of databases
|
||||
|
||||
[7.1.4]
|
||||
* wildcard dns: fix handling of ENODATA
|
||||
* cloudflare: fix error handling
|
||||
* openvpn: ipv6 support
|
||||
* dyndns: fix issue where eventlog was getting filled with empty entries
|
||||
* mandatory 2fa: Fix typo in 2FA check
|
||||
|
||||
[7.2.0]
|
||||
* mail: hide log button for non-superadmins
|
||||
* firewall: do not add duplicate ldap redirect rules
|
||||
* ldap: respond to RootDSE
|
||||
* Check if CNAME record exists and remove it if overwrite is set
|
||||
* cifs: use credentials file for better password support
|
||||
* installer: rework script to fix DNS resolution issues
|
||||
* backup cleaner: do not clean if not mounted
|
||||
* restore: fix sftp private key perms
|
||||
* support: add a separate system user named cloudron-support
|
||||
* sshfs: fix bug where sshfs mounts were generated without unbound dependancy
|
||||
* cloudron-setup: add --setup-token
|
||||
* notifications: add installation event
|
||||
* backups: set label of backup and control it's retention
|
||||
* wasabi: add new regions (London, Frankfurt, Paris, Toronto)
|
||||
* docker: update to 20.10.14
|
||||
* Ensure LDAP usernames are always treated lowercase
|
||||
* Add a way to make LDAP users local
|
||||
* proxyAuth: set X-Remote-User (rfc3875)
|
||||
* GoDaddy: there is now a delete API
|
||||
* nginx: use ubuntu packages for ubuntu 20.04 and 22.04
|
||||
* Ubuntu 22.04 LTS support
|
||||
* Add Hetzner DNS
|
||||
* cron: add support for extensions (@reboot, @weekly etc)
|
||||
* Add profile backgroundImage api
|
||||
* exec: rework API to get exit code
|
||||
* Add update available filter
|
||||
|
||||
[7.2.1]
|
||||
* Refactor backup code to use async/await
|
||||
* mongodb: fix bug where a small timeout prevented import of large backups
|
||||
* Add update available filter
|
||||
* exec: rework API to get exit code
|
||||
* Add profile backgroundImage api
|
||||
* cron: add support for extensions (@reboot, @weekly etc)
|
||||
|
||||
[7.2.2]
|
||||
* Update cloudron-manifestformat for new scheduler patterns
|
||||
* collectd: FQDNLookup causes collectd install to fail
|
||||
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -1,5 +1,5 @@
|
||||
The Cloudron Subscription license
|
||||
Copyright (c) 2021 Cloudron UG
|
||||
Copyright (c) 2022 Cloudron UG
|
||||
|
||||
With regard to the Cloudron Software:
|
||||
|
||||
|
||||
@@ -1,180 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euv -o pipefail
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
readonly arg_infraversionpath="${SOURCE_DIR}/../src"
|
||||
|
||||
function die {
|
||||
echo $1
|
||||
exit 1
|
||||
}
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
readonly ubuntu_codename=$(lsb_release -cs)
|
||||
readonly ubuntu_version=$(lsb_release -rs)
|
||||
|
||||
# hold grub since updating it breaks on some VPS providers. also, dist-upgrade will trigger it
|
||||
apt-mark hold grub* >/dev/null
|
||||
apt-get -o Dpkg::Options::="--force-confdef" update -y
|
||||
apt-get -o Dpkg::Options::="--force-confdef" upgrade -y
|
||||
apt-mark unhold grub* >/dev/null
|
||||
|
||||
echo "==> Installing required packages"
|
||||
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
|
||||
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
|
||||
|
||||
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
|
||||
mysql_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "mysql-server-8.0" || echo "mysql-server-5.7")
|
||||
ntpd_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "systemd-timesyncd" || echo "")
|
||||
apt-get -y install --no-install-recommends \
|
||||
acl \
|
||||
apparmor \
|
||||
build-essential \
|
||||
cifs-utils \
|
||||
cron \
|
||||
curl \
|
||||
debconf-utils \
|
||||
dmsetup \
|
||||
$gpg_package \
|
||||
ipset \
|
||||
iptables \
|
||||
libpython2.7 \
|
||||
linux-generic \
|
||||
logrotate \
|
||||
$mysql_package \
|
||||
nfs-common \
|
||||
$ntpd_package \
|
||||
openssh-server \
|
||||
pwgen \
|
||||
resolvconf \
|
||||
sshfs \
|
||||
swaks \
|
||||
tzdata \
|
||||
unattended-upgrades \
|
||||
unbound \
|
||||
unzip \
|
||||
xfsprogs
|
||||
|
||||
echo "==> installing nginx for xenial for TLSv3 support"
|
||||
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/nginx.deb
|
||||
rm /tmp/nginx.deb
|
||||
|
||||
# on some providers like scaleway the sudo file is changed and we want to keep the old one
|
||||
apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends sudo
|
||||
|
||||
# this ensures that unattended upgades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
readonly node_version=14.17.6
|
||||
mkdir -p /usr/local/node-${node_version}
|
||||
curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-${node_version}
|
||||
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
|
||||
apt-get install -y --no-install-recommends python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
|
||||
# create systemd drop-in file. if you channge options here, be sure to fixup installer.sh as well
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
readonly docker_version=20.10.7
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.6-1_amd64.deb" -o /tmp/containerd.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "overlay2" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of overlay2"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# do not upgrade grub because it might prompt user and break this script
|
||||
echo "==> Enable memory accounting"
|
||||
apt-get -y --no-upgrade --no-install-recommends install grub2-common
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub
|
||||
|
||||
echo "==> Downloading docker images"
|
||||
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
|
||||
echo "No infra_versions.js found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
images=$(node -e "var i = require('${arg_infraversionpath}/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
|
||||
|
||||
echo -e "\tPulling docker images: ${images}"
|
||||
for image in ${images}; do
|
||||
docker pull "${image}"
|
||||
docker pull "${image%@sha256:*}" # this will tag the image for readability
|
||||
done
|
||||
|
||||
echo "==> Install collectd"
|
||||
# without this, libnotify4 will install gnome-shell
|
||||
apt-get install -y libnotify4 --no-install-recommends
|
||||
if ! apt-get install -y --no-install-recommends libcurl3-gnutls collectd collectd-utils; then
|
||||
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
|
||||
echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
|
||||
[[ "${ubuntu_version}" == "20.04" ]] && echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
|
||||
|
||||
# some hosts like atlantic install ntp which conflicts with timedatectl. https://serverfault.com/questions/1024770/ubuntu-20-04-time-sync-problems-and-possibly-incorrect-status-information
|
||||
echo "==> Configuring host"
|
||||
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
|
||||
if systemctl is-active ntp; then
|
||||
systemctl stop ntp
|
||||
apt purge -y ntp
|
||||
fi
|
||||
timedatectl set-ntp 1
|
||||
# mysql follows the system timezone
|
||||
timedatectl set-timezone UTC
|
||||
|
||||
echo "==> Adding sshd configuration warning"
|
||||
sed -e '/Port 22/ i # NOTE: Cloudron only supports moving SSH to port 202. See https://docs.cloudron.io/security/#securing-ssh-access' -i /etc/ssh/sshd_config
|
||||
|
||||
# https://bugs.launchpad.net/ubuntu/+source/base-files/+bug/1701068
|
||||
echo "==> Disabling motd news"
|
||||
if [ -f "/etc/default/motd-news" ]; then
|
||||
sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
|
||||
fi
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
# on ssdnodes postfix seems to run by default
|
||||
systemctl stop postfix || true
|
||||
systemctl disable postfix || true
|
||||
|
||||
# on ubuntu 18.04 and 20.04, this is the default. this requires resolvconf for DNS to work further after the disable
|
||||
systemctl stop systemd-resolved || true
|
||||
systemctl disable systemd-resolved || true
|
||||
|
||||
# ubuntu's default config for unbound does not work if ipv6 is disabled. this config is overwritten in start.sh
|
||||
# we need unbound to work as this is required for installer.sh to do any DNS requests
|
||||
ip6=$([[ -s /proc/net/if_inet6 ]] && echo "yes" || echo "no")
|
||||
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: ${ip6}" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
systemctl restart unbound
|
||||
18
box.js
18
box.js
@@ -2,13 +2,14 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
const dockerProxy = require('./src/dockerproxy.js'),
|
||||
fs = require('fs'),
|
||||
const fs = require('fs'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
paths = require('./src/paths.js'),
|
||||
proxyAuth = require('./src/proxyauth.js'),
|
||||
safe = require('safetydance'),
|
||||
server = require('./src/server.js');
|
||||
server = require('./src/server.js'),
|
||||
settings = require('./src/settings.js'),
|
||||
userdirectory = require('./src/userdirectory.js');
|
||||
|
||||
let logFd;
|
||||
|
||||
@@ -36,15 +37,16 @@ async function startServers() {
|
||||
await server.start(); // do this first since it also inits the database
|
||||
await proxyAuth.start();
|
||||
await ldap.start();
|
||||
await dockerProxy.start();
|
||||
|
||||
const conf = await settings.getUserDirectoryConfig();
|
||||
if (conf.enabled) await userdirectory.start();
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const [error] = await safe(startServers());
|
||||
if (error) return exitSync({ error: new Error(`Error starting server: ${JSON.stringify(error)}`), code: 1 });
|
||||
|
||||
// require those here so that logging handler is already setup
|
||||
require('supererror');
|
||||
// require this here so that logging handler is already setup
|
||||
const debug = require('debug')('box:box');
|
||||
|
||||
process.on('SIGINT', async function () {
|
||||
@@ -52,8 +54,8 @@ async function main() {
|
||||
|
||||
await proxyAuth.stop();
|
||||
await server.stop();
|
||||
await userdirectory.stop();
|
||||
await ldap.stop();
|
||||
await dockerProxy.stop();
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
@@ -62,8 +64,8 @@ async function main() {
|
||||
|
||||
await proxyAuth.stop();
|
||||
await server.stop();
|
||||
await userdirectory.stop();
|
||||
await ldap.stop();
|
||||
await dockerProxy.stop();
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
|
||||
@@ -12,6 +12,8 @@ exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM domains', [ ], function (error, domains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// this code is br0ken since async 3.x since async functions won't get iteratorDone anymore
|
||||
// no point fixing this migration though since it won't run again in old cloudrons. and in new cloudron domains will be empty
|
||||
async.eachSeries(domains, async function (domain, iteratorDone) {
|
||||
let fallbackCertificate = safe.JSON.parse(domain.fallbackCertificateJson);
|
||||
if (!fallbackCertificate || !fallbackCertificate.cert || !fallbackCertificate.key) {
|
||||
|
||||
9
migrations/20211117070204-blobs-delete-dhparams.js
Normal file
9
migrations/20211117070204-blobs-delete-dhparams.js
Normal file
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM blobs WHERE id=?', [ 'dhparams' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE source sourceJson TEXT', []),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE data dataJson TEXT', []),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE sourceJson source TEXT', []),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CHANGE dataJson data TEXT', []),
|
||||
], callback);
|
||||
};
|
||||
17
migrations/20220107015249-settings-sysinfo-ip-to-ipv4.js
Normal file
17
migrations/20220107015249-settings-sysinfo-ip-to-ipv4.js
Normal file
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('SELECT value FROM settings WHERE name=?', [ 'sysinfo_config' ], function (error, result) {
|
||||
if (error || result.length === 0) return callback(error);
|
||||
const sysinfoConfig = JSON.parse(result[0].value);
|
||||
if (sysinfoConfig.provider !== 'fixed' || !sysinfoConfig.ip) return callback();
|
||||
sysinfoConfig.ipv4 = sysinfoConfig.ip;
|
||||
delete sysinfoConfig.ip;
|
||||
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'sysinfo_config', JSON.stringify(sysinfoConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'directory_config', 'profile_config' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'profile_config', 'directory_config' ], callback);
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE subdomains ADD COLUMN environmentVariable VARCHAR(128)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE subdomains DROP COLUMN environmentVariable', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
19
migrations/20220202011148-blobs-migrate-token-secret.js
Normal file
19
migrations/20220202011148-blobs-migrate-token-secret.js
Normal file
@@ -0,0 +1,19 @@
|
||||
'use strict';
|
||||
|
||||
const safe = require('safetydance');
|
||||
|
||||
const PROXY_AUTH_TOKEN_SECRET_FILE = '/home/yellowtent/platformdata/proxy-auth-token-secret';
|
||||
|
||||
exports.up = function (db, callback) {
|
||||
const token = safe.fs.readFileSync(PROXY_AUTH_TOKEN_SECRET_FILE);
|
||||
if (!token) return callback();
|
||||
db.runSql('INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'proxy_auth_token_secret', token ], function (error) {
|
||||
if (error) return callback(error);
|
||||
safe.fs.unlinkSync(PROXY_AUTH_TOKEN_SECRET_FILE);
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
12
migrations/20220207214956-rename-subdomains-to-locations.js
Normal file
12
migrations/20220207214956-rename-subdomains-to-locations.js
Normal file
@@ -0,0 +1,12 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('RENAME TABLE subdomains TO locations', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
27
migrations/20220219220751-mail-ensure-dkimKey.js
Normal file
27
migrations/20220219220751-mail-ensure-dkimKey.js
Normal file
@@ -0,0 +1,27 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
mail = require('../src/mail.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
// it seems some mail domains do not have dkimKey in the database for some reason because of some previous bad migration
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM mail', [ ], function (error, mailDomains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(mailDomains, function (mailDomain, iteratorDone) {
|
||||
let dkimKey = safe.JSON.parse(mailDomain.dkimKeyJson);
|
||||
if (dkimKey && dkimKey.publicKey && dkimKey.privateKey) return iteratorDone();
|
||||
console.log(`${mailDomain.domain} has no dkim key in the database. generating a new one`);
|
||||
util.callbackify(mail.generateDkimKey)(function (error, dkimKey) {
|
||||
if (error) return iteratorDone(error);
|
||||
db.runSql('UPDATE mail SET dkimKeyJson=? WHERE domain=?', [ JSON.stringify(dkimKey), mailDomain.domain ], iteratorDone);
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
9
migrations/20220331193223-settings-delete-licenseKey.js
Normal file
9
migrations/20220331193223-settings-delete-licenseKey.js
Normal file
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM settings WHERE name=?', [ 'license_key' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'appstore_api_token', 'cloudron_token' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
37
migrations/20220401045439-settings-get-appstore-web-token.js
Normal file
37
migrations/20220401045439-settings-get-appstore-web-token.js
Normal file
@@ -0,0 +1,37 @@
|
||||
'use strict';
|
||||
|
||||
const superagent = require('superagent');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="api_server_origin"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
const apiServerOrigin = results[0].value;
|
||||
|
||||
db.all('SELECT value FROM settings WHERE name="appstore_api_token"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
const apiToken = results[0].value;
|
||||
|
||||
console.log(`Getting appstore web token from ${apiServerOrigin}`);
|
||||
|
||||
superagent.post(`${apiServerOrigin}/api/v1/user_token`)
|
||||
.send({})
|
||||
.query({ accessToken: apiToken })
|
||||
.timeout(30 * 1000).end(function (error, response) {
|
||||
if (error && !error.response) {
|
||||
console.log('Network error getting web token', error);
|
||||
return callback();
|
||||
}
|
||||
if (response.statusCode !== 201 || !response.body.accessToken) {
|
||||
console.log(`Bad status getting web token: ${response.status} ${response.text}`);
|
||||
return callback();
|
||||
}
|
||||
|
||||
db.runSql('INSERT settings (name, value) VALUES(?, ?)', [ 'appstore_web_token', response.body.accessToken ], callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
16
migrations/20220402233643-backups-add-label.js
Normal file
16
migrations/20220402233643-backups-add-label.js
Normal file
@@ -0,0 +1,16 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN label VARCHAR(128) DEFAULT ""', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN label', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
51
migrations/20220404211215-backups-rename-id-to-remotePath.js
Normal file
51
migrations/20220404211215-backups-rename-id-to-remotePath.js
Normal file
@@ -0,0 +1,51 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async'),
|
||||
hat = require('../src/hat.js');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * from backups', function (error, allBackups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
console.log(`Fixing up ${allBackups.length} backup entries`);
|
||||
const idMap = {};
|
||||
allBackups.forEach(b => {
|
||||
b.remotePath = b.id;
|
||||
b.id = `${b.type}_${b.identifier}_v${b.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
|
||||
idMap[b.remotePath] = b.id;
|
||||
});
|
||||
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN remotePath VARCHAR(256)', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups CHANGE COLUMN dependsOn dependsOnJson TEXT', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(allBackups, function (backup, iteratorDone) {
|
||||
const dependsOnPaths = backup.dependsOn ? backup.dependsOn.split(',') : []; // previously, it was paths
|
||||
let dependsOnIds = [];
|
||||
dependsOnPaths.forEach(p => { if (idMap[p]) dependsOnIds.push(idMap[p]); });
|
||||
|
||||
db.runSql('UPDATE backups SET id = ?, remotePath = ?, dependsOnJson = ? WHERE id = ?', [ backup.id, backup.remotePath, JSON.stringify(dependsOnIds), backup.remotePath ], iteratorDone);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups MODIFY COLUMN remotePath VARCHAR(256) NOT NULL UNIQUE', callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN remotePath', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE backups RENAME COLUMN dependsOnJson to dependsOn', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
22
migrations/20220426060528-make-apps-sso-consistent.js
Normal file
22
migrations/20220426060528-make-apps-sso-consistent.js
Normal file
@@ -0,0 +1,22 @@
|
||||
'use strict';
|
||||
|
||||
const async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM apps', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
const manifest = JSON.parse(app.manifestJson);
|
||||
const hasSso = !!manifest.addons['proxyAuth'] || !!manifest.addons['ldap'];
|
||||
if (hasSso || !app.sso) return iteratorDone();
|
||||
|
||||
console.log(`Unsetting sso flag of ${app.id}`);
|
||||
db.runSql('UPDATE apps SET sso=? WHERE id=?', [ 0, app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
20
migrations/20220505162417-settings-add-console-origin.js
Normal file
20
migrations/20220505162417-settings-add-console-origin.js
Normal file
@@ -0,0 +1,20 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM settings WHERE name = ?', [ 'api_server_origin' ], function (error, result) {
|
||||
if (error || result.length === 0) return callback(error);
|
||||
|
||||
let consoleOrigin;
|
||||
switch (result[0].value) {
|
||||
case 'https://api.dev.cloudron.io': consoleOrigin = 'https://console.dev.cloudron.io'; break;
|
||||
case 'https://api.staging.cloudron.io': consoleOrigin = 'https://console.staging.cloudron.io'; break;
|
||||
default: consoleOrigin = 'https://console.cloudron.io'; break;
|
||||
}
|
||||
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'console_server_origin', consoleOrigin ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
9
migrations/20220514170234-users-add-backgroundImage.js
Normal file
9
migrations/20220514170234-users-add-backgroundImage.js
Normal file
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN backgroundImage MEDIUMBLOB', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN backgroundImage', callback);
|
||||
};
|
||||
@@ -33,6 +33,7 @@ CREATE TABLE IF NOT EXISTS users(
|
||||
resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
active BOOLEAN DEFAULT 1,
|
||||
avatar MEDIUMBLOB NOT NULL,
|
||||
backgroundImage MEDIUMBLOB,
|
||||
loginLocationsJson MEDIUMTEXT, // { locations: [{ ip, userAgent, city, country, ts }] }
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
@@ -98,6 +99,7 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
containerIp VARCHAR(16) UNIQUE, // this is not-null because of ip allocation fails, user can 'repair'
|
||||
appStoreIcon MEDIUMBLOB,
|
||||
icon MEDIUMBLOB,
|
||||
crontab TEXT,
|
||||
|
||||
FOREIGN KEY(mailboxDomain) REFERENCES domains(domain),
|
||||
FOREIGN KEY(taskId) REFERENCES tasks(id),
|
||||
@@ -132,12 +134,14 @@ CREATE TABLE IF NOT EXISTS appEnvVars(
|
||||
|
||||
CREATE TABLE IF NOT EXISTS backups(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
remotePath VARCHAR(256) NOT NULL UNIQUE,
|
||||
label VARCHAR(128) DEFAULT "",
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
packageVersion VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
encryptionVersion INTEGER, /* when null, unencrypted backup */
|
||||
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
|
||||
identifier VARCHAR(128) NOT NULL, /* 'box' or the app id */
|
||||
dependsOn TEXT, /* comma separate list of objects this backup depends on */
|
||||
dependsOnJson TEXT, /* comma separate list of objects this backup depends on */
|
||||
state VARCHAR(16) NOT NULL,
|
||||
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
|
||||
format VARCHAR(16) DEFAULT "tgz",
|
||||
@@ -149,8 +153,8 @@ CREATE TABLE IF NOT EXISTS backups(
|
||||
CREATE TABLE IF NOT EXISTS eventlog(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
action VARCHAR(128) NOT NULL,
|
||||
source TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data TEXT, /* free flowing json based on action */
|
||||
sourceJson TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
dataJson TEXT, /* free flowing json based on action */
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
INDEX creationTime_index (creationTime),
|
||||
@@ -213,11 +217,12 @@ CREATE TABLE IF NOT EXISTS mailboxes(
|
||||
FOREIGN KEY(aliasDomain) REFERENCES mail(domain),
|
||||
UNIQUE (name, domain));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS subdomains(
|
||||
CREATE TABLE IF NOT EXISTS locations(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
domain VARCHAR(128) NOT NULL,
|
||||
subdomain VARCHAR(128) NOT NULL,
|
||||
type VARCHAR(128) NOT NULL, /* primary or redirect */
|
||||
type VARCHAR(128) NOT NULL, /* primary, secondary, redirect, alias */
|
||||
environmentVariable VARCHAR(128), /* only set for secondary */
|
||||
|
||||
certificateJson MEDIUMTEXT,
|
||||
|
||||
@@ -285,7 +290,7 @@ CREATE TABLE IF NOT EXISTS appMounts(
|
||||
|
||||
CREATE TABLE IF NOT EXISTS blobs(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
value TEXT,
|
||||
value MEDIUMBLOB,
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CHARACTER SET utf8 COLLATE utf8_bin;
|
||||
|
||||
11479
package-lock.json
generated
11479
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
72
package.json
72
package.json
@@ -11,77 +11,67 @@
|
||||
"url": "https://git.cloudron.io/cloudron/box.git"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google-cloud/dns": "^2.2.0",
|
||||
"@google-cloud/storage": "^5.8.5",
|
||||
"@google-cloud/dns": "^2.2.4",
|
||||
"@google-cloud/storage": "^5.19.2",
|
||||
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
|
||||
"async": "^3.2.0",
|
||||
"aws-sdk": "^2.936.0",
|
||||
"async": "^3.2.3",
|
||||
"aws-sdk": "^2.1115.0",
|
||||
"basic-auth": "^2.0.1",
|
||||
"body-parser": "^1.19.0",
|
||||
"cloudron-manifestformat": "^5.10.2",
|
||||
"body-parser": "^1.20.0",
|
||||
"cloudron-manifestformat": "^5.16.0",
|
||||
"connect": "^3.7.0",
|
||||
"connect-lastmile": "^2.1.1",
|
||||
"connect-timeout": "^1.9.0",
|
||||
"cookie-parser": "^1.4.5",
|
||||
"cookie-session": "^1.4.0",
|
||||
"cookie-parser": "^1.4.6",
|
||||
"cookie-session": "^2.0.0",
|
||||
"cron": "^1.8.2",
|
||||
"db-migrate": "^0.11.12",
|
||||
"db-migrate-mysql": "^2.1.2",
|
||||
"debug": "^4.3.1",
|
||||
"delay": "^5.0.0",
|
||||
"dockerode": "^3.3.0",
|
||||
"db-migrate": "^0.11.13",
|
||||
"db-migrate-mysql": "^2.2.0",
|
||||
"debug": "^4.3.4",
|
||||
"dockerode": "^3.3.1",
|
||||
"ejs": "^3.1.6",
|
||||
"ejs-cli": "^2.2.1",
|
||||
"express": "^4.17.1",
|
||||
"ejs-cli": "^2.2.3",
|
||||
"express": "^4.17.3",
|
||||
"ipaddr.js": "^2.0.1",
|
||||
"js-yaml": "^4.1.0",
|
||||
"json": "^11.0.0",
|
||||
"jsonwebtoken": "^8.5.1",
|
||||
"ldapjs": "^2.3.0",
|
||||
"ldapjs": "^2.3.2",
|
||||
"lodash": "^4.17.21",
|
||||
"lodash.chunk": "^4.2.0",
|
||||
"mime": "^2.5.2",
|
||||
"moment": "^2.29.1",
|
||||
"moment-timezone": "^0.5.33",
|
||||
"moment": "^2.29.2",
|
||||
"moment-timezone": "^0.5.34",
|
||||
"morgan": "^1.10.0",
|
||||
"multiparty": "^4.2.2",
|
||||
"mustache-express": "^1.3.1",
|
||||
"multiparty": "^4.2.3",
|
||||
"mysql": "^2.18.1",
|
||||
"nodemailer": "^6.6.2",
|
||||
"nodemailer": "^6.7.3",
|
||||
"nodemailer-smtp-transport": "^2.7.4",
|
||||
"once": "^1.4.0",
|
||||
"pretty-bytes": "^5.6.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"proxy-middleware": "^0.15.0",
|
||||
"qrcode": "^1.4.4",
|
||||
"qrcode": "^1.5.0",
|
||||
"readdirp": "^3.6.0",
|
||||
"request": "^2.88.2",
|
||||
"s3-block-read-stream": "^0.5.0",
|
||||
"safetydance": "^2.2.0",
|
||||
"semver": "^7.3.5",
|
||||
"semver": "^7.3.7",
|
||||
"speakeasy": "^2.0.0",
|
||||
"split": "^1.0.1",
|
||||
"superagent": "^6.1.0",
|
||||
"supererror": "^0.7.2",
|
||||
"superagent": "^7.1.1",
|
||||
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
|
||||
"tar-stream": "^2.2.0",
|
||||
"tldjs": "^2.3.1",
|
||||
"ua-parser-js": "^0.7.28",
|
||||
"underscore": "^1.13.1",
|
||||
"ua-parser-js": "^1.0.2",
|
||||
"underscore": "^1.13.2",
|
||||
"uuid": "^8.3.2",
|
||||
"validator": "^13.6.0",
|
||||
"ws": "^7.5.1",
|
||||
"validator": "^13.7.0",
|
||||
"ws": "^8.5.0",
|
||||
"xml2js": "^0.4.23"
|
||||
},
|
||||
"devDependencies": {
|
||||
"expect.js": "*",
|
||||
"hock": "^1.4.1",
|
||||
"js2xmlparser": "^4.0.1",
|
||||
"mocha": "^9.0.1",
|
||||
"js2xmlparser": "^4.0.2",
|
||||
"mocha": "^9.2.2",
|
||||
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
|
||||
"nock": "^13.1.0",
|
||||
"node-sass": "^6.0.1",
|
||||
"recursive-readdir": "^2.2.2"
|
||||
"nock": "^13.2.4",
|
||||
"node-sass": "^7.0.1",
|
||||
"nyc": "^15.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "./runTests",
|
||||
|
||||
11
runTests
11
runTests
@@ -45,7 +45,7 @@ else
|
||||
fi
|
||||
|
||||
# create docker network (while the infra code does this, most tests skip infra setup)
|
||||
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 cloudron || true
|
||||
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 --gateway 172.18.0.1 cloudron --ipv6 --subnet=fd00:c107:d509::/64 || true
|
||||
|
||||
# create the same mysql server version to test with
|
||||
OUT=`docker inspect mysql-server` || true
|
||||
@@ -79,10 +79,15 @@ echo "=> Run database migrations"
|
||||
cd "${source_dir}"
|
||||
BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up
|
||||
|
||||
echo "=> Run tests with mocha"
|
||||
TESTS=${DEFAULT_TESTS}
|
||||
if [[ $# -gt 0 ]]; then
|
||||
TESTS="$*"
|
||||
fi
|
||||
|
||||
BOX_ENV=test ./node_modules/mocha/bin/_mocha --bail --no-timeouts --exit -R spec ${TESTS}
|
||||
if [[ -z ${COVERAGE+x} ]]; then
|
||||
echo "=> Run tests with mocha"
|
||||
BOX_ENV=test ./node_modules/.bin/mocha --bail --no-timeouts --exit -R spec ${TESTS}
|
||||
else
|
||||
echo "=> Run tests with mocha and coverage"
|
||||
BOX_ENV=test ./node_modules/.bin/nyc --reporter=html ./node_modules/.bin/mocha --no-timeouts --exit -R spec ${TESTS}
|
||||
fi
|
||||
|
||||
@@ -46,23 +46,30 @@ if [[ "$(uname -m)" != "x86_64" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if cvirt=$(systemd-detect-virt --container); then
|
||||
echo "Error: Cloudron does not support ${cvirt}, only runs on bare metal or with full hardware virtualization"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# do not use is-active in case box service is down and user attempts to re-install
|
||||
if systemctl cat box.service >/dev/null 2>&1; then
|
||||
echo "Error: Cloudron is already installed. To reinstall, start afresh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
initBaseImage="true"
|
||||
provider="generic"
|
||||
requestedVersion=""
|
||||
installServerOrigin="https://api.cloudron.io"
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
consoleServerOrigin="https://console.cloudron.io"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
setupToken=""
|
||||
setupToken="" # this is a OTP for securing an installation (https://forum.cloudron.io/topic/6389/add-password-for-initial-configuration)
|
||||
appstoreSetupToken=""
|
||||
redo="false"
|
||||
|
||||
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,generate-setup-token" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,provider:,version:,env:,skip-reboot,generate-setup-token,setup-token:,redo" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -74,17 +81,20 @@ while true; do
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
webServerOrigin="https://dev.cloudron.io"
|
||||
consoleServerOrigin="https://console.dev.cloudron.io"
|
||||
installServerOrigin="https://api.dev.cloudron.io"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
consoleServerOrigin="https://console.staging.cloudron.io"
|
||||
installServerOrigin="https://api.staging.cloudron.io"
|
||||
elif [[ "$2" == "unstable" ]]; then
|
||||
installServerOrigin="https://api.dev.cloudron.io"
|
||||
fi
|
||||
shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--redo) redo="true"; shift;;
|
||||
--setup-token) appstoreSetupToken="$2"; shift 2;;
|
||||
--generate-setup-token) setupToken="$(openssl rand -hex 10)"; shift;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
@@ -99,14 +109,16 @@ fi
|
||||
|
||||
# Only --help works with mismatched ubuntu
|
||||
ubuntu_version=$(lsb_release -rs)
|
||||
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" ]]; then
|
||||
echo "Cloudron requires Ubuntu 16.04, 18.04 or 20.04" > /dev/stderr
|
||||
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" && "${ubuntu_version}" != "22.04" ]]; then
|
||||
echo "Cloudron requires Ubuntu 18.04, 20.04, 22.04" > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if which nginx >/dev/null || which docker >/dev/null || which node > /dev/null; then
|
||||
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
|
||||
exit 1
|
||||
if [[ "${redo}" == "false" ]]; then
|
||||
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
|
||||
@@ -143,17 +155,15 @@ echo ""
|
||||
echo " Join us at https://forum.cloudron.io for any questions."
|
||||
echo ""
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories. See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories. See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
|
||||
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
|
||||
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=> Checking version"
|
||||
@@ -173,7 +183,7 @@ if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "=> Downloading version ${version} ..."
|
||||
echo "=> Downloading Cloudron version ${version} ..."
|
||||
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
|
||||
|
||||
if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
@@ -181,18 +191,16 @@ if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
# initializeBaseUbuntuImage.sh args (provider, infraversion path) are only to support installation of pre 5.3 Cloudrons
|
||||
if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "generic" "../src" &>> "${LOG_FILE}"; then
|
||||
echo "Init script failed. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
init_ubuntu_script=$(test -f "${box_src_tmp_dir}/scripts/init-ubuntu.sh" && echo "${box_src_tmp_dir}/scripts/init-ubuntu.sh" || echo "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh")
|
||||
if ! /bin/bash "${init_ubuntu_script}" &>> "${LOG_FILE}"; then
|
||||
echo "Init script failed. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# The provider flag is still used for marketplace images
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
echo "=> Installing Cloudron version ${version} (this takes some time) ..."
|
||||
mkdir -p /etc/cloudron
|
||||
echo "${provider}" > /etc/cloudron/PROVIDER
|
||||
[[ ! -z "${setupToken}" ]] && echo "${setupToken}" > /etc/cloudron/SETUP_TOKEN
|
||||
@@ -204,6 +212,20 @@ fi
|
||||
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('api_server_origin', '${apiServerOrigin}');" 2>/dev/null
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('web_server_origin', '${webServerOrigin}');" 2>/dev/null
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('console_server_origin', '${consoleServerOrigin}');" 2>/dev/null
|
||||
|
||||
if [[ -n "${appstoreSetupToken}" ]]; then
|
||||
if ! setupResponse=$(curl -sX POST -H "Content-type: application/json" --data "{\"setupToken\": \"${appstoreSetupToken}\"}" "${apiServerOrigin}/api/v1/cloudron_setup_done"); then
|
||||
echo "Could not complete setup. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cloudronId=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronId"])')
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('cloudron_id', '${cloudronId}');" 2>/dev/null
|
||||
|
||||
appstoreApiToken=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronToken"])')
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('appstore_api_token', '${appstoreApiToken}');" 2>/dev/null
|
||||
fi
|
||||
|
||||
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
|
||||
while true; do
|
||||
@@ -214,7 +236,7 @@ while true; do
|
||||
sleep 10
|
||||
done
|
||||
|
||||
if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
|
||||
if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
|
||||
ip='<IP>'
|
||||
fi
|
||||
if [[ -z "${setupToken}" ]]; then
|
||||
|
||||
@@ -8,14 +8,15 @@ set -eu -o pipefail
|
||||
PASTEBIN="https://paste.cloudron.io"
|
||||
OUT="/tmp/cloudron-support.log"
|
||||
LINE="\n========================================================\n"
|
||||
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQVilclYAIu+ioDp/sgzzFz6YU0hPcRYY7ze/LiF/lC7uQqK062O54BFXTvQ3ehtFZCx3bNckjlT2e6gB8Qq07OM66De4/S/g+HJW4TReY2ppSPMVNag0TNGxDzVH8pPHOysAm33LqT2b6L/wEXwC6zWFXhOhHjcMqXvi8Ejaj20H1HVVcf/j8qs5Thkp9nAaFTgQTPu8pgwD8wDeYX1hc9d0PYGesTADvo6HF4hLEoEnefLw7PaStEbzk2fD3j7/g5r5HcgQQXBe74xYZ/1gWOX2pFNuRYOBSEIrNfJEjFJsqk3NR1+ZoMGK7j+AZBR4k0xbrmncQLcQzl6MMDzkp support@cloudron.io"
|
||||
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGWS+930b8QdzbchGljt3KSljH9wRhYvht8srrtQHdzg support@cloudron.io"
|
||||
HELP_MESSAGE="
|
||||
This script collects diagnostic information to help debug server related issues
|
||||
This script collects diagnostic information to help debug server related issues.
|
||||
|
||||
Options:
|
||||
--owner-login Login as owner
|
||||
--enable-ssh Enable SSH access for the Cloudron support team
|
||||
--help Show this message
|
||||
--owner-login Login as owner
|
||||
--enable-ssh Enable SSH access for the Cloudron support team
|
||||
--reset-appstore-account Reset associated cloudron.io account
|
||||
--help Show this message
|
||||
"
|
||||
|
||||
# We require root
|
||||
@@ -26,7 +27,7 @@ fi
|
||||
|
||||
enableSSH="false"
|
||||
|
||||
args=$(getopt -o "" -l "help,enable-ssh,admin-login,owner-login" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,enable-ssh,admin-login,owner-login,reset-appstore-account" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -44,6 +45,15 @@ while true; do
|
||||
echo "Login at https://${dashboard_domain} as ${admin_username} / ${admin_password} . This password may only be used once."
|
||||
exit 0
|
||||
;;
|
||||
--reset-appstore-account)
|
||||
echo -e "This will reset the Cloudron.io account associated with this Cloudron. Once reset, you can re-login with a different account in the Cloudron Dashboard. See https://docs.cloudron.io/appstore/#change-account for more information.\n"
|
||||
read -e -p "Reset the Cloudron.io account? [y/N] " choice
|
||||
[[ "$choice" != [Yy]* ]] && exit 1
|
||||
mysql -uroot -ppassword -e "DELETE FROM box.settings WHERE name='cloudron_token';" 2>/dev/null
|
||||
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/null)
|
||||
echo "Account reset. Please re-login at https://${dashboard_domain}/#/appstore"
|
||||
exit 0
|
||||
;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -67,6 +77,31 @@ if [[ "`df --output="avail" /tmp | sed -n 2p`" -lt "5120" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
|
||||
ssh_user="cloudron-support"
|
||||
keys_file="/home/cloudron-support/.ssh/authorized_keys"
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
echo "Key file: ${keys_file}" >> $OUT
|
||||
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
|
||||
touch "${keys_file}" # required for concat to work
|
||||
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
|
||||
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
|
||||
chmod 600 "${keys_file}"
|
||||
chown "${ssh_user}" "${keys_file}"
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -n "Generating Cloudron Support stats..."
|
||||
|
||||
# clear file
|
||||
@@ -109,40 +144,8 @@ iptables -L &>> $OUT
|
||||
|
||||
echo "Done"
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
permit_root_login=$(grep -q ^PermitRootLogin.*yes /etc/ssh/sshd_config && echo "yes" || echo "no")
|
||||
|
||||
# support.js uses similar logic
|
||||
if [[ -d /home/ubuntu ]]; then
|
||||
ssh_user="ubuntu"
|
||||
keys_file="/home/ubuntu/.ssh/authorized_keys"
|
||||
else
|
||||
ssh_user="root"
|
||||
keys_file="/root/.ssh/authorized_keys"
|
||||
fi
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
echo "PermitRootLogin: ${permit_root_login}" >> $OUT
|
||||
echo "Key file: ${keys_file}" >> $OUT
|
||||
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
|
||||
touch "${keys_file}" # required for concat to work
|
||||
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
|
||||
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
|
||||
chmod 600 "${keys_file}"
|
||||
chown "${ssh_user}" "${keys_file}"
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
fi
|
||||
|
||||
echo -n "Uploading information..."
|
||||
# for some reason not using $(cat $OUT) will not contain newlines!?
|
||||
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent -d "$(cat $OUT)" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
|
||||
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent --data-binary "@$OUT" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
|
||||
echo "Done"
|
||||
|
||||
echo ""
|
||||
|
||||
@@ -41,8 +41,8 @@ if ! $(cd "${SOURCE_DIR}/../dashboard" && git diff --exit-code >/dev/null); then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(node --version)" != "v14.17.6" ]]; then
|
||||
echo "This script requires node 14.17.6"
|
||||
if [[ "$(node --version)" != "v16.13.1" ]]; then
|
||||
echo "This script requires node 16.13.1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
198
scripts/init-ubuntu.sh
Executable file
198
scripts/init-ubuntu.sh
Executable file
@@ -0,0 +1,198 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is run on the base ubuntu. Put things here which are managed by ubuntu
|
||||
|
||||
set -euv -o pipefail
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
readonly arg_infraversionpath="${SOURCE_DIR}/../src"
|
||||
|
||||
function die {
|
||||
echo $1
|
||||
exit 1
|
||||
}
|
||||
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
readonly ubuntu_codename=$(lsb_release -cs)
|
||||
readonly ubuntu_version=$(lsb_release -rs)
|
||||
|
||||
# hold grub since updating it breaks on some VPS providers. also, dist-upgrade will trigger it
|
||||
apt-mark hold grub* >/dev/null
|
||||
apt-get -o Dpkg::Options::="--force-confdef" update -y
|
||||
apt-get -o Dpkg::Options::="--force-confdef" upgrade -y
|
||||
apt-mark unhold grub* >/dev/null
|
||||
|
||||
echo "==> Installing required packages"
|
||||
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
|
||||
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
|
||||
case "${ubuntu_version}" in
|
||||
16.04)
|
||||
gpg_package="gnupg"
|
||||
mysql_package="mysql-server-5.7"
|
||||
ntpd_package=""
|
||||
python_package="python2.7"
|
||||
nginx_package="" # we use custom package for TLS v1.3 support
|
||||
;;
|
||||
18.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-5.7"
|
||||
ntpd_package=""
|
||||
python_package="python2.7"
|
||||
nginx_package="" # we use custom package for TLS v1.3 support
|
||||
;;
|
||||
20.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-8.0"
|
||||
ntpd_package="systemd-timesyncd"
|
||||
python_package="python3.8"
|
||||
nginx_package="nginx-full"
|
||||
;;
|
||||
22.04)
|
||||
gpg_package="gpg"
|
||||
mysql_package="mysql-server-8.0"
|
||||
ntpd_package="systemd-timesyncd"
|
||||
python_package="python3.10"
|
||||
nginx_package="nginx-full"
|
||||
;;
|
||||
esac
|
||||
|
||||
apt-get -y install --no-install-recommends \
|
||||
acl \
|
||||
apparmor \
|
||||
build-essential \
|
||||
cifs-utils \
|
||||
cron \
|
||||
curl \
|
||||
debconf-utils \
|
||||
dmsetup \
|
||||
$gpg_package \
|
||||
ipset \
|
||||
iptables \
|
||||
lib${python_package} \
|
||||
linux-generic \
|
||||
logrotate \
|
||||
$mysql_package \
|
||||
nfs-common \
|
||||
$nginx_package \
|
||||
$ntpd_package \
|
||||
openssh-server \
|
||||
pwgen \
|
||||
resolvconf \
|
||||
sshfs \
|
||||
swaks \
|
||||
tzdata \
|
||||
unattended-upgrades \
|
||||
unbound \
|
||||
unzip \
|
||||
xfsprogs
|
||||
|
||||
# on some providers like scaleway the sudo file is changed and we want to keep the old one
|
||||
apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends sudo
|
||||
|
||||
# this ensures that unattended upgades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
apt-get install -y --no-install-recommends $python_package # Install python which is required for npm rebuild
|
||||
|
||||
# do not upgrade grub because it might prompt user and break this script
|
||||
echo "==> Enable memory accounting"
|
||||
apt-get -y --no-upgrade --no-install-recommends install grub2-common
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub
|
||||
|
||||
echo "==> Install collectd"
|
||||
# without this, libnotify4 will install gnome-shell
|
||||
apt-get install -y libnotify4 libcurl3-gnutls --no-install-recommends
|
||||
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
|
||||
if [[ "${ubuntu_version}" == "22.04" ]]; then
|
||||
readonly launchpad="https://launchpad.net/ubuntu/+source/collectd/5.12.0-9/+build/23189375/+files"
|
||||
cd /tmp && wget -q "${launchpad}/collectd_5.12.0-9_amd64.deb" "${launchpad}/collectd-utils_5.12.0-9_amd64.deb" "${launchpad}/collectd-core_5.12.0-9_amd64.deb" "${launchpad}/libcollectdclient1_5.12.0-9_amd64.deb"
|
||||
cd /tmp && apt install -y --no-install-recommends ./libcollectdclient1_5.12.0-9_amd64.deb ./collectd-core_5.12.0-9_amd64.deb ./collectd_5.12.0-9_amd64.deb ./collectd-utils_5.12.0-9_amd64.deb && rm -f /tmp/collectd_*.deb
|
||||
echo -e "\nLD_PRELOAD=/usr/lib/python3.10/config-3.10-x86_64-linux-gnu/libpython3.10.so" >> /etc/default/collectd
|
||||
else
|
||||
if ! apt-get install -y --no-install-recommends collectd collectd-utils; then
|
||||
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
|
||||
echo "Failed to install collectd, continuing anyway. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
|
||||
if [[ "${ubuntu_version}" == "20.04" ]]; then
|
||||
echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
|
||||
fi
|
||||
fi
|
||||
|
||||
# some hosts like atlantic install ntp which conflicts with timedatectl. https://serverfault.com/questions/1024770/ubuntu-20-04-time-sync-problems-and-possibly-incorrect-status-information
|
||||
echo "==> Configuring host"
|
||||
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
|
||||
if systemctl is-active ntp; then
|
||||
systemctl stop ntp
|
||||
apt purge -y ntp
|
||||
fi
|
||||
timedatectl set-ntp 1
|
||||
# mysql follows the system timezone
|
||||
timedatectl set-timezone UTC
|
||||
|
||||
echo "==> Adding sshd configuration warning"
|
||||
sed -e '/Port 22/ i # NOTE: Cloudron only supports moving SSH to port 202. See https://docs.cloudron.io/security/#securing-ssh-access' -i /etc/ssh/sshd_config
|
||||
|
||||
# https://bugs.launchpad.net/ubuntu/+source/base-files/+bug/1701068
|
||||
echo "==> Disabling motd news"
|
||||
if [[ -f "/etc/default/motd-news" ]]; then
|
||||
sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
|
||||
fi
|
||||
|
||||
# If privacy extensions are not disabled on server, this breaks IPv6 detection
|
||||
# https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1068756
|
||||
if [[ ! -f /etc/sysctl.d/99-cloudimg-ipv6.conf ]]; then
|
||||
echo "==> Disable temporary address (IPv6)"
|
||||
echo -e "# See https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1068756\nnet.ipv6.conf.all.use_tempaddr = 0\nnet.ipv6.conf.default.use_tempaddr = 0\n\n" > /etc/sysctl.d/99-cloudimg-ipv6.conf
|
||||
fi
|
||||
|
||||
# Disable exim4 (1blu.de)
|
||||
systemctl stop exim4 || true
|
||||
systemctl disable exim4 || true
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
# on ssdnodes postfix seems to run by default
|
||||
systemctl stop postfix || true
|
||||
systemctl disable postfix || true
|
||||
|
||||
# on ubuntu 18.04 and 20.04, this is the default. this requires resolvconf for DNS to work further after the disable
|
||||
systemctl stop systemd-resolved || true
|
||||
systemctl disable systemd-resolved || true
|
||||
|
||||
# on vultr, ufw is enabled by default. we have our own firewall
|
||||
ufw disable || true
|
||||
|
||||
# we need unbound to work as this is required for installer.sh to do any DNS requests
|
||||
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: no" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
systemctl restart unbound
|
||||
|
||||
# Ubuntu 22 has private home directories by default (https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/)
|
||||
sed -e 's/^HOME_MODE\([[:space:]]\+\).*$/HOME_MODE\10755/' -i /etc/login.defs
|
||||
|
||||
# create the yellowtent user. system user has different numeric range, no age and won't show in login/gdm UI
|
||||
# the nologin will also disable su/login
|
||||
if ! id yellowtent 2>/dev/null; then
|
||||
useradd --system --comment "Cloudron Box" --create-home --shell /usr/sbin/nologin yellowtent
|
||||
fi
|
||||
|
||||
# add support user (no password, sudo)
|
||||
if ! id cloudron-support 2>/dev/null; then
|
||||
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
|
||||
fi
|
||||
|
||||
@@ -71,60 +71,60 @@ readonly is_update=$(systemctl is-active -q box && echo "yes" || echo "no")
|
||||
|
||||
log "Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION)"
|
||||
|
||||
log "updating docker"
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
readonly docker_version=20.10.14
|
||||
if ! which docker 2>/dev/null || [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
|
||||
log "installing/updating docker"
|
||||
|
||||
# create systemd drop-in file already to make sure images are with correct driver
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
readonly docker_version=20.10.7
|
||||
if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.6-1_amd64.deb" -o /tmp/containerd.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.5.11-1_amd64.deb" -o /tmp/containerd.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
|
||||
log "installing docker"
|
||||
prepare_apt_once
|
||||
|
||||
while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
|
||||
log "Failed to install docker. Retry"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
fi
|
||||
|
||||
# we want atleast nginx 1.14 for TLS v1.3 support. Ubuntu 20/22 already has nginx 1.18
|
||||
# Ubuntu 18 OpenSSL does not have TLS v1.3 support, so we use the upstream nginx packages
|
||||
readonly nginx_version=$(nginx -v 2>&1)
|
||||
if [[ "${nginx_version}" != *"1.18."* ]]; then
|
||||
log "installing nginx 1.18"
|
||||
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
|
||||
if [[ "${ubuntu_version}" == "20.04" ]]; then
|
||||
if [[ "${nginx_version}" == *"Ubuntu"* ]]; then
|
||||
log "switching nginx to ubuntu package"
|
||||
prepare_apt_once
|
||||
apt remove -y nginx
|
||||
apt install -y nginx-full
|
||||
fi
|
||||
elif [[ "${ubuntu_version}" == "18.04" ]]; then
|
||||
if [[ "${nginx_version}" != *"1.18."* ]]; then
|
||||
log "installing/updating nginx 1.18"
|
||||
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
|
||||
|
||||
prepare_apt_once
|
||||
prepare_apt_once
|
||||
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
|
||||
rm /tmp/nginx.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
|
||||
rm /tmp/nginx.deb
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! which mount.nfs; then
|
||||
log "installing nfs-common"
|
||||
prepare_apt_once
|
||||
apt install -y nfs-common
|
||||
fi
|
||||
|
||||
if ! which sshfs; then
|
||||
log "installing sshfs"
|
||||
prepare_apt_once
|
||||
apt install -y sshfs
|
||||
fi
|
||||
|
||||
log "updating node"
|
||||
readonly node_version=14.17.6
|
||||
if [[ "$(node --version)" != "v${node_version}" ]]; then
|
||||
readonly node_version=16.14.2
|
||||
if ! which node 2>/dev/null || [[ "$(node --version)" != "v${node_version}" ]]; then
|
||||
log "installing/updating node ${node_version}"
|
||||
mkdir -p /usr/local/node-${node_version}
|
||||
$curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-${node_version}
|
||||
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
|
||||
rm -rf /usr/local/node-14.15.4
|
||||
rm -rf /usr/local/node-16.13.1
|
||||
fi
|
||||
|
||||
# this is here (and not in updater.js) because rebuild requires the above node
|
||||
# note that rebuild requires the above node
|
||||
for try in `seq 1 10`; do
|
||||
# for reasons unknown, the dtrace package will fail. but rebuilding second time will work
|
||||
|
||||
@@ -142,7 +142,7 @@ if [[ ${try} -eq 10 ]]; then
|
||||
fi
|
||||
|
||||
log "downloading new addon images"
|
||||
images=$(node -e "var i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
|
||||
images=$(node -e "let i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
|
||||
|
||||
log "\tPulling docker images: ${images}"
|
||||
for image in ${images}; do
|
||||
@@ -159,7 +159,7 @@ done
|
||||
log "update cloudron-syslog"
|
||||
CLOUDRON_SYSLOG_DIR=/usr/local/cloudron-syslog
|
||||
CLOUDRON_SYSLOG="${CLOUDRON_SYSLOG_DIR}/bin/cloudron-syslog"
|
||||
CLOUDRON_SYSLOG_VERSION="1.0.3"
|
||||
CLOUDRON_SYSLOG_VERSION="1.1.0"
|
||||
while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLOUDRON_SYSLOG_VERSION} ]]; do
|
||||
rm -rf "${CLOUDRON_SYSLOG_DIR}"
|
||||
mkdir -p "${CLOUDRON_SYSLOG_DIR}"
|
||||
@@ -168,10 +168,15 @@ while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLO
|
||||
sleep 5
|
||||
done
|
||||
|
||||
if ! id "${user}" 2>/dev/null; then
|
||||
useradd "${user}" -m
|
||||
log "creating cloudron-support user"
|
||||
if ! id cloudron-support 2>/dev/null; then
|
||||
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
|
||||
fi
|
||||
|
||||
log "locking the ${user} account"
|
||||
usermod --shell /usr/sbin/nologin "${user}"
|
||||
passwd --lock "${user}"
|
||||
|
||||
if [[ "${is_update}" == "yes" ]]; then
|
||||
log "stop box service for update"
|
||||
${box_src_dir}/setup/stop.sh
|
||||
|
||||
@@ -37,8 +37,13 @@ systemctl enable apparmor
|
||||
systemctl restart apparmor
|
||||
|
||||
usermod ${USER} -a -G docker
|
||||
# unbound (which starts after box code) relies on this interface to exist. dockerproxy also relies on this.
|
||||
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 cloudron || true
|
||||
|
||||
if ! grep -q ip6tables /etc/systemd/system/docker.service.d/cloudron.conf; then
|
||||
log "Adding ip6tables flag to docker" # https://github.com/moby/moby/pull/41622
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
systemctl daemon-reload
|
||||
systemctl restart docker
|
||||
fi
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
mkdir -p "${APPS_DATA_DIR}"
|
||||
@@ -66,6 +71,8 @@ mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" \
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/update"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/sftp/ssh" # sftp keys
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/firewall"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/sshfs"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/cifs"
|
||||
|
||||
# ensure backups folder exists and is writeable
|
||||
mkdir -p /var/backups
|
||||
@@ -94,11 +101,6 @@ systemctl restart systemd-journald
|
||||
usermod -a -G adm ${USER}
|
||||
|
||||
log "Setting up unbound"
|
||||
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
|
||||
# If IP6 is not enabled, dns queries seem to fail on some hosts. -s returns false if file missing or 0 size
|
||||
ip6=$([[ -s /proc/net/if_inet6 ]] && echo "yes" || echo "no")
|
||||
cp -f "${script_dir}/start/unbound.conf" /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
# update the root anchor after a out-of-disk-space situation (see #269)
|
||||
unbound-anchor -a /var/lib/unbound/root.key
|
||||
@@ -114,7 +116,7 @@ systemctl enable box
|
||||
systemctl enable cloudron-firewall
|
||||
systemctl enable --now cloudron-disable-thp
|
||||
|
||||
# update firewall rules
|
||||
# update firewall rules. this must be done after docker created it's rules
|
||||
systemctl restart cloudron-firewall
|
||||
|
||||
# For logrotate
|
||||
@@ -127,26 +129,28 @@ systemctl restart unbound
|
||||
systemctl restart cloudron-syslog
|
||||
|
||||
log "Configuring sudoers"
|
||||
rm -f /etc/sudoers.d/${USER}
|
||||
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
|
||||
rm -f /etc/sudoers.d/${USER} /etc/sudoers.d/cloudron
|
||||
cp "${script_dir}/start/sudoers" /etc/sudoers.d/cloudron
|
||||
|
||||
log "Configuring collectd"
|
||||
rm -rf /etc/collectd /var/log/collectd.log
|
||||
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
|
||||
if [[ "${ubuntu_version}" == "20.04" ]]; then
|
||||
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
|
||||
if ! grep -q LD_PRELOAD /etc/default/collectd; then
|
||||
echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
|
||||
fi
|
||||
fi
|
||||
systemctl restart collectd
|
||||
|
||||
log "Configuring sysctl"
|
||||
# If privacy extensions are not disabled on server, this breaks IPv6 detection
|
||||
# https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1068756
|
||||
if [[ ! -f /etc/sysctl.d/99-cloudimg-ipv6.conf ]]; then
|
||||
echo "==> Disable temporary address (IPv6)"
|
||||
echo -e "# See https://bugs.launchpad.net/ubuntu/+source/procps/+bug/1068756\nnet.ipv6.conf.all.use_tempaddr = 0\nnet.ipv6.conf.default.use_tempaddr = 0\n\n" > /etc/sysctl.d/99-cloudimg-ipv6.conf
|
||||
sysctl -p
|
||||
fi
|
||||
|
||||
log "Configuring logrotate"
|
||||
if ! grep -q "^include ${PLATFORM_DATA_DIR}/logrotate.d" /etc/logrotate.conf; then
|
||||
echo -e "\ninclude ${PLATFORM_DATA_DIR}/logrotate.d\n" >> /etc/logrotate.conf
|
||||
fi
|
||||
rm -f "${PLATFORM_DATA_DIR}/logrotate.d/"*
|
||||
cp "${script_dir}/start/logrotate/"* "${PLATFORM_DATA_DIR}/logrotate.d/"
|
||||
|
||||
# logrotate files have to be owned by root, this is here to fixup existing installations where we were resetting the owner to yellowtent
|
||||
@@ -170,7 +174,7 @@ fi
|
||||
|
||||
# worker_rlimit_nofile in nginx config can be max this number
|
||||
mkdir -p /etc/systemd/system/nginx.service.d
|
||||
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf; then
|
||||
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf 2>/dev/null; then
|
||||
echo -e "[Service]\nLimitNOFILE=16384\n" > /etc/systemd/system/nginx.service.d/cloudron.conf
|
||||
fi
|
||||
|
||||
@@ -205,7 +209,8 @@ done
|
||||
|
||||
readonly mysql_root_password="password"
|
||||
mysqladmin -u root -ppassword password password # reset default root password
|
||||
if [[ "${ubuntu_version}" == "20.04" ]]; then
|
||||
readonly mysqlVersion=$(mysql -NB -u root -p${mysql_root_password} -e 'SELECT VERSION()' 2>/dev/null)
|
||||
if [[ "${mysqlVersion}" == "8.0."* ]]; then
|
||||
# mysql 8 added a new caching_sha2_password scheme which mysqljs does not support
|
||||
mysql -u root -p${mysql_root_password} -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '${mysql_root_password}';"
|
||||
fi
|
||||
@@ -226,7 +231,7 @@ log "Changing ownership"
|
||||
# note, change ownership after db migrate. this allow db migrate to move files around as root and then we can fix it up here
|
||||
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
|
||||
chown -R "${USER}" /etc/cloudron
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall" "${PLATFORM_DATA_DIR}/sshfs" "${PLATFORM_DATA_DIR}/cifs"
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
|
||||
chown "${USER}:${USER}" "${APPS_DATA_DIR}"
|
||||
|
||||
@@ -3,100 +3,155 @@
|
||||
set -eu -o pipefail
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
has_ipv6=$(cat /proc/net/if_inet6 >/dev/null 2>&1 && echo "yes" || echo "no")
|
||||
|
||||
# wait for 120 seconds for xtables lock, checking every 1 second
|
||||
readonly iptables="iptables --wait 120 --wait-interval 1"
|
||||
readonly ip6tables="ip6tables --wait 120 --wait-interval 1"
|
||||
|
||||
function ipxtables() {
|
||||
$iptables "$@"
|
||||
[[ "${has_ipv6}" == "yes" ]] && $ip6tables "$@"
|
||||
}
|
||||
|
||||
ipxtables -t filter -N CLOUDRON || true
|
||||
ipxtables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# first setup any user IP block lists
|
||||
ipset create cloudron_blocklist hash:net || true
|
||||
ipset create cloudron_blocklist6 hash:net family inet6 || true
|
||||
|
||||
/home/yellowtent/box/src/scripts/setblocklist.sh
|
||||
|
||||
iptables -t filter -A CLOUDRON -m set --match-set cloudron_blocklist src -j DROP
|
||||
$iptables -t filter -A CLOUDRON -m set --match-set cloudron_blocklist src -j DROP
|
||||
# the DOCKER-USER chain is not cleared on docker restart
|
||||
if ! iptables -t filter -C DOCKER-USER -m set --match-set cloudron_blocklist src -j DROP; then
|
||||
iptables -t filter -I DOCKER-USER 1 -m set --match-set cloudron_blocklist src -j DROP
|
||||
if ! $iptables -t filter -C DOCKER-USER -m set --match-set cloudron_blocklist src -j DROP; then
|
||||
$iptables -t filter -I DOCKER-USER 1 -m set --match-set cloudron_blocklist src -j DROP
|
||||
fi
|
||||
|
||||
$ip6tables -t filter -A CLOUDRON -m set --match-set cloudron_blocklist6 src -j DROP
|
||||
# there is no DOCKER-USER chain in ip6tables, bug?
|
||||
$ip6tables -D FORWARD -m set --match-set cloudron_blocklist6 src -j DROP || true
|
||||
$ip6tables -I FORWARD 1 -m set --match-set cloudron_blocklist6 src -j DROP
|
||||
|
||||
# allow related and establisted connections
|
||||
iptables -t filter -A CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,80,202,443 -j ACCEPT # 202 is the alternate ssh port
|
||||
ipxtables -t filter -A CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
ipxtables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,80,202,443 -j ACCEPT # 202 is the alternate ssh port
|
||||
|
||||
# whitelist any user ports. we used to use --dports but it has a 15 port limit (XT_MULTI_PORTS)
|
||||
ports_json="/home/yellowtent/platformdata/firewall/ports.json"
|
||||
if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(','))" 2>/dev/null); then
|
||||
IFS=',' arr=(${allowed_tcp_ports})
|
||||
for p in "${arr[@]}"; do
|
||||
iptables -A CLOUDRON -p tcp -m tcp --dport "${p}" -j ACCEPT
|
||||
if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(' '))" 2>/dev/null); then
|
||||
for p in $allowed_tcp_ports; do
|
||||
ipxtables -A CLOUDRON -p tcp -m tcp --dport "${p}" -j ACCEPT
|
||||
done
|
||||
fi
|
||||
|
||||
if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(','))" 2>/dev/null); then
|
||||
IFS=',' arr=(${allowed_udp_ports})
|
||||
for p in "${arr[@]}"; do
|
||||
iptables -A CLOUDRON -p udp -m udp --dport "${p}" -j ACCEPT
|
||||
if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(' '))" 2>/dev/null); then
|
||||
for p in $allowed_udp_ports; do
|
||||
ipxtables -A CLOUDRON -p udp -m udp --dport "${p}" -j ACCEPT
|
||||
done
|
||||
fi
|
||||
|
||||
# LDAP user directory allow list
|
||||
ipset create cloudron_ldap_allowlist hash:net || true
|
||||
ipset flush cloudron_ldap_allowlist
|
||||
|
||||
ipset create cloudron_ldap_allowlist6 hash:net family inet6 || true
|
||||
ipset flush cloudron_ldap_allowlist6
|
||||
|
||||
ldap_allowlist_json="/home/yellowtent/platformdata/firewall/ldap_allowlist.txt"
|
||||
# delete any existing redirect rule
|
||||
$iptables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 2>/dev/null || true
|
||||
$ip6tables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 >/dev/null || true
|
||||
if [[ -f "${ldap_allowlist_json}" ]]; then
|
||||
# without the -n block, any last line without a new line won't be read it!
|
||||
while read -r line || [[ -n "$line" ]]; do
|
||||
[[ -z "${line}" ]] && continue # ignore empty lines
|
||||
[[ "$line" =~ ^#.*$ ]] && continue # ignore lines starting with #
|
||||
if [[ "$line" == *":"* ]]; then
|
||||
ipset add -! cloudron_ldap_allowlist6 "${line}" # the -! ignore duplicates
|
||||
else
|
||||
ipset add -! cloudron_ldap_allowlist "${line}" # the -! ignore duplicates
|
||||
fi
|
||||
done < "${ldap_allowlist_json}"
|
||||
|
||||
# ldap server we expose 3004 and also redirect from standard ldaps port 636
|
||||
$iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
|
||||
$iptables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
|
||||
$iptables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist src -p tcp --dport 3004 -j ACCEPT
|
||||
|
||||
$ip6tables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
|
||||
$ip6tables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist6 src -p tcp --dport 3004 -j ACCEPT
|
||||
fi
|
||||
|
||||
# turn and stun service
|
||||
iptables -t filter -A CLOUDRON -p tcp -m multiport --dports 3478,5349 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp -m multiport --dports 3478,5349 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp -m multiport --dports 50000:51000 -j ACCEPT
|
||||
ipxtables -t filter -A CLOUDRON -p tcp -m multiport --dports 3478,5349 -j ACCEPT
|
||||
ipxtables -t filter -A CLOUDRON -p udp -m multiport --dports 3478,5349 -j ACCEPT
|
||||
ipxtables -t filter -A CLOUDRON -p udp -m multiport --dports 50000:51000 -j ACCEPT
|
||||
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
# ICMPv6 is very fundamental to IPv6 connectivity unlike ICMPv4
|
||||
$iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
$iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
$ip6tables -t filter -A CLOUDRON -p ipv6-icmp -j ACCEPT
|
||||
|
||||
ipxtables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
$iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
ipxtables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
ipxtables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "Packet dropped: " --log-level 7
|
||||
ipxtables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
# prepend our chain to the filter table
|
||||
$iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null || $iptables -t filter -I INPUT -j CLOUDRON
|
||||
$ip6tables -t filter -C INPUT -j CLOUDRON 2>/dev/null || $ip6tables -t filter -I INPUT -j CLOUDRON
|
||||
|
||||
# Setup rate limit chain (the recent info is at /proc/net/xt_recent)
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT # empty any existing rules
|
||||
ipxtables -t filter -N CLOUDRON_RATELIMIT || true
|
||||
ipxtables -t filter -F CLOUDRON_RATELIMIT # empty any existing rules
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT_LOG || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT_LOG # empty any existing rules
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -m limit --limit 2/min -j LOG --log-prefix "IPTables RateLimit: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -j DROP
|
||||
ipxtables -t filter -N CLOUDRON_RATELIMIT_LOG || true
|
||||
ipxtables -t filter -F CLOUDRON_RATELIMIT_LOG # empty any existing rules
|
||||
ipxtables -t filter -A CLOUDRON_RATELIMIT_LOG -m limit --limit 2/min -j LOG --log-prefix "IPTables RateLimit: " --log-level 7
|
||||
ipxtables -t filter -A CLOUDRON_RATELIMIT_LOG -j DROP
|
||||
|
||||
# http https
|
||||
for port in 80 443; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
ipxtables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# ssh
|
||||
for port in 22 202; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
|
||||
ipxtables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
|
||||
ipxtables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# ldaps
|
||||
for port in 636 3004; do
|
||||
ipxtables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# docker translates (dnat) 25, 587, 993, 4190 in the PREROUTING step
|
||||
for port in 2525 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn ! -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 50 -j CLOUDRON_RATELIMIT_LOG
|
||||
$iptables -A CLOUDRON_RATELIMIT -p tcp --syn ! -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 50 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# msa, ldap, imap, sieve, pop3
|
||||
for port in 2525 3002 4190 9993 9995; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 500 -j CLOUDRON_RATELIMIT_LOG
|
||||
$iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 500 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# cloudron docker network: mysql postgresql redis mongodb
|
||||
for port in 3306 5432 6379 27017; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
$iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null; then
|
||||
iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
fi
|
||||
$iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
$ip6tables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $ip6tables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
|
||||
# Workaround issue where Docker insists on adding itself first in FORWARD table
|
||||
iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
|
||||
iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
|
||||
|
||||
echo "==> Setting up firewall done"
|
||||
ipxtables -D FORWARD -j CLOUDRON_RATELIMIT || true
|
||||
ipxtables -I FORWARD 1 -j CLOUDRON_RATELIMIT
|
||||
|
||||
@@ -4,13 +4,17 @@
|
||||
|
||||
printf "**********************************************************************\n\n"
|
||||
|
||||
cache_file="/var/cache/cloudron-motd-cache"
|
||||
|
||||
if [[ -z "$(ls -A /home/yellowtent/platformdata/addons/mail/dkim)" ]]; then
|
||||
if [[ -f /tmp/.cloudron-motd-cache ]]; then
|
||||
ip=$(cat /tmp/.cloudron-motd-cache)
|
||||
elif ! ip=$(curl --fail --connect-timeout 2 --max-time 2 -q https://api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
|
||||
if [[ ! -f "${cache_file}" ]]; then
|
||||
curl --fail --connect-timeout 2 --max-time 2 -q https://ipv4.api.cloudron.io/api/v1/helper/public_ip --output "${cache_file}" || true
|
||||
fi
|
||||
if [[ -f "${cache_file}" ]]; then
|
||||
ip=$(sed -n -e 's/.*"ip": "\(.*\)"/\1/p' /var/cache/cloudron-motd-cache)
|
||||
else
|
||||
ip='<IP>'
|
||||
fi
|
||||
echo "${ip}" > /tmp/.cloudron-motd-cache
|
||||
|
||||
if [[ ! -f /etc/cloudron/SETUP_TOKEN ]]; then
|
||||
url="https://${ip}"
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
##############################################################################
|
||||
|
||||
Hostname "localhost"
|
||||
#FQDNLookup true
|
||||
FQDNLookup false
|
||||
#BaseDir "/var/lib/collectd"
|
||||
#PluginDir "/usr/lib/collectd"
|
||||
#TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db"
|
||||
|
||||
@@ -19,15 +19,10 @@ http {
|
||||
include mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
# the collectd config depends on this log format
|
||||
log_format combined2 '$remote_addr - [$time_local] '
|
||||
'"$request" $status $body_bytes_sent $request_time '
|
||||
'"$http_referer" "$host" "$http_user_agent"';
|
||||
|
||||
# required for long host names
|
||||
server_names_hash_bucket_size 128;
|
||||
|
||||
access_log /var/log/nginx/access.log combined2;
|
||||
access_log /var/log/nginx/access.log combined;
|
||||
|
||||
sendfile on;
|
||||
|
||||
|
||||
@@ -53,6 +53,9 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/stoptask.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/setblocklist.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/setblocklist.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/setldapallowlist.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/setldapallowlist.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/addmount.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/addmount.sh
|
||||
|
||||
@@ -61,3 +64,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmount.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/remountmount.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/remountmount.sh
|
||||
|
||||
cloudron-support ALL=(ALL) NOPASSWD: ALL
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
[Unit]
|
||||
Description=Unbound DNS Resolver
|
||||
After=network-online.target docker.service
|
||||
After=network-online.target
|
||||
Before=nss-lookup.target
|
||||
Wants=network-online.target nss-lookup.target
|
||||
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
# Unbound is used primarily for RBL queries (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We cannot use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
|
||||
server:
|
||||
port: 53
|
||||
interface: 127.0.0.1
|
||||
interface: 172.18.0.1
|
||||
ip-freebind: yes
|
||||
do-ip6: no
|
||||
access-control: 127.0.0.1 allow
|
||||
access-control: 172.18.0.1/16 allow
|
||||
@@ -10,4 +14,3 @@ server:
|
||||
# enable below for logging to journalctl -u unbound
|
||||
# verbosity: 5
|
||||
# log-queries: yes
|
||||
|
||||
|
||||
102
src/acme2.js
102
src/acme2.js
@@ -9,6 +9,7 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
blobs = require('./blobs.js'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
crypto = require('crypto'),
|
||||
debug = require('debug')('box:cert/acme2'),
|
||||
@@ -31,7 +32,7 @@ const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory',
|
||||
function Acme2(options) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
this.accountKeyPem = options.accountKeyPem; // Buffer
|
||||
this.accountKeyPem = null; // Buffer .
|
||||
this.email = options.email;
|
||||
this.keyId = null;
|
||||
this.caDirectory = options.prod ? CA_PROD_DIRECTORY_URL : CA_STAGING_DIRECTORY_URL;
|
||||
@@ -46,16 +47,16 @@ function urlBase64Encode(string) {
|
||||
}
|
||||
|
||||
function b64(str) {
|
||||
var buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
|
||||
const buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
|
||||
return urlBase64Encode(buf.toString('base64'));
|
||||
}
|
||||
|
||||
function getModulus(pem) {
|
||||
assert(Buffer.isBuffer(pem));
|
||||
|
||||
var stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
|
||||
const stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
|
||||
if (!stdout) return null;
|
||||
var match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
|
||||
const match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
|
||||
if (!match) return null;
|
||||
return Buffer.from(match[1], 'hex');
|
||||
}
|
||||
@@ -87,10 +88,10 @@ Acme2.prototype.sendSignedRequest = async function (url, payload) {
|
||||
|
||||
let [error, response] = await safe(superagent.get(this.directory.newNonce).timeout(30000).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, `Network error sending signed request: ${error.message}`);
|
||||
if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code when fetching nonce : ${response.status}`);
|
||||
if (response.status !== 204) throw new BoxError(BoxError.ACME_ERROR, `Invalid response code when fetching nonce : ${response.status}`);
|
||||
|
||||
const nonce = response.headers['Replay-Nonce'.toLowerCase()];
|
||||
if (!nonce) throw new BoxError(BoxError.EXTERNAL_ERROR, 'No nonce in response');
|
||||
if (!nonce) throw new BoxError(BoxError.ACME_ERROR, 'No nonce in response');
|
||||
|
||||
debug('sendSignedRequest: using nonce %s for url %s', nonce, url);
|
||||
|
||||
@@ -128,23 +129,43 @@ Acme2.prototype.updateContact = async function (registrationUri) {
|
||||
};
|
||||
|
||||
const result = await this.sendSignedRequest(registrationUri, JSON.stringify(payload));
|
||||
if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to update contact. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
if (result.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to update contact. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
|
||||
debug(`updateContact: contact of user updated to ${this.email}`);
|
||||
};
|
||||
|
||||
Acme2.prototype.registerUser = async function () {
|
||||
async function generateAccountKey() {
|
||||
const acmeAccountKey = safe.child_process.execSync('openssl genrsa 4096');
|
||||
if (!acmeAccountKey) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate acme account key: ${safe.error.message}`);
|
||||
return acmeAccountKey;
|
||||
}
|
||||
|
||||
Acme2.prototype.ensureAccount = async function () {
|
||||
const payload = {
|
||||
termsOfServiceAgreed: true
|
||||
};
|
||||
|
||||
debug('registerUser: registering user');
|
||||
debug('ensureAccount: registering user');
|
||||
|
||||
this.accountKeyPem = await blobs.get(blobs.ACME_ACCOUNT_KEY);
|
||||
if (!this.accountKeyPem) {
|
||||
debug('ensureAccount: generating new account keys');
|
||||
this.accountKeyPem = await generateAccountKey();
|
||||
await blobs.set(blobs.ACME_ACCOUNT_KEY, this.accountKeyPem);
|
||||
}
|
||||
|
||||
let result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
|
||||
if (result.status === 403 && result.body.type === 'urn:ietf:params:acme:error:unauthorized') {
|
||||
debug(`ensureAccount: key was revoked. ${result.status} ${JSON.stringify(result.body)}. generating new account key`);
|
||||
this.accountKeyPem = await generateAccountKey();
|
||||
await blobs.set(blobs.ACME_ACCOUNT_KEY, this.accountKeyPem);
|
||||
result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
|
||||
}
|
||||
|
||||
const result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
|
||||
// 200 if already exists. 201 for new accounts
|
||||
if (result.status !== 200 && result.status !== 201) return new BoxError(BoxError.EXTERNAL_ERROR, `Failed to register new account. Expecting 200 or 201, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
if (result.status !== 200 && result.status !== 201) throw new BoxError(BoxError.ACME_ERROR, `Failed to register new account. Expecting 200 or 201, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
|
||||
debug(`registerUser: user registered keyid: ${result.headers.location}`);
|
||||
debug(`ensureAccount: user registered keyid: ${result.headers.location}`);
|
||||
|
||||
this.keyId = result.headers.location;
|
||||
|
||||
@@ -165,15 +186,15 @@ Acme2.prototype.newOrder = async function (domain) {
|
||||
|
||||
const result = await this.sendSignedRequest(this.directory.newOrder, JSON.stringify(payload));
|
||||
if (result.status === 403) throw new BoxError(BoxError.ACCESS_DENIED, `Forbidden sending new order: ${result.body.detail}`);
|
||||
if (result.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to send new order. Expecting 201, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
if (result.status !== 201) throw new BoxError(BoxError.ACME_ERROR, `Failed to send new order. Expecting 201, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
|
||||
debug('newOrder: created order %s %j', domain, result.body);
|
||||
|
||||
const order = result.body, orderUrl = result.headers.location;
|
||||
|
||||
if (!Array.isArray(order.authorizations)) throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid authorizations in order');
|
||||
if (typeof order.finalize !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid finalize in order');
|
||||
if (typeof orderUrl !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid order location in order header');
|
||||
if (!Array.isArray(order.authorizations)) throw new BoxError(BoxError.ACME_ERROR, 'invalid authorizations in order');
|
||||
if (typeof order.finalize !== 'string') throw new BoxError(BoxError.ACME_ERROR, 'invalid finalize in order');
|
||||
if (typeof orderUrl !== 'string') throw new BoxError(BoxError.ACME_ERROR, 'invalid order location in order header');
|
||||
|
||||
return { order, orderUrl };
|
||||
};
|
||||
@@ -183,20 +204,20 @@ Acme2.prototype.waitForOrder = async function (orderUrl) {
|
||||
|
||||
debug(`waitForOrder: ${orderUrl}`);
|
||||
|
||||
return await promiseRetry({ times: 15, interval: 20000 }, async () => {
|
||||
return await promiseRetry({ times: 15, interval: 20000, debug }, async () => {
|
||||
debug('waitForOrder: getting status');
|
||||
|
||||
const result = await this.postAsGet(orderUrl);
|
||||
if (result.status !== 200) {
|
||||
debug(`waitForOrder: invalid response code getting uri ${result.status}`);
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, `Bad response code: ${result.status}`);
|
||||
throw new BoxError(BoxError.ACME_ERROR, `Bad response when waiting for order. code: ${result.status}`);
|
||||
}
|
||||
|
||||
debug('waitForOrder: status is "%s %j', result.body.status, result.body);
|
||||
|
||||
if (result.body.status === 'pending' || result.body.status === 'processing') throw new BoxError(BoxError.TRY_AGAIN, `Request is in ${result.body.status} state`);
|
||||
if (result.body.status === 'pending' || result.body.status === 'processing') throw new BoxError(BoxError.ACME_ERROR, `Request is in ${result.body.status} state`);
|
||||
else if (result.body.status === 'valid' && result.body.certificate) return result.body.certificate;
|
||||
else throw new BoxError(BoxError.EXTERNAL_ERROR, `Unexpected status or invalid response: ${JSON.stringify(result.body)}`);
|
||||
else throw new BoxError(BoxError.ACME_ERROR, `Unexpected status or invalid response when waiting for order: ${JSON.stringify(result.body)}`);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -228,7 +249,7 @@ Acme2.prototype.notifyChallengeReady = async function (challenge) {
|
||||
};
|
||||
|
||||
const result = await this.sendSignedRequest(challenge.url, JSON.stringify(payload));
|
||||
if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to notify challenge. Expecting 200, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
if (result.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to notify challenge. Expecting 200, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
};
|
||||
|
||||
Acme2.prototype.waitForChallenge = async function (challenge) {
|
||||
@@ -236,20 +257,20 @@ Acme2.prototype.waitForChallenge = async function (challenge) {
|
||||
|
||||
debug('waitingForChallenge: %j', challenge);
|
||||
|
||||
await promiseRetry({ times: 15, interval: 20000 }, async () => {
|
||||
await promiseRetry({ times: 15, interval: 20000, debug }, async () => {
|
||||
debug('waitingForChallenge: getting status');
|
||||
|
||||
const result = await this.postAsGet(challenge.url);
|
||||
if (result.status !== 200) {
|
||||
debug(`waitForChallenge: invalid response code getting uri ${result.status}`);
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode);
|
||||
throw new BoxError(BoxError.ACME_ERROR, `Bad response code when waiting for challenge : ${result.status}`);
|
||||
}
|
||||
|
||||
debug(`waitForChallenge: status is "${result.body.status}" "${JSON.stringify(result.body)}"`);
|
||||
|
||||
if (result.body.status === 'pending') throw new BoxError(BoxError.TRY_AGAIN);
|
||||
if (result.body.status === 'pending') throw new BoxError(BoxError.ACME_ERROR, 'Challenge is in pending state');
|
||||
else if (result.body.status === 'valid') return;
|
||||
else throw new BoxError(BoxError.EXTERNAL_ERROR, `Unexpected status: ${result.body.status}`);
|
||||
else throw new BoxError(BoxError.ACME_ERROR, `Unexpected status when waiting for challenge: ${result.body.status}`);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -267,7 +288,7 @@ Acme2.prototype.signCertificate = async function (domain, finalizationUrl, csrDe
|
||||
|
||||
const result = await this.sendSignedRequest(finalizationUrl, JSON.stringify(payload));
|
||||
// 429 means we reached the cert limit for this domain
|
||||
if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to sign certificate. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
if (result.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to sign certificate. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
|
||||
};
|
||||
|
||||
Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFilePath) {
|
||||
@@ -303,7 +324,7 @@ Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFile
|
||||
if (!csrDer) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
|
||||
if (!safe.fs.writeFileSync(csrFilePath, csrDer)) throw new BoxError(BoxError.FS_ERROR, safe.error); // bookkeeping. inspect with openssl req -text -noout -in hostname.csr -inform der
|
||||
|
||||
await safe(fs.promises.rmdir(tmpdir, { recursive: true }));
|
||||
await safe(fs.promises.rm(tmpdir, { recursive: true, force: true }));
|
||||
|
||||
debug('createKeyAndCsr: csr file (DER) saved at %s', csrFilePath);
|
||||
|
||||
@@ -314,12 +335,12 @@ Acme2.prototype.downloadCertificate = async function (hostname, certUrl, certFil
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert.strictEqual(typeof certUrl, 'string');
|
||||
|
||||
await promiseRetry({ times: 5, interval: 20000 }, async () => {
|
||||
debug('downloadCertificate: downloading certificate');
|
||||
await promiseRetry({ times: 5, interval: 20000, debug }, async () => {
|
||||
debug(`downloadCertificate: downloading certificate of ${hostname}`);
|
||||
|
||||
const result = await this.postAsGet(certUrl);
|
||||
if (result.statusCode === 202) throw new BoxError(BoxError.TRY_AGAIN, 'Retry downloading certificate');
|
||||
if (result.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to get cert. Expecting 200, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
if (result.statusCode === 202) throw new BoxError(BoxError.ACME_ERROR, 'Retry downloading certificate');
|
||||
if (result.statusCode !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to get cert. Expecting 200, got ${result.statusCode} ${JSON.stringify(result.body)}`);
|
||||
|
||||
const fullChainPem = result.body; // buffer
|
||||
|
||||
@@ -337,7 +358,7 @@ Acme2.prototype.prepareHttpChallenge = async function (hostname, domain, authori
|
||||
|
||||
debug('prepareHttpChallenge: challenges: %j', authorization);
|
||||
let httpChallenges = authorization.challenges.filter(function(x) { return x.type === 'http-01'; });
|
||||
if (httpChallenges.length === 0) throw new BoxError(BoxError.EXTERNAL_ERROR, 'no http challenges');
|
||||
if (httpChallenges.length === 0) throw new BoxError(BoxError.ACME_ERROR, 'no http challenges');
|
||||
let challenge = httpChallenges[0];
|
||||
|
||||
debug('prepareHttpChallenge: preparing for challenge %j', challenge);
|
||||
@@ -386,7 +407,7 @@ Acme2.prototype.prepareDnsChallenge = async function (hostname, domain, authoriz
|
||||
|
||||
debug('prepareDnsChallenge: challenges: %j', authorization);
|
||||
const dnsChallenges = authorization.challenges.filter(function(x) { return x.type === 'dns-01'; });
|
||||
if (dnsChallenges.length === 0) throw new BoxError(BoxError.EXTERNAL_ERROR, 'no dns challenges');
|
||||
if (dnsChallenges.length === 0) throw new BoxError(BoxError.ACME_ERROR, 'no dns challenges');
|
||||
const challenge = dnsChallenges[0];
|
||||
|
||||
const keyAuthorization = this.getKeyAuthorization(challenge.token);
|
||||
@@ -431,7 +452,7 @@ Acme2.prototype.prepareChallenge = async function (hostname, domain, authorizati
|
||||
debug(`prepareChallenge: http: ${this.performHttpAuthorization}`);
|
||||
|
||||
const response = await this.postAsGet(authorizationUrl);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code getting authorization : ${response.status}`);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Invalid response code getting authorization : ${response.status}`);
|
||||
|
||||
const authorization = response.body;
|
||||
|
||||
@@ -464,7 +485,7 @@ Acme2.prototype.acmeFlow = async function (hostname, domain, paths) {
|
||||
|
||||
const { certFilePath, keyFilePath, csrFilePath, acmeChallengesDir } = paths;
|
||||
|
||||
await this.registerUser();
|
||||
await this.ensureAccount();
|
||||
const { order, orderUrl } = await this.newOrder(hostname);
|
||||
|
||||
for (let i = 0; i < order.authorizations.length; i++) {
|
||||
@@ -488,14 +509,14 @@ Acme2.prototype.acmeFlow = async function (hostname, domain, paths) {
|
||||
};
|
||||
|
||||
Acme2.prototype.loadDirectory = async function () {
|
||||
await promiseRetry({ times: 3, interval: 20000 }, async () => {
|
||||
await promiseRetry({ times: 3, interval: 20000, debug }, async () => {
|
||||
const response = await superagent.get(this.caDirectory).timeout(30000).ok(() => true);
|
||||
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code when fetching directory : ${response.status}`);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Invalid response code when fetching directory : ${response.status}`);
|
||||
|
||||
if (typeof response.body.newNonce !== 'string' ||
|
||||
typeof response.body.newOrder !== 'string' ||
|
||||
typeof response.body.newAccount !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`);
|
||||
typeof response.body.newAccount !== 'string') throw new BoxError(BoxError.ACME_ERROR, `Invalid response body : ${response.body}`);
|
||||
|
||||
this.directory = response.body;
|
||||
});
|
||||
@@ -523,9 +544,8 @@ async function getCertificate(vhost, domain, paths, options) {
|
||||
assert.strictEqual(typeof paths, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
let attempt = 1;
|
||||
await promiseRetry({ times: 3, interval: 0 }, async function () {
|
||||
debug(`getCertificate: attempt ${attempt++}`);
|
||||
await promiseRetry({ times: 3, interval: 0, debug }, async function () {
|
||||
debug(`getCertificate: for vhost ${vhost} and domain ${domain}`);
|
||||
|
||||
const acme = new Acme2(options || { });
|
||||
return await acme.getCertificate(vhost, domain, paths);
|
||||
|
||||
@@ -38,14 +38,14 @@ async function setHealth(app, health) {
|
||||
debug(`setHealth: ${app.id} (${app.fqdn}) switched from ${lastHealth} to healthy`);
|
||||
|
||||
// do not send mails for dev apps
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, AuditSource.HEALTH_MONITOR, { app: app });
|
||||
if (!app.debugMode) await eventlog.add(eventlog.ACTION_APP_UP, AuditSource.HEALTH_MONITOR, { app: app });
|
||||
}
|
||||
} else if (Math.abs(now - healthTime) > UNHEALTHY_THRESHOLD) {
|
||||
if (lastHealth === apps.HEALTH_HEALTHY) {
|
||||
debug(`setHealth: marking ${app.id} (${app.fqdn}) as unhealthy since not seen for more than ${UNHEALTHY_THRESHOLD/(60 * 1000)} minutes`);
|
||||
|
||||
// do not send mails for dev apps
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, AuditSource.HEALTH_MONITOR, { app: app });
|
||||
if (!app.debugMode) await eventlog.add(eventlog.ACTION_APP_DOWN, AuditSource.HEALTH_MONITOR, { app: app });
|
||||
}
|
||||
} else {
|
||||
debug(`setHealth: ${app.id} (${app.fqdn}) waiting for ${(UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000} to update health`);
|
||||
|
||||
607
src/apps.js
607
src/apps.js
File diff suppressed because it is too large
Load Diff
141
src/appstore.js
141
src/appstore.js
@@ -13,14 +13,17 @@ exports = module.exports = {
|
||||
purchaseApp,
|
||||
unpurchaseApp,
|
||||
|
||||
createUserToken,
|
||||
getWebToken,
|
||||
getSubscription,
|
||||
isFreePlan,
|
||||
|
||||
getAppUpdate,
|
||||
getBoxUpdate,
|
||||
|
||||
createTicket
|
||||
createTicket,
|
||||
|
||||
// exported for tests
|
||||
_unregister: unregister
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
@@ -34,6 +37,7 @@ const apps = require('./apps.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
superagent = require('superagent'),
|
||||
support = require('./support.js'),
|
||||
util = require('util');
|
||||
@@ -49,7 +53,7 @@ let gFeatures = {
|
||||
privateDockerRegistry: false,
|
||||
branding: false,
|
||||
support: false,
|
||||
directoryConfig: false,
|
||||
profileConfig: false,
|
||||
mailboxMaxCount: 5,
|
||||
emailPremium: false
|
||||
};
|
||||
@@ -78,17 +82,15 @@ async function login(email, password, totpToken) {
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
assert.strictEqual(typeof totpToken, 'string');
|
||||
|
||||
const data = { email, password, totpToken };
|
||||
|
||||
const url = settings.apiServerOrigin() + '/api/v1/login';
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
.send(data)
|
||||
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/login`)
|
||||
.send({ email, password, totpToken })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `login status code: ${response.status}`);
|
||||
if (!response.body.accessToken) throw new BoxError(BoxError.EXTERNAL_ERROR, `login invalid response: ${response.text}`);
|
||||
|
||||
return response.body; // { userId, accessToken }
|
||||
}
|
||||
@@ -97,54 +99,37 @@ async function registerUser(email, password) {
|
||||
assert.strictEqual(typeof email, 'string');
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
|
||||
const data = { email, password };
|
||||
|
||||
const url = settings.apiServerOrigin() + '/api/v1/register_user';
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
.send(data)
|
||||
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_user`)
|
||||
.send({ email, password, utmSource: 'cloudron-dashboard' })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
|
||||
if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, error.message);
|
||||
if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, 'account already exists');
|
||||
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `register status code: ${response.status}`);
|
||||
}
|
||||
|
||||
async function createUserToken() {
|
||||
async function getWebToken() {
|
||||
if (settings.isDemo()) throw new BoxError(BoxError.BAD_FIELD, 'Not allowed in demo mode');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
const token = await settings.getAppstoreWebToken();
|
||||
if (!token) throw new BoxError(BoxError.NOT_FOUND); // user will have to re-login with password somehow
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/user_token`;
|
||||
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
.send({})
|
||||
.query({ accessToken: token })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `getUserToken status code: ${response.status}`);
|
||||
|
||||
return response.body.accessToken;
|
||||
return token;
|
||||
}
|
||||
|
||||
async function getSubscription() {
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = settings.apiServerOrigin() + '/api/v1/subscription';
|
||||
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/subscription`)
|
||||
.query({ accessToken: token })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR);
|
||||
if (response.status === 502) throw new BoxError(BoxError.EXTERNAL_ERROR, `Stripe error: ${error.message}`);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${error.message}`);
|
||||
|
||||
@@ -165,12 +150,10 @@ async function purchaseApp(data) {
|
||||
assert(data.appstoreId || data.manifestId);
|
||||
assert.strictEqual(typeof data.appId, 'string');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps`;
|
||||
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/cloudronapps`)
|
||||
.send(data)
|
||||
.query({ accessToken: token })
|
||||
.timeout(30 * 1000)
|
||||
@@ -180,7 +163,6 @@ async function purchaseApp(data) {
|
||||
if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND); // appstoreId does not exist
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
// 200 if already purchased, 201 is newly purchased
|
||||
if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', response.status, response.body));
|
||||
}
|
||||
@@ -190,7 +172,7 @@ async function unpurchaseApp(appId, data) {
|
||||
assert.strictEqual(typeof data, 'object'); // { appstoreId, manifestId }
|
||||
assert(data.appstoreId || data.manifestId);
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps/${appId}`;
|
||||
@@ -203,7 +185,7 @@ async function unpurchaseApp(appId, data) {
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 404) return; // was never purchased
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', response.status, response.body));
|
||||
|
||||
[error, response] = await safe(superagent.del(url)
|
||||
@@ -220,25 +202,23 @@ async function unpurchaseApp(appId, data) {
|
||||
async function getBoxUpdate(options) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/boxupdate`;
|
||||
|
||||
const query = {
|
||||
accessToken: token,
|
||||
boxVersion: constants.VERSION,
|
||||
automatic: options.automatic
|
||||
};
|
||||
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/boxupdate`)
|
||||
.query(query)
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 204) return; // no update
|
||||
if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
|
||||
|
||||
@@ -263,10 +243,9 @@ async function getAppUpdate(app, options) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/appupdate`;
|
||||
const query = {
|
||||
accessToken: token,
|
||||
boxVersion: constants.VERSION,
|
||||
@@ -275,14 +254,14 @@ async function getAppUpdate(app, options) {
|
||||
automatic: options.automatic
|
||||
};
|
||||
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/appupdate`)
|
||||
.query(query)
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 204) return; // no update
|
||||
if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
|
||||
|
||||
@@ -306,24 +285,23 @@ async function getAppUpdate(app, options) {
|
||||
async function registerCloudron(data) {
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/register_cloudron`;
|
||||
const { domain, accessToken, version } = data;
|
||||
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
.send(data)
|
||||
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_cloudron`)
|
||||
.send({ domain, accessToken, version })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to register cloudron: ${response.statusCode} ${error.message}`);
|
||||
|
||||
// cloudronId, token, licenseKey
|
||||
// cloudronId, token
|
||||
if (!response.body.cloudronId) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no cloudron id');
|
||||
if (!response.body.cloudronToken) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no token');
|
||||
if (!response.body.licenseKey) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no license');
|
||||
|
||||
await settings.setCloudronId(response.body.cloudronId);
|
||||
await settings.setCloudronToken(response.body.cloudronToken);
|
||||
await settings.setLicenseKey(response.body.licenseKey);
|
||||
await settings.setAppstoreApiToken(response.body.cloudronToken);
|
||||
await settings.setAppstoreWebToken(accessToken);
|
||||
|
||||
debug(`registerCloudron: Cloudron registered with id ${response.body.cloudronId}`);
|
||||
}
|
||||
@@ -331,23 +309,24 @@ async function registerCloudron(data) {
|
||||
async function updateCloudron(data) {
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const { domain } = data;
|
||||
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/update_cloudron`;
|
||||
const query = {
|
||||
accessToken: token
|
||||
};
|
||||
|
||||
const [error, response] = await safe(superagent.post(url)
|
||||
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/update_cloudron`)
|
||||
.query(query)
|
||||
.send(data)
|
||||
.send({ domain })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
|
||||
|
||||
debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`);
|
||||
@@ -356,7 +335,7 @@ async function updateCloudron(data) {
|
||||
async function registerWithLoginCredentials(options) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (token) throw new BoxError(BoxError.CONFLICT, 'Cloudron is already registered');
|
||||
|
||||
if (options.signup) await registerUser(options.email, options.password);
|
||||
@@ -365,6 +344,12 @@ async function registerWithLoginCredentials(options) {
|
||||
await registerCloudron({ domain: settings.dashboardDomain(), accessToken: result.accessToken, version: constants.VERSION });
|
||||
}
|
||||
|
||||
async function unregister() {
|
||||
await settings.setCloudronId('');
|
||||
await settings.setAppstoreApiToken('');
|
||||
await settings.setAppstoreWebToken('');
|
||||
}
|
||||
|
||||
async function createTicket(info, auditSource) {
|
||||
assert.strictEqual(typeof info, 'object');
|
||||
assert.strictEqual(typeof info.email, 'string');
|
||||
@@ -374,11 +359,12 @@ async function createTicket(info, auditSource) {
|
||||
assert.strictEqual(typeof info.description, 'string');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
if (info.enableSshSupport) {
|
||||
await safe(support.enableRemoteSupport(true, auditSource));
|
||||
info.ipv4 = await sysinfo.getServerIPv4();
|
||||
}
|
||||
|
||||
info.app = info.appId ? await apps.get(info.appId) : null;
|
||||
@@ -393,10 +379,11 @@ async function createTicket(info, auditSource) {
|
||||
if (info.app) {
|
||||
request.field('infoJSON', JSON.stringify(info));
|
||||
|
||||
apps.getLocalLogfilePaths(info.app).forEach(function (filePath) {
|
||||
const logs = safe.child_process.execSync(`tail --lines=1000 ${filePath}`);
|
||||
if (logs) request.attach(path.basename(filePath), logs, path.basename(filePath));
|
||||
});
|
||||
const logPaths = await apps.getLogPaths(info.app);
|
||||
for (const logPath of logPaths) {
|
||||
const logs = safe.child_process.execSync(`tail --lines=1000 ${logPath}`);
|
||||
if (logs) request.attach(path.basename(logPath), logs, path.basename(logPath));
|
||||
}
|
||||
} else {
|
||||
request.send(info);
|
||||
}
|
||||
@@ -404,30 +391,28 @@ async function createTicket(info, auditSource) {
|
||||
const [error, response] = await safe(request);
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
|
||||
|
||||
eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);
|
||||
await eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);
|
||||
|
||||
return { message: `An email was sent to ${constants.SUPPORT_EMAIL}. We will get back shortly!` };
|
||||
}
|
||||
|
||||
async function getApps() {
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
const unstable = await settings.getUnstableAppsConfig();
|
||||
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/apps`;
|
||||
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/apps`)
|
||||
.query({ accessToken: token, boxVersion: constants.VERSION, unstable: unstable })
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App listing failed. %s %j', response.status, response.body));
|
||||
if (!response.body.apps) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
|
||||
|
||||
@@ -444,7 +429,7 @@ async function getAppVersion(appId, version) {
|
||||
|
||||
if (!isAppAllowed(appId, listingConfig)) throw new BoxError(BoxError.FEATURE_DISABLED);
|
||||
|
||||
const token = await settings.getCloudronToken();
|
||||
const token = await settings.getAppstoreApiToken();
|
||||
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
|
||||
|
||||
let url = `${settings.apiServerOrigin()}/api/v1/apps/${appId}`;
|
||||
@@ -458,7 +443,7 @@ async function getAppVersion(appId, version) {
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.status === 403 || response.statusCode === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND);
|
||||
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
|
||||
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App fetch failed. %s %j', response.status, response.body));
|
||||
|
||||
return response.body;
|
||||
|
||||
@@ -41,11 +41,14 @@ const apps = require('./apps.js'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/app.ejs', { encoding: 'utf8' }),
|
||||
MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
|
||||
const MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
|
||||
LOGROTATE_CONFIG_EJS = fs.readFileSync(__dirname + '/logrotate.ejs', { encoding: 'utf8' }),
|
||||
CONFIGURE_LOGROTATE_CMD = path.join(__dirname, 'scripts/configurelogrotate.sh');
|
||||
|
||||
// https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled
|
||||
const CGROUP_VERSION = fs.existsSync('/sys/fs/cgroup/cgroup.controllers') ? '2' : '1';
|
||||
const COLLECTD_CONFIG_EJS = fs.readFileSync(`${__dirname}/collectd/app_cgroup_v${CGROUP_VERSION}.ejs`, { encoding: 'utf8' });
|
||||
|
||||
function makeTaskError(error, app) {
|
||||
assert.strictEqual(typeof error, 'object');
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
@@ -71,11 +74,11 @@ async function updateApp(app, values) {
|
||||
async function allocateContainerIp(app) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
|
||||
await promiseRetry({ times: 10, interval: 0}, async function () {
|
||||
await promiseRetry({ times: 10, interval: 0, debug }, async function () {
|
||||
const iprange = iputils.intFromIp('172.18.20.255') - iputils.intFromIp('172.18.16.1');
|
||||
let rnd = Math.floor(Math.random() * iprange);
|
||||
const containerIp = iputils.ipFromInt(iputils.intFromIp('172.18.16.1') + rnd);
|
||||
updateApp(app, { containerIp });
|
||||
await updateApp(app, { containerIp });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -147,7 +150,7 @@ async function deleteAppDir(app, options) {
|
||||
if (safe.error.code !== 'ENOENT') throw new BoxError(BoxError.FS_ERROR, `Error unlinking dir ${appDataDir} : ${safe.error.message}`);
|
||||
}
|
||||
} else {
|
||||
if (!safe.fs.rmdirSync(appDataDir)) {
|
||||
if (!safe.fs.rmSync(appDataDir, { recursive: true })) {
|
||||
if (safe.error.code !== 'ENOENT') throw new BoxError(BoxError.FS_ERROR, `Error removing dir ${appDataDir} : ${safe.error.message}`);
|
||||
}
|
||||
}
|
||||
@@ -205,10 +208,10 @@ async function verifyManifest(manifest) {
|
||||
assert.strictEqual(typeof manifest, 'object');
|
||||
|
||||
let error = manifestFormat.parse(manifest);
|
||||
if (error) throw new BoxError(BoxError.BAD_FIELD, `Manifest error: ${error.message}`, { field: 'manifest' });
|
||||
if (error) throw new BoxError(BoxError.BAD_FIELD, `Manifest error: ${error.message}`);
|
||||
|
||||
error = apps.checkManifestConstraints(manifest);
|
||||
if (error) throw new BoxError(BoxError.CONFLICT, `Manifest constraint check failed: ${error.message}`, { field: 'manifest' });
|
||||
if (error) throw new BoxError(BoxError.CONFLICT, `Manifest constraint check failed: ${error.message}`);
|
||||
}
|
||||
|
||||
async function downloadIcon(app) {
|
||||
@@ -221,7 +224,7 @@ async function downloadIcon(app) {
|
||||
|
||||
const iconUrl = settings.apiServerOrigin() + '/api/v1/apps/' + app.appStoreId + '/versions/' + app.manifest.version + '/icon';
|
||||
|
||||
await promiseRetry({ times: 10, interval: 5000 }, async function () {
|
||||
await promiseRetry({ times: 10, interval: 5000, debug }, async function () {
|
||||
const [networkError, response] = await safe(superagent.get(iconUrl)
|
||||
.buffer(true)
|
||||
.timeout(30 * 1000)
|
||||
@@ -242,14 +245,26 @@ async function waitForDnsPropagation(app) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const [error] = await safe(dns.waitForDnsRecord(app.location, app.domain, 'A', ip, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS Record is not synced yet: ${error.message}`, { ip: ip, subdomain: app.location, domain: app.domain });
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
|
||||
// now wait for alternateDomains and aliasDomains, if any
|
||||
for (const domain of app.alternateDomains.concat(app.aliasDomains)) {
|
||||
const [error] = await safe(dns.waitForDnsRecord(domain.subdomain, domain.domain, 'A', ip, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS Record is not synced yet: ${error.message}`, { ip: ip, subdomain: domain.subdomain, domain: domain.domain });
|
||||
let error;
|
||||
[error] = await safe(dns.waitForDnsRecord(app.subdomain, app.domain, 'A', ipv4, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS A Record is not synced yet: ${error.message}`, { ipv4, subdomain: app.subdomain, domain: app.domain });
|
||||
if (ipv6) {
|
||||
[error] = await safe(dns.waitForDnsRecord(app.subdomain, app.domain, 'AAAA', ipv6, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS AAAA Record is not synced yet: ${error.message}`, { ipv6, subdomain: app.subdomain, domain: app.domain });
|
||||
}
|
||||
|
||||
// now wait for redirectDomains and aliasDomains, if any
|
||||
const allDomains = app.secondaryDomains.concat(app.redirectDomains).concat(app.aliasDomains);
|
||||
for (const domain of allDomains) {
|
||||
[error] = await safe(dns.waitForDnsRecord(domain.subdomain, domain.domain, 'A', ipv4, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS A Record is not synced yet: ${error.message}`, { ipv4, subdomain: domain.subdomain, domain: domain.domain });
|
||||
if (ipv6) {
|
||||
[error] = await safe(dns.waitForDnsRecord(domain.subdomain, domain.domain, 'AAAA', ipv6, { times: 240 }));
|
||||
if (error) throw new BoxError(BoxError.DNS_ERROR, `DNS AAAA Record is not synced yet: ${error.message}`, { ipv6, subdomain: domain.subdomain, domain: domain.domain });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -316,7 +331,7 @@ async function install(app, args, progressCallback) {
|
||||
}
|
||||
await services.teardownAddons(app, addonsToRemove);
|
||||
|
||||
if (!restoreConfig || restoreConfig.backupId) { // in-place import should not delete data dir
|
||||
if (!restoreConfig || restoreConfig.remotePath) { // in-place import should not delete data dir
|
||||
await deleteAppDir(app, { removeDirectory: false }); // do not remove any symlinked appdata dir
|
||||
}
|
||||
|
||||
@@ -333,7 +348,7 @@ async function install(app, args, progressCallback) {
|
||||
if (!skipDnsSetup) {
|
||||
await progressCallback({ percent: 30, message: 'Registering subdomains' });
|
||||
|
||||
await dns.registerLocations([ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback);
|
||||
await dns.registerLocations([ { subdomain: app.subdomain, domain: app.domain }].concat(app.secondaryDomains).concat(app.redirectDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback);
|
||||
}
|
||||
|
||||
await progressCallback({ percent: 40, message: 'Downloading image' });
|
||||
@@ -345,7 +360,7 @@ async function install(app, args, progressCallback) {
|
||||
if (!restoreConfig) { // install
|
||||
await progressCallback({ percent: 60, message: 'Setting up addons' });
|
||||
await services.setupAddons(app, app.manifest.addons);
|
||||
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.backupId) { // in-place import
|
||||
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.remotePath) { // in-place import
|
||||
await progressCallback({ percent: 60, message: 'Importing addons in-place' });
|
||||
await services.setupAddons(app, app.manifest.addons);
|
||||
await services.clearAddons(app, _.omit(app.manifest.addons, 'localstorage'));
|
||||
@@ -357,7 +372,7 @@ async function install(app, args, progressCallback) {
|
||||
await services.clearAddons(app, app.manifest.addons);
|
||||
const backupConfig = restoreConfig.backupConfig; // can be null
|
||||
let mountObject = null;
|
||||
if (backupConfig && mounts.isMountProvider(backupConfig.provider)) {
|
||||
if (backupConfig && mounts.isManagedProvider(backupConfig.provider)) {
|
||||
await progressCallback({ percent: 70, message: 'Setting up mount for importing' });
|
||||
mountObject = { // keep this in sync with importApp in apps.js
|
||||
name: `appimport-${app.id}`,
|
||||
@@ -449,9 +464,19 @@ async function changeLocation(app, args, progressCallback) {
|
||||
await deleteContainers(app, { managedOnly: true });
|
||||
|
||||
// unregister old domains
|
||||
let obsoleteDomains = oldConfig.alternateDomains.filter(function (o) {
|
||||
return !app.alternateDomains.some(function (n) { return n.subdomain === o.subdomain && n.domain === o.domain; });
|
||||
});
|
||||
let obsoleteDomains = [];
|
||||
|
||||
if (oldConfig.secondaryDomains) {
|
||||
obsoleteDomains = obsoleteDomains.concat(oldConfig.secondaryDomains.filter(function (o) {
|
||||
return !app.secondaryDomains.some(function (n) { return n.subdomain === o.subdomain && n.domain === o.domain; });
|
||||
}));
|
||||
}
|
||||
|
||||
if (oldConfig.redirectDomains) {
|
||||
obsoleteDomains = obsoleteDomains.concat(oldConfig.redirectDomains.filter(function (o) {
|
||||
return !app.redirectDomains.some(function (n) { return n.subdomain === o.subdomain && n.domain === o.domain; });
|
||||
}));
|
||||
}
|
||||
|
||||
if (oldConfig.aliasDomains) {
|
||||
obsoleteDomains = obsoleteDomains.concat(oldConfig.aliasDomains.filter(function (o) {
|
||||
@@ -459,14 +484,14 @@ async function changeLocation(app, args, progressCallback) {
|
||||
}));
|
||||
}
|
||||
|
||||
if (locationChanged) obsoleteDomains.push({ subdomain: oldConfig.location, domain: oldConfig.domain });
|
||||
if (locationChanged) obsoleteDomains.push({ subdomain: oldConfig.subdomain, domain: oldConfig.domain });
|
||||
|
||||
if (obsoleteDomains.length !== 0) await dns.unregisterLocations(obsoleteDomains, progressCallback);
|
||||
|
||||
// setup dns
|
||||
if (!skipDnsSetup) {
|
||||
await progressCallback({ percent: 30, message: 'Registering subdomains' });
|
||||
await dns.registerLocations([ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback);
|
||||
await dns.registerLocations([ { subdomain: app.subdomain, domain: app.domain }].concat(app.secondaryDomains).concat(app.redirectDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback);
|
||||
}
|
||||
|
||||
// re-setup addons since they rely on the app's fqdn (e.g oauth)
|
||||
@@ -565,7 +590,6 @@ async function update(app, args, progressCallback) {
|
||||
// app does not want these addons anymore
|
||||
// FIXME: this does not handle option changes (like multipleDatabases)
|
||||
const unusedAddons = _.omit(app.manifest.addons, Object.keys(updateConfig.manifest.addons));
|
||||
const httpPathsChanged = app.manifest.httpPaths !== updateConfig.manifest.httpPaths;
|
||||
const httpPortChanged = app.manifest.httpPort !== updateConfig.manifest.httpPort;
|
||||
const proxyAuthChanged = !_.isEqual(safe.query(app.manifest, 'addons.proxyAuth'), safe.query(updateConfig.manifest, 'addons.proxyAuth'));
|
||||
|
||||
@@ -616,7 +640,7 @@ async function update(app, args, progressCallback) {
|
||||
delete app.portBindings[portName];
|
||||
}
|
||||
|
||||
await updateApp(app, _.pick(updateConfig, 'manifest', 'appStoreId', 'memoryLimit')), // switch over to the new config
|
||||
await updateApp(app, _.pick(updateConfig, 'manifest', 'appStoreId', 'memoryLimit')); // switch over to the new config
|
||||
|
||||
await progressCallback({ percent: 45, message: 'Downloading icon' });
|
||||
await downloadIcon(app);
|
||||
@@ -630,7 +654,7 @@ async function update(app, args, progressCallback) {
|
||||
await startApp(app);
|
||||
|
||||
await progressCallback({ percent: 90, message: 'Configuring reverse proxy' });
|
||||
if (httpPathsChanged || proxyAuthChanged || httpPortChanged) {
|
||||
if (proxyAuthChanged || httpPortChanged) {
|
||||
await reverseProxy.configureApp(app, AuditSource.APPTASK);
|
||||
}
|
||||
|
||||
@@ -666,6 +690,7 @@ async function stop(app, args, progressCallback) {
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
await progressCallback({ percent: 20, message: 'Stopping container' });
|
||||
await reverseProxy.unconfigureApp(app); // removing nginx configs also means that we can auto-cleanup old certs since they are not referenced
|
||||
await docker.stopContainers(app.id);
|
||||
|
||||
await progressCallback({ percent: 50, message: 'Stopping app services' });
|
||||
@@ -711,7 +736,7 @@ async function uninstall(app, args, progressCallback) {
|
||||
await docker.deleteImage(app.manifest);
|
||||
|
||||
await progressCallback({ percent: 70, message: 'Unregistering domains' });
|
||||
await dns.unregisterLocations([ { subdomain: app.location, domain: app.domain } ].concat(app.alternateDomains).concat(app.aliasDomains), progressCallback);
|
||||
await dns.unregisterLocations([ { subdomain: app.subdomain, domain: app.domain } ].concat(app.secondaryDomains).concat(app.redirectDomains).concat(app.aliasDomains), progressCallback);
|
||||
|
||||
await progressCallback({ percent: 90, message: 'Cleanup logs' });
|
||||
await cleanupLogs(app);
|
||||
|
||||
@@ -19,5 +19,6 @@ AuditSource.HEALTH_MONITOR = new AuditSource('healthmonitor');
|
||||
AuditSource.EXTERNAL_LDAP_TASK = new AuditSource('externalldap');
|
||||
AuditSource.EXTERNAL_LDAP_AUTO_CREATE = new AuditSource('externalldap');
|
||||
AuditSource.APPTASK = new AuditSource('apptask');
|
||||
AuditSource.PLATFORM = new AuditSource('platform');
|
||||
|
||||
exports = module.exports = AuditSource;
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
'use strict';
|
||||
|
||||
const BoxError = require('./boxerror.js');
|
||||
|
||||
exports = module.exports = {
|
||||
run,
|
||||
|
||||
@@ -8,16 +10,17 @@ exports = module.exports = {
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
backupFormat = require('./backupformat.js'),
|
||||
backups = require('./backups.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:backupcleaner'),
|
||||
moment = require('moment'),
|
||||
mounts = require('./mounts.js'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
storage = require('./storage.js'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
|
||||
@@ -35,7 +38,7 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
|
||||
else backup.discardReason = 'creating-too-long';
|
||||
} else if (referencedBackupIds.includes(backup.id)) {
|
||||
backup.keepReason = 'reference';
|
||||
} else if ((now - backup.creationTime) < (backup.preserveSecs * 1000)) {
|
||||
} else if ((backup.preserveSecs === -1) || ((now - backup.creationTime) < (backup.preserveSecs * 1000))) {
|
||||
backup.keepReason = 'preserveSecs';
|
||||
} else if ((now - backup.creationTime < policy.keepWithinSecs * 1000) || policy.keepWithinSecs < 0) {
|
||||
backup.keepReason = 'keepWithinSecs';
|
||||
@@ -78,49 +81,42 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
|
||||
}
|
||||
}
|
||||
|
||||
async function cleanupBackup(backupConfig, backup, progressCallback) {
|
||||
async function removeBackup(backupConfig, backup, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backup, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
|
||||
const backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
|
||||
|
||||
return new Promise((resolve) => {
|
||||
function done(error) {
|
||||
if (error) {
|
||||
debug('cleanupBackup: error removing backup %j : %s', backup, error.message);
|
||||
return resolve();
|
||||
}
|
||||
let removeError;
|
||||
if (backup.format ==='tgz') {
|
||||
progressCallback({ message: `${backup.remotePath}: Removing ${backupFilePath}`});
|
||||
[removeError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFilePath));
|
||||
} else {
|
||||
progressCallback({ message: `${backup.remotePath}: Removing directory ${backupFilePath}`});
|
||||
[removeError] = await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath, progressCallback));
|
||||
}
|
||||
|
||||
// prune empty directory if possible
|
||||
storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath), async function (error) {
|
||||
if (error) debug('cleanupBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), error.message);
|
||||
if (removeError) {
|
||||
debug('removeBackup: error removing backup %j : %s', backup, removeError.message);
|
||||
return;
|
||||
}
|
||||
|
||||
const [delError] = await safe(backups.del(backup.id));
|
||||
if (delError) debug('cleanupBackup: error removing from database', delError);
|
||||
else debug('cleanupBackup: removed %s', backup.id);
|
||||
// prune empty directory if possible
|
||||
const [pruneError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath)));
|
||||
if (pruneError) debug('removeBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), pruneError.message);
|
||||
|
||||
resolve();
|
||||
});
|
||||
}
|
||||
|
||||
if (backup.format ==='tgz') {
|
||||
progressCallback({ message: `${backup.id}: Removing ${backupFilePath}`});
|
||||
storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, done);
|
||||
} else {
|
||||
const events = storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath);
|
||||
events.on('progress', (message) => progressCallback({ message: `${backup.id}: ${message}` }));
|
||||
events.on('done', done);
|
||||
}
|
||||
});
|
||||
const [delError] = await safe(backups.del(backup.id));
|
||||
if (delError) debug(`removeBackup: error removing ${backup.id} from database`, delError);
|
||||
else debug(`removeBackup: removed ${backup.remotePath}`);
|
||||
}
|
||||
|
||||
async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback) {
|
||||
async function cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert(Array.isArray(referencedAppBackupIds));
|
||||
assert(Array.isArray(referencedBackupIds));
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
let removedAppBackupIds = [];
|
||||
const removedAppBackupPaths = [];
|
||||
|
||||
const allApps = await apps.list();
|
||||
const allAppIds = allApps.map(a => a.id);
|
||||
@@ -137,26 +133,49 @@ async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressC
|
||||
// apply backup policy per app. keep latest backup only for existing apps
|
||||
let appBackupsToRemove = [];
|
||||
for (const appId of Object.keys(appBackupsById)) {
|
||||
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedAppBackupIds);
|
||||
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedBackupIds);
|
||||
appBackupsToRemove = appBackupsToRemove.concat(appBackupsById[appId].filter(b => !b.keepReason));
|
||||
}
|
||||
|
||||
for (const appBackup of appBackupsToRemove) {
|
||||
await progressCallback({ message: `Removing app backup (${appBackup.identifier}): ${appBackup.id}`});
|
||||
removedAppBackupIds.push(appBackup.id);
|
||||
await cleanupBackup(backupConfig, appBackup, progressCallback); // never errors
|
||||
removedAppBackupPaths.push(appBackup.remotePath);
|
||||
await removeBackup(backupConfig, appBackup, progressCallback); // never errors
|
||||
}
|
||||
|
||||
debug('cleanupAppBackups: done');
|
||||
|
||||
return removedAppBackupIds;
|
||||
return removedAppBackupPaths;
|
||||
}
|
||||
|
||||
async function cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert(Array.isArray(referencedBackupIds));
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const removedMailBackupPaths = [];
|
||||
|
||||
const mailBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_MAIL, 1, 1000);
|
||||
|
||||
applyBackupRetentionPolicy(mailBackups, _.extend({ keepLatest: true }, backupConfig.retentionPolicy), referencedBackupIds);
|
||||
|
||||
for (const mailBackup of mailBackups) {
|
||||
if (mailBackup.keepReason) continue;
|
||||
await progressCallback({ message: `Removing mail backup ${mailBackup.remotePath}`});
|
||||
removedMailBackupPaths.push(mailBackup.remotePath);
|
||||
await removeBackup(backupConfig, mailBackup, progressCallback); // never errors
|
||||
}
|
||||
|
||||
debug('cleanupMailBackups: done');
|
||||
|
||||
return removedMailBackupPaths;
|
||||
}
|
||||
|
||||
async function cleanupBoxBackups(backupConfig, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
let referencedAppBackupIds = [], removedBoxBackupIds = [];
|
||||
let referencedBackupIds = [], removedBoxBackupPaths = [];
|
||||
|
||||
const boxBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_BOX, 1, 1000);
|
||||
|
||||
@@ -164,48 +183,48 @@ async function cleanupBoxBackups(backupConfig, progressCallback) {
|
||||
|
||||
for (const boxBackup of boxBackups) {
|
||||
if (boxBackup.keepReason) {
|
||||
referencedAppBackupIds = referencedAppBackupIds.concat(boxBackup.dependsOn);
|
||||
referencedBackupIds = referencedBackupIds.concat(boxBackup.dependsOn);
|
||||
continue;
|
||||
}
|
||||
|
||||
await progressCallback({ message: `Removing box backup ${boxBackup.id}`});
|
||||
await progressCallback({ message: `Removing box backup ${boxBackup.remotePath}`});
|
||||
|
||||
removedBoxBackupIds.push(boxBackup.id);
|
||||
await cleanupBackup(backupConfig, boxBackup, progressCallback);
|
||||
removedBoxBackupPaths.push(boxBackup.remotePath);
|
||||
await removeBackup(backupConfig, boxBackup, progressCallback);
|
||||
}
|
||||
|
||||
debug('cleanupBoxBackups: done');
|
||||
|
||||
return { removedBoxBackupIds, referencedAppBackupIds };
|
||||
return { removedBoxBackupPaths, referencedBackupIds };
|
||||
}
|
||||
|
||||
// cleans up the database by checking if backup existsing in the remote
|
||||
async function cleanupMissingBackups(backupConfig, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const perPage = 1000;
|
||||
let missingBackupIds = [];
|
||||
const backupExists = util.promisify(storage.api(backupConfig.provider).exists);
|
||||
const missingBackupPaths = [];
|
||||
|
||||
if (constants.TEST) return missingBackupIds;
|
||||
if (constants.TEST) return missingBackupPaths;
|
||||
|
||||
let page = 1, result = [];
|
||||
do {
|
||||
result = await backups.list(page, perPage);
|
||||
|
||||
for (const backup of result) {
|
||||
let backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
|
||||
let backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
|
||||
if (backup.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory
|
||||
|
||||
const [existsError, exists] = await safe(backupExists(backupConfig, backupFilePath));
|
||||
const [existsError, exists] = await safe(storage.api(backupConfig.provider).exists(backupConfig, backupFilePath));
|
||||
if (existsError || exists) continue;
|
||||
|
||||
await progressCallback({ message: `Removing missing backup ${backup.id}`});
|
||||
await progressCallback({ message: `Removing missing backup ${backup.remotePath}`});
|
||||
|
||||
const [delError] = await safe(backups.del(backup.id));
|
||||
if (delError) debug(`cleanupBackup: error removing ${backup.id} from database`, delError);
|
||||
if (delError) debug(`cleanupMissingBackups: error removing ${backup.id} from database`, delError);
|
||||
|
||||
missingBackupIds.push(backup.id);
|
||||
missingBackupPaths.push(backup.remotePath);
|
||||
}
|
||||
|
||||
++ page;
|
||||
@@ -213,7 +232,7 @@ async function cleanupMissingBackups(backupConfig, progressCallback) {
|
||||
|
||||
debug('cleanupMissingBackups: done');
|
||||
|
||||
return missingBackupIds;
|
||||
return missingBackupPaths;
|
||||
}
|
||||
|
||||
// removes the snapshots of apps that have been uninstalled
|
||||
@@ -226,29 +245,23 @@ async function cleanupSnapshots(backupConfig) {
|
||||
|
||||
delete info.box;
|
||||
|
||||
const progressCallback = (progress) => { debug(`cleanupSnapshots: ${progress.message}`); };
|
||||
|
||||
for (const appId of Object.keys(info)) {
|
||||
const app = await apps.get(appId);
|
||||
if (app) continue; // app is still installed
|
||||
|
||||
await new Promise((resolve) => {
|
||||
async function done(/* ignoredError */) {
|
||||
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
|
||||
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));
|
||||
if (info[appId].format ==='tgz') {
|
||||
await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`)), { debug });
|
||||
} else {
|
||||
await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`), progressCallback), { debug });
|
||||
}
|
||||
|
||||
await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
|
||||
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
|
||||
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
|
||||
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));
|
||||
|
||||
resolve();
|
||||
}
|
||||
|
||||
if (info[appId].format ==='tgz') {
|
||||
storage.api(backupConfig.provider).remove(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format), done);
|
||||
} else {
|
||||
const events = storage.api(backupConfig.provider).removeDir(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format));
|
||||
events.on('progress', function (detail) { debug(`cleanupSnapshots: ${detail}`); });
|
||||
events.on('done', done);
|
||||
}
|
||||
});
|
||||
await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
|
||||
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
|
||||
}
|
||||
|
||||
debug('cleanupSnapshots: done');
|
||||
@@ -259,22 +272,32 @@ async function run(progressCallback) {
|
||||
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
|
||||
if (mounts.isManagedProvider(backupConfig.provider) || backupConfig.provider === 'mountpoint') {
|
||||
const hostPath = mounts.isManagedProvider(backupConfig.provider) ? paths.MANAGED_BACKUP_MOUNT_DIR : backupConfig.mountPoint;
|
||||
const status = await mounts.getStatus(backupConfig.provider, hostPath); // { state, message }
|
||||
debug(`clean: mount point status is ${JSON.stringify(status)}`);
|
||||
if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`);
|
||||
}
|
||||
|
||||
if (backupConfig.retentionPolicy.keepWithinSecs < 0) {
|
||||
debug('cleanup: keeping all backups');
|
||||
return {};
|
||||
}
|
||||
|
||||
await progressCallback({ percent: 10, message: 'Cleaning box backups' });
|
||||
const { removedBoxBackupIds, referencedAppBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback);
|
||||
const { removedBoxBackupPaths, referencedBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback); // references is app or mail backup ids
|
||||
|
||||
await progressCallback({ percent: 20, message: 'Cleaning mail backups' });
|
||||
const removedMailBackupPaths = await cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback);
|
||||
|
||||
await progressCallback({ percent: 40, message: 'Cleaning app backups' });
|
||||
const removedAppBackupIds = await cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback);
|
||||
const removedAppBackupPaths = await cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback);
|
||||
|
||||
await progressCallback({ percent: 70, message: 'Cleaning missing backups' });
|
||||
const missingBackupIds = await cleanupMissingBackups(backupConfig, progressCallback);
|
||||
const missingBackupPaths = await cleanupMissingBackups(backupConfig, progressCallback);
|
||||
|
||||
await progressCallback({ percent: 90, message: 'Cleaning snapshots' });
|
||||
await cleanupSnapshots(backupConfig);
|
||||
|
||||
return { removedBoxBackupIds, removedAppBackupIds, missingBackupIds };
|
||||
return { removedBoxBackupPaths, removedMailBackupPaths, removedAppBackupPaths, missingBackupPaths };
|
||||
}
|
||||
|
||||
12
src/backupformat.js
Normal file
12
src/backupformat.js
Normal file
@@ -0,0 +1,12 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
api
|
||||
};
|
||||
|
||||
function api(format) {
|
||||
switch (format) {
|
||||
case 'tgz': return require('./backupformat/tgz.js');
|
||||
case 'rsync': return require('./backupformat/rsync.js');
|
||||
}
|
||||
}
|
||||
245
src/backupformat/rsync.js
Normal file
245
src/backupformat/rsync.js
Normal file
@@ -0,0 +1,245 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
getBackupFilePath,
|
||||
download,
|
||||
upload,
|
||||
|
||||
_saveFsMetadata: saveFsMetadata,
|
||||
_restoreFsMetadata: restoreFsMetadata
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
DataLayout = require('../datalayout.js'),
|
||||
debug = require('debug')('box:backupformat/rsync'),
|
||||
fs = require('fs'),
|
||||
hush = require('../hush.js'),
|
||||
once = require('../once.js'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
storage = require('../storage.js'),
|
||||
syncer = require('../syncer.js'),
|
||||
util = require('util');
|
||||
|
||||
function getBackupFilePath(backupConfig, remotePath) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
|
||||
const rootPath = storage.api(backupConfig.provider).getRootPath(backupConfig);
|
||||
return path.join(rootPath, remotePath);
|
||||
}
|
||||
|
||||
function sync(backupConfig, remotePath, dataLayout, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
|
||||
const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
|
||||
const removeDir = util.callbackify(storage.api(backupConfig.provider).removeDir);
|
||||
const remove = util.callbackify(storage.api(backupConfig.provider).remove);
|
||||
|
||||
syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
|
||||
debug('sync: processing task: %j', task);
|
||||
// the empty task.path is special to signify the directory
|
||||
const destPath = task.path && backupConfig.encryption ? hush.encryptFilePath(task.path, backupConfig.encryption) : task.path;
|
||||
const backupFilePath = path.join(getBackupFilePath(backupConfig, remotePath), destPath);
|
||||
|
||||
if (task.operation === 'removedir') {
|
||||
debug(`Removing directory ${backupFilePath}`);
|
||||
return removeDir(backupConfig, backupFilePath, progressCallback, iteratorCallback);
|
||||
} else if (task.operation === 'remove') {
|
||||
debug(`Removing ${backupFilePath}`);
|
||||
return remove(backupConfig, backupFilePath, iteratorCallback);
|
||||
}
|
||||
|
||||
let retryCount = 0;
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after read stream error
|
||||
|
||||
++retryCount;
|
||||
if (task.operation === 'add') {
|
||||
progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
|
||||
debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
|
||||
const stream = hush.createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
|
||||
stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
|
||||
stream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` }); // 0M@0MBps looks wrong
|
||||
});
|
||||
// only create the destination path when we have confirmation that the source is available. otherwise, we end up with
|
||||
// files owned as 'root' and the cp later will fail
|
||||
stream.on('open', function () {
|
||||
storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
|
||||
debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
|
||||
retryCallback(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
}, iteratorCallback);
|
||||
}, concurrency, function (error) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
// this is not part of 'snapshotting' because we need root access to traverse
|
||||
async function saveFsMetadata(dataLayout, metadataFile) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof metadataFile, 'string');
|
||||
|
||||
// contains paths prefixed with './'
|
||||
const metadata = {
|
||||
emptyDirs: [],
|
||||
execFiles: [],
|
||||
symlinks: []
|
||||
};
|
||||
|
||||
// we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
|
||||
for (let lp of dataLayout.localPaths()) {
|
||||
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
|
||||
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));
|
||||
|
||||
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
|
||||
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
|
||||
|
||||
const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
|
||||
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
|
||||
const target = safe.fs.readlinkSync(sl);
|
||||
return { path: dataLayout.toRemotePath(sl), target };
|
||||
}));
|
||||
}
|
||||
|
||||
if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
|
||||
}
|
||||
|
||||
async function restoreFsMetadata(dataLayout, metadataFile) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof metadataFile, 'string');
|
||||
|
||||
debug(`Recreating empty directories in ${dataLayout.toString()}`);
|
||||
|
||||
const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
|
||||
if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message);
|
||||
const metadata = safe.JSON.parse(metadataJson);
|
||||
if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message);
|
||||
|
||||
for (const emptyDir of metadata.emptyDirs) {
|
||||
const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true }));
|
||||
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`);
|
||||
}
|
||||
|
||||
for (const execFile of metadata.execFiles) {
|
||||
const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8)));
|
||||
if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`);
|
||||
}
|
||||
|
||||
for (const symlink of (metadata.symlinks || [])) {
|
||||
if (!symlink.target) continue;
|
||||
// the path may not exist if we had a directory full of symlinks
|
||||
const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
|
||||
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
|
||||
const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
|
||||
if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backupFilePath, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);
|
||||
|
||||
function downloadFile(entry, done) {
|
||||
let relativePath = path.relative(backupFilePath, entry.fullPath);
|
||||
if (backupConfig.encryption) {
|
||||
const { error, result } = hush.decryptFilePath(relativePath, backupConfig.encryption);
|
||||
if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
|
||||
relativePath = result;
|
||||
}
|
||||
const destFilePath = dataLayout.toLocalPath('./' + relativePath);
|
||||
|
||||
fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
|
||||
if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));
|
||||
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
|
||||
if (error) {
|
||||
progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
|
||||
return retryCallback(error);
|
||||
}
|
||||
|
||||
let destStream = hush.createWriteStream(destFilePath, backupConfig.encryption);
|
||||
|
||||
// protect against multiple errors. must destroy the write stream so that a previous retry does not write
|
||||
let closeAndRetry = once((error) => {
|
||||
if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
|
||||
else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });
|
||||
sourceStream.destroy();
|
||||
destStream.destroy();
|
||||
retryCallback(error);
|
||||
});
|
||||
|
||||
destStream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
destStream.on('error', closeAndRetry);
|
||||
|
||||
sourceStream.on('error', closeAndRetry);
|
||||
|
||||
progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });
|
||||
|
||||
sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
|
||||
});
|
||||
}, done);
|
||||
});
|
||||
}
|
||||
|
||||
storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
|
||||
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
|
||||
const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
|
||||
|
||||
async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
|
||||
}, callback);
|
||||
}
|
||||
|
||||
async function download(backupConfig, remotePath, dataLayout, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);
|
||||
|
||||
const backupFilePath = getBackupFilePath(backupConfig, remotePath);
|
||||
const downloadDirAsync = util.promisify(downloadDir);
|
||||
|
||||
await downloadDirAsync(backupConfig, backupFilePath, dataLayout, progressCallback);
|
||||
await restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
|
||||
}
|
||||
|
||||
async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof dataLayout, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const syncAsync = util.promisify(sync);
|
||||
|
||||
await saveFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
|
||||
await syncAsync(backupConfig, remotePath, dataLayout, progressCallback);
|
||||
}
|
||||
195
src/backupformat/tgz.js
Normal file
195
src/backupformat/tgz.js
Normal file
@@ -0,0 +1,195 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
getBackupFilePath,
|
||||
download,
|
||||
upload
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
DataLayout = require('../datalayout.js'),
|
||||
debug = require('debug')('box:backupformat/tgz'),
|
||||
{ DecryptStream, EncryptStream } = require('../hush.js'),
|
||||
once = require('../once.js'),
|
||||
path = require('path'),
|
||||
progressStream = require('progress-stream'),
|
||||
storage = require('../storage.js'),
|
||||
tar = require('tar-fs'),
|
||||
zlib = require('zlib');
|
||||
|
||||
function getBackupFilePath(backupConfig, remotePath) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
|
||||
const rootPath = storage.api(backupConfig.provider).getRootPath(backupConfig);
|
||||
|
||||
const fileType = backupConfig.encryption ? '.tar.gz.enc' : '.tar.gz';
|
||||
return path.join(rootPath, remotePath + fileType);
|
||||
}
|
||||
|
||||
function tarPack(dataLayout, encryption) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
const pack = tar.pack('/', {
|
||||
dereference: false, // pack the symlink and not what it points to
|
||||
entries: dataLayout.localPaths(),
|
||||
ignoreStatError: (path, err) => {
|
||||
debug(`tarPack: error stat'ing ${path} - ${err.code}`);
|
||||
return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
|
||||
},
|
||||
map: function(header) {
|
||||
header.name = dataLayout.toRemotePath(header.name);
|
||||
// the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
|
||||
// https://www.systutorials.com/docs/linux/man/5-star/
|
||||
if (header.size > 8589934590 || header.name > 99) header.pax = { size: header.size };
|
||||
return header;
|
||||
},
|
||||
strict: false // do not error for unknown types (skip fifo, char/block devices)
|
||||
});
|
||||
|
||||
const gzip = zlib.createGzip({});
|
||||
const ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds
|
||||
|
||||
pack.on('error', function (error) {
|
||||
debug('tarPack: tar stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
gzip.on('error', function (error) {
|
||||
debug('tarPack: gzip stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
if (encryption) {
|
||||
const encryptStream = new EncryptStream(encryption);
|
||||
encryptStream.on('error', function (error) {
|
||||
debug('tarPack: encrypt stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
pack.pipe(gzip).pipe(encryptStream).pipe(ps);
|
||||
} else {
|
||||
pack.pipe(gzip).pipe(ps);
|
||||
}
|
||||
|
||||
return ps;
|
||||
}
|
||||
|
||||
function tarExtract(inStream, dataLayout, encryption) {
|
||||
assert.strictEqual(typeof inStream, 'object');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
const gunzip = zlib.createGunzip({});
|
||||
const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
|
||||
const extract = tar.extract('/', {
|
||||
map: function (header) {
|
||||
header.name = dataLayout.toLocalPath(header.name);
|
||||
return header;
|
||||
},
|
||||
dmode: 500 // ensure directory is writable
|
||||
});
|
||||
|
||||
const emitError = once((error) => {
|
||||
inStream.destroy();
|
||||
ps.emit('error', error);
|
||||
});
|
||||
|
||||
inStream.on('error', function (error) {
|
||||
debug('tarExtract: input stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
gunzip.on('error', function (error) {
|
||||
debug('tarExtract: gunzip stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
extract.on('error', function (error) {
|
||||
debug('tarExtract: extract stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
extract.on('finish', function () {
|
||||
debug('tarExtract: done.');
|
||||
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not extract
|
||||
ps.emit('done');
|
||||
});
|
||||
|
||||
if (encryption) {
|
||||
const decrypt = new DecryptStream(encryption);
|
||||
decrypt.on('error', function (error) {
|
||||
debug('tarExtract: decrypt stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, `Failed to decrypt: ${error.message}`));
|
||||
});
|
||||
inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
|
||||
} else {
|
||||
inStream.pipe(ps).pipe(gunzip).pipe(extract);
|
||||
}
|
||||
|
||||
return ps;
|
||||
}
|
||||
|
||||
async function download(backupConfig, remotePath, dataLayout, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);
|
||||
|
||||
const backupFilePath = getBackupFilePath(backupConfig, remotePath);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
progressCallback({ message: `Downloading backup ${remotePath}` });
|
||||
|
||||
storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
const ps = tarExtract(sourceStream, dataLayout, backupConfig.encryption);
|
||||
|
||||
ps.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
ps.on('error', retryCallback);
|
||||
ps.on('done', retryCallback);
|
||||
});
|
||||
}, (error) => {
|
||||
if (error) return reject(error);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof dataLayout, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error
|
||||
|
||||
const tarStream = tarPack(dataLayout, backupConfig.encryption);
|
||||
|
||||
tarStream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
tarStream.on('error', retryCallback); // already returns BoxError
|
||||
|
||||
storage.api(backupConfig.provider).upload(backupConfig, getBackupFilePath(backupConfig, remotePath), tarStream, retryCallback);
|
||||
}, (error) => {
|
||||
if (error) return reject(error);
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
130
src/backups.js
130
src/backups.js
@@ -6,6 +6,7 @@ exports = module.exports = {
|
||||
getByTypePaged,
|
||||
add,
|
||||
update,
|
||||
setState,
|
||||
list,
|
||||
del,
|
||||
|
||||
@@ -52,29 +53,24 @@ const assert = require('assert'),
|
||||
ejs = require('ejs'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
hat = require('./hat.js'),
|
||||
locker = require('./locker.js'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
storage = require('./storage.js'),
|
||||
tasks = require('./tasks.js'),
|
||||
util = require('util');
|
||||
tasks = require('./tasks.js');
|
||||
|
||||
const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/cloudron-backup.ejs', { encoding: 'utf8' });
|
||||
|
||||
const BACKUPS_FIELDS = [ 'id', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
|
||||
|
||||
// helper until all storage providers have been ported
|
||||
function maybePromisify(func) {
|
||||
if (util.types.isAsyncFunction(func)) return func;
|
||||
return util.promisify(func);
|
||||
}
|
||||
const BACKUPS_FIELDS = [ 'id', 'remotePath', 'label', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOnJson', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
|
||||
|
||||
function postProcess(result) {
|
||||
assert.strictEqual(typeof result, 'object');
|
||||
|
||||
result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
|
||||
result.dependsOn = result.dependsOnJson ? safe.JSON.parse(result.dependsOnJson) : [];
|
||||
delete result.dependsOnJson;
|
||||
|
||||
result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
|
||||
delete result.manifestJson;
|
||||
@@ -116,9 +112,9 @@ function generateEncryptionKeysSync(password) {
|
||||
};
|
||||
}
|
||||
|
||||
async function add(id, data) {
|
||||
async function add(data) {
|
||||
assert(data && typeof data === 'object');
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof data.remotePath, 'string');
|
||||
assert(data.encryptionVersion === null || typeof data.encryptionVersion === 'number');
|
||||
assert.strictEqual(typeof data.packageVersion, 'string');
|
||||
assert.strictEqual(typeof data.type, 'string');
|
||||
@@ -127,15 +123,19 @@ async function add(id, data) {
|
||||
assert(Array.isArray(data.dependsOn));
|
||||
assert.strictEqual(typeof data.manifest, 'object');
|
||||
assert.strictEqual(typeof data.format, 'string');
|
||||
assert.strictEqual(typeof data.preserveSecs, 'number');
|
||||
|
||||
const creationTime = data.creationTime || new Date(); // allow tests to set the time
|
||||
const manifestJson = JSON.stringify(data.manifest);
|
||||
const id = `${data.type}_${data.identifier}_v${data.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
|
||||
|
||||
const [error] = await safe(database.query('INSERT INTO backups (id, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
[ id, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, data.dependsOn.join(','), manifestJson, data.format ]));
|
||||
const [error] = await safe(database.query('INSERT INTO backups (id, remotePath, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOnJson, manifestJson, format, preserveSecs) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
[ id, data.remotePath, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, JSON.stringify(data.dependsOn), manifestJson, data.format, data.preserveSecs ]));
|
||||
|
||||
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'Backup already exists');
|
||||
if (error) throw error;
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
async function getByIdentifierAndStatePaged(identifier, state, page, perPage) {
|
||||
@@ -172,19 +172,55 @@ async function getByTypePaged(type, page, perPage) {
|
||||
return results;
|
||||
}
|
||||
|
||||
async function update(id, backup) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof backup, 'object');
|
||||
function validateLabel(label) {
|
||||
assert.strictEqual(typeof label, 'string');
|
||||
|
||||
let fields = [ ], values = [ ];
|
||||
for (const p in backup) {
|
||||
fields.push(p + ' = ?');
|
||||
values.push(backup[p]);
|
||||
if (label.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'label too long');
|
||||
if (/[^a-zA-Z0-9._()-]/.test(label)) return new BoxError(BoxError.BAD_FIELD, 'label can only contain alphanumerals, dot, hyphen, brackets or underscore');
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// this is called by REST API
|
||||
async function update(id, data) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
|
||||
let error;
|
||||
if ('label' in data) {
|
||||
error = validateLabel(data.label);
|
||||
if (error) throw error;
|
||||
}
|
||||
|
||||
const fields = [], values = [];
|
||||
for (const p in data) {
|
||||
if (p === 'label' || p === 'preserveSecs') {
|
||||
fields.push(p + ' = ?');
|
||||
values.push(data[p]);
|
||||
}
|
||||
}
|
||||
values.push(id);
|
||||
|
||||
const backup = await get(id);
|
||||
if (backup === null) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
|
||||
|
||||
const result = await database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values);
|
||||
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
|
||||
|
||||
if ('preserveSecs' in data) {
|
||||
// update the dependancies
|
||||
for (const depId of backup.dependsOn) {
|
||||
await database.query('UPDATE backups SET preserveSecs=? WHERE id = ?', [ data.preserveSecs, depId]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function setState(id, state) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof state, 'string');
|
||||
|
||||
const result = await database.query('UPDATE backups SET state = ? WHERE id = ?', [state, id]);
|
||||
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
|
||||
}
|
||||
|
||||
async function startBackupTask(auditSource) {
|
||||
@@ -205,7 +241,8 @@ async function startBackupTask(auditSource) {
|
||||
const errorMessage = error ? error.message : '';
|
||||
const timedOut = error ? error.code === tasks.ETIMEOUT : false;
|
||||
|
||||
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }), { debug });
|
||||
const backup = await get(backupId);
|
||||
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId, remotePath: backup?.remotePath }), { debug });
|
||||
});
|
||||
|
||||
return taskId;
|
||||
@@ -268,14 +305,15 @@ async function startCleanupTask(auditSource) {
|
||||
|
||||
const taskId = await tasks.add(tasks.TASK_CLEAN_BACKUPS, []);
|
||||
|
||||
tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackupIds, removedAppBackupIds, missingBackupIds }
|
||||
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
|
||||
tasks.startTask(taskId, {}, async (error, result) => { // result is { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
|
||||
await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
|
||||
taskId,
|
||||
errorMessage: error ? error.message : null,
|
||||
removedBoxBackupIds: result ? result.removedBoxBackupIds : [],
|
||||
removedAppBackupIds: result ? result.removedAppBackupIds : [],
|
||||
missingBackupIds: result ? result.missingBackupIds : []
|
||||
});
|
||||
removedBoxBackupPaths: result ? result.removedBoxBackupPaths : [],
|
||||
removedMailBackupPaths: result ? result.removedMailBackupPaths : [],
|
||||
removedAppBackupPaths: result ? result.removedAppBackupPaths : [],
|
||||
missingBackupPaths: result ? result.missingBackupPaths : []
|
||||
}), { debug });
|
||||
});
|
||||
|
||||
return taskId;
|
||||
@@ -296,29 +334,28 @@ async function testConfig(backupConfig) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
|
||||
const func = storage.api(backupConfig.provider);
|
||||
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider', { field: 'provider' });
|
||||
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');
|
||||
|
||||
if (backupConfig.format !== 'tgz' && backupConfig.format !== 'rsync') return new BoxError(BoxError.BAD_FIELD, 'unknown format', { field: 'format' });
|
||||
if (backupConfig.format !== 'tgz' && backupConfig.format !== 'rsync') return new BoxError(BoxError.BAD_FIELD, 'unknown format');
|
||||
|
||||
const job = safe.safeCall(function () { return new CronJob(backupConfig.schedulePattern); });
|
||||
if (!job) return new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern', { field: 'schedulePattern' });
|
||||
if (!job) return new BoxError(BoxError.BAD_FIELD, 'Invalid schedule pattern');
|
||||
|
||||
if ('password' in backupConfig) {
|
||||
if (typeof backupConfig.password !== 'string') return new BoxError(BoxError.BAD_FIELD, 'password must be a string', { field: 'password' });
|
||||
if (backupConfig.password.length < 8) return new BoxError(BoxError.BAD_FIELD, 'password must be atleast 8 characters', { field: 'password' });
|
||||
if (typeof backupConfig.password !== 'string') return new BoxError(BoxError.BAD_FIELD, 'password must be a string');
|
||||
if (backupConfig.password.length < 8) return new BoxError(BoxError.BAD_FIELD, 'password must be atleast 8 characters');
|
||||
}
|
||||
|
||||
const policy = backupConfig.retentionPolicy;
|
||||
if (!policy) return new BoxError(BoxError.BAD_FIELD, 'retentionPolicy is required', { field: 'retentionPolicy' });
|
||||
if (!['keepWithinSecs','keepDaily','keepWeekly','keepMonthly','keepYearly'].find(k => !!policy[k])) return new BoxError(BoxError.BAD_FIELD, 'properties missing', { field: 'retentionPolicy' });
|
||||
if ('keepWithinSecs' in policy && typeof policy.keepWithinSecs !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepWithinSecs must be a number', { field: 'retentionPolicy' });
|
||||
if ('keepDaily' in policy && typeof policy.keepDaily !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepDaily must be a number', { field: 'retentionPolicy' });
|
||||
if ('keepWeekly' in policy && typeof policy.keepWeekly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepWeekly must be a number', { field: 'retentionPolicy' });
|
||||
if ('keepMonthly' in policy && typeof policy.keepMonthly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepMonthly must be a number', { field: 'retentionPolicy' });
|
||||
if ('keepYearly' in policy && typeof policy.keepYearly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepYearly must be a number', { field: 'retentionPolicy' });
|
||||
if (!policy) return new BoxError(BoxError.BAD_FIELD, 'retentionPolicy is required');
|
||||
if (!['keepWithinSecs','keepDaily','keepWeekly','keepMonthly','keepYearly'].find(k => !!policy[k])) return new BoxError(BoxError.BAD_FIELD, 'properties missing');
|
||||
if ('keepWithinSecs' in policy && typeof policy.keepWithinSecs !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepWithinSecs must be a number');
|
||||
if ('keepDaily' in policy && typeof policy.keepDaily !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepDaily must be a number');
|
||||
if ('keepWeekly' in policy && typeof policy.keepWeekly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepWeekly must be a number');
|
||||
if ('keepMonthly' in policy && typeof policy.keepMonthly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepMonthly must be a number');
|
||||
if ('keepYearly' in policy && typeof policy.keepYearly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepYearly must be a number');
|
||||
|
||||
const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
|
||||
return error;
|
||||
await storage.api(backupConfig.provider).testConfig(backupConfig);
|
||||
}
|
||||
|
||||
// this skips password check since that policy is only at creation time
|
||||
@@ -326,10 +363,9 @@ async function testProviderConfig(backupConfig) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
|
||||
const func = storage.api(backupConfig.provider);
|
||||
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider', { field: 'provider' });
|
||||
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');
|
||||
|
||||
const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
|
||||
return error;
|
||||
await storage.api(backupConfig.provider).testConfig(backupConfig);
|
||||
}
|
||||
|
||||
async function remount(auditSource) {
|
||||
@@ -338,7 +374,7 @@ async function remount(auditSource) {
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
|
||||
const func = storage.api(backupConfig.provider);
|
||||
if (!func) throw new BoxError(BoxError.BAD_FIELD, 'unknown storage provider', { field: 'provider' });
|
||||
if (!func) throw new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');
|
||||
|
||||
await maybePromisify(storage.api(backupConfig.provider).remount)(backupConfig);
|
||||
await storage.api(backupConfig.provider).remount(backupConfig);
|
||||
}
|
||||
|
||||
@@ -12,39 +12,26 @@ exports = module.exports = {
|
||||
downloadMail,
|
||||
|
||||
upload,
|
||||
|
||||
_restoreFsMetadata: restoreFsMetadata,
|
||||
_saveFsMetadata: saveFsMetadata,
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backupFormat = require('./backupformat.js'),
|
||||
backups = require('./backups.js'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
constants = require('./constants.js'),
|
||||
crypto = require('crypto'),
|
||||
DataLayout = require('./datalayout.js'),
|
||||
database = require('./database.js'),
|
||||
debug = require('debug')('box:backuptask'),
|
||||
fs = require('fs'),
|
||||
once = require('once'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
progressStream = require('progress-stream'),
|
||||
safe = require('safetydance'),
|
||||
services = require('./services.js'),
|
||||
settings = require('./settings.js'),
|
||||
shell = require('./shell.js'),
|
||||
storage = require('./storage.js'),
|
||||
syncer = require('./syncer.js'),
|
||||
tar = require('tar-fs'),
|
||||
TransformStream = require('stream').Transform,
|
||||
zlib = require('zlib'),
|
||||
util = require('util');
|
||||
storage = require('./storage.js');
|
||||
|
||||
const BACKUP_UPLOAD_CMD = path.join(__dirname, 'scripts/backupupload.js');
|
||||
const getBackupConfig = util.callbackify(settings.getBackupConfig);
|
||||
|
||||
function canBackupApp(app) {
|
||||
// only backup apps that are installed or specific pending states
|
||||
@@ -60,600 +47,44 @@ function canBackupApp(app) {
|
||||
app.installationState === apps.ISTATE_PENDING_UPDATE; // called from apptask
|
||||
}
|
||||
|
||||
function encryptFilePath(filePath, encryption) {
|
||||
assert.strictEqual(typeof filePath, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
var encryptedParts = filePath.split('/').map(function (part) {
|
||||
let hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
|
||||
const iv = hmac.update(part).digest().slice(0, 16); // iv has to be deterministic, for our sync (copy) logic to work
|
||||
const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
|
||||
let crypt = cipher.update(part);
|
||||
crypt = Buffer.concat([ iv, crypt, cipher.final() ]);
|
||||
|
||||
return crypt.toString('base64') // ensures path is valid
|
||||
.replace(/\//g, '-') // replace '/' of base64 since it conflicts with path separator
|
||||
.replace(/=/g,''); // strip trailing = padding. this is only needed if we concat base64 strings, which we don't
|
||||
});
|
||||
|
||||
return encryptedParts.join('/');
|
||||
}
|
||||
|
||||
function decryptFilePath(filePath, encryption) {
|
||||
assert.strictEqual(typeof filePath, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
let decryptedParts = [];
|
||||
for (let part of filePath.split('/')) {
|
||||
part = part + Array(part.length % 4).join('='); // add back = padding
|
||||
part = part.replace(/-/g, '/'); // replace with '/'
|
||||
|
||||
try {
|
||||
const buffer = Buffer.from(part, 'base64');
|
||||
const iv = buffer.slice(0, 16);
|
||||
let decrypt = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
|
||||
const plainText = decrypt.update(buffer.slice(16));
|
||||
const plainTextString = Buffer.concat([ plainText, decrypt.final() ]).toString('utf8');
|
||||
const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
|
||||
if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };
|
||||
|
||||
decryptedParts.push(plainTextString);
|
||||
} catch (error) {
|
||||
debug(`Error decrypting part ${part} of path ${filePath}:`, error);
|
||||
return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
|
||||
}
|
||||
}
|
||||
|
||||
return { result: decryptedParts.join('/') };
|
||||
}
|
||||
|
||||
class EncryptStream extends TransformStream {
|
||||
constructor(encryption) {
|
||||
super();
|
||||
this._headerPushed = false;
|
||||
this._iv = crypto.randomBytes(16);
|
||||
this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
|
||||
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
|
||||
}
|
||||
|
||||
pushHeaderIfNeeded() {
|
||||
if (!this._headerPushed) {
|
||||
const magic = Buffer.from('CBV2');
|
||||
this.push(magic);
|
||||
this._hmac.update(magic);
|
||||
this.push(this._iv);
|
||||
this._hmac.update(this._iv);
|
||||
this._headerPushed = true;
|
||||
}
|
||||
}
|
||||
|
||||
_transform(chunk, ignoredEncoding, callback) {
|
||||
this.pushHeaderIfNeeded();
|
||||
|
||||
try {
|
||||
const crypt = this._cipher.update(chunk);
|
||||
this._hmac.update(crypt);
|
||||
callback(null, crypt);
|
||||
} catch (error) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
|
||||
_flush(callback) {
|
||||
try {
|
||||
this.pushHeaderIfNeeded(); // for 0-length files
|
||||
const crypt = this._cipher.final();
|
||||
this.push(crypt);
|
||||
this._hmac.update(crypt);
|
||||
callback(null, this._hmac.digest()); // +32 bytes
|
||||
} catch (error) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class DecryptStream extends TransformStream {
|
||||
constructor(encryption) {
|
||||
super();
|
||||
this._key = Buffer.from(encryption.dataKey, 'hex');
|
||||
this._header = Buffer.alloc(0);
|
||||
this._decipher = null;
|
||||
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
|
||||
this._buffer = Buffer.alloc(0);
|
||||
}
|
||||
|
||||
_transform(chunk, ignoredEncoding, callback) {
|
||||
const needed = 20 - this._header.length; // 4 for magic, 16 for iv
|
||||
|
||||
if (this._header.length !== 20) { // not gotten header yet
|
||||
this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
|
||||
if (this._header.length !== 20) return callback();
|
||||
|
||||
if (!this._header.slice(0, 4).equals(new Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));
|
||||
|
||||
const iv = this._header.slice(4);
|
||||
this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
|
||||
this._hmac.update(this._header);
|
||||
}
|
||||
|
||||
this._buffer = Buffer.concat([ this._buffer, chunk.slice(needed) ]);
|
||||
if (this._buffer.length < 32) return callback(); // hmac trailer length is 32
|
||||
|
||||
try {
|
||||
const cipherText = this._buffer.slice(0, -32);
|
||||
this._hmac.update(cipherText);
|
||||
const plainText = this._decipher.update(cipherText);
|
||||
this._buffer = this._buffer.slice(-32);
|
||||
callback(null, plainText);
|
||||
} catch (error) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
|
||||
_flush (callback) {
|
||||
if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));
|
||||
|
||||
try {
|
||||
if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));
|
||||
|
||||
const plainText = this._decipher.final();
|
||||
callback(null, plainText);
|
||||
} catch (error) {
|
||||
callback(error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function createReadStream(sourceFile, encryption) {
|
||||
assert.strictEqual(typeof sourceFile, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
var stream = fs.createReadStream(sourceFile);
|
||||
var ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
|
||||
|
||||
stream.on('error', function (error) {
|
||||
debug(`createReadStream: read stream error at ${sourceFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
|
||||
});
|
||||
|
||||
stream.on('open', () => ps.emit('open'));
|
||||
|
||||
if (encryption) {
|
||||
let encryptStream = new EncryptStream(encryption);
|
||||
|
||||
encryptStream.on('error', function (error) {
|
||||
debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
|
||||
});
|
||||
|
||||
return stream.pipe(encryptStream).pipe(ps);
|
||||
} else {
|
||||
return stream.pipe(ps);
|
||||
}
|
||||
}
|
||||
|
||||
function createWriteStream(destFile, encryption) {
|
||||
assert.strictEqual(typeof destFile, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
var stream = fs.createWriteStream(destFile);
|
||||
var ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
|
||||
|
||||
stream.on('error', function (error) {
|
||||
debug(`createWriteStream: write stream error ${destFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
|
||||
});
|
||||
|
||||
stream.on('finish', function () {
|
||||
debug('createWriteStream: done.');
|
||||
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not write
|
||||
ps.emit('done');
|
||||
});
|
||||
|
||||
if (encryption) {
|
||||
let decrypt = new DecryptStream(encryption);
|
||||
decrypt.on('error', function (error) {
|
||||
debug(`createWriteStream: decrypt stream error ${destFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
|
||||
});
|
||||
|
||||
ps.pipe(decrypt).pipe(stream);
|
||||
} else {
|
||||
ps.pipe(stream);
|
||||
}
|
||||
|
||||
return ps;
|
||||
}
|
||||
|
||||
function tarPack(dataLayout, encryption, callback) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var pack = tar.pack('/', {
|
||||
dereference: false, // pack the symlink and not what it points to
|
||||
entries: dataLayout.localPaths(),
|
||||
ignoreStatError: (path, err) => {
|
||||
debug(`tarPack: error stat'ing ${path} - ${err.code}`);
|
||||
return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
|
||||
},
|
||||
map: function(header) {
|
||||
header.name = dataLayout.toRemotePath(header.name);
|
||||
// the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
|
||||
// https://www.systutorials.com/docs/linux/man/5-star/
|
||||
if (header.size > 8589934590 || header.name > 99) header.pax = { size: header.size };
|
||||
return header;
|
||||
},
|
||||
strict: false // do not error for unknown types (skip fifo, char/block devices)
|
||||
});
|
||||
|
||||
var gzip = zlib.createGzip({});
|
||||
var ps = progressStream({ time: 10000 }); // emit 'progress' every 10 seconds
|
||||
|
||||
pack.on('error', function (error) {
|
||||
debug('tarPack: tar stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
gzip.on('error', function (error) {
|
||||
debug('tarPack: gzip stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
if (encryption) {
|
||||
const encryptStream = new EncryptStream(encryption);
|
||||
encryptStream.on('error', function (error) {
|
||||
debug('tarPack: encrypt stream error.', error);
|
||||
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
pack.pipe(gzip).pipe(encryptStream).pipe(ps);
|
||||
} else {
|
||||
pack.pipe(gzip).pipe(ps);
|
||||
}
|
||||
|
||||
return callback(null, ps);
|
||||
}
|
||||
|
||||
function sync(backupConfig, backupId, dataLayout, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// the number here has to take into account the s3.upload partSize (which is 10MB). So 20=200MB
|
||||
const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
|
||||
|
||||
syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
|
||||
debug('sync: processing task: %j', task);
|
||||
// the empty task.path is special to signify the directory
|
||||
const destPath = task.path && backupConfig.encryption ? encryptFilePath(task.path, backupConfig.encryption) : task.path;
|
||||
const backupFilePath = path.join(storage.getBackupFilePath(backupConfig, backupId, backupConfig.format), destPath);
|
||||
|
||||
if (task.operation === 'removedir') {
|
||||
debug(`Removing directory ${backupFilePath}`);
|
||||
return storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath)
|
||||
.on('progress', (message) => progressCallback({ message }))
|
||||
.on('done', iteratorCallback);
|
||||
} else if (task.operation === 'remove') {
|
||||
debug(`Removing ${backupFilePath}`);
|
||||
return storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, iteratorCallback);
|
||||
}
|
||||
|
||||
var retryCount = 0;
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after read stream error
|
||||
|
||||
++retryCount;
|
||||
if (task.operation === 'add') {
|
||||
progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
|
||||
debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
|
||||
var stream = createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
|
||||
stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
|
||||
stream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` }); // 0M@0MBps looks wrong
|
||||
});
|
||||
// only create the destination path when we have confirmation that the source is available. otherwise, we end up with
|
||||
// files owned as 'root' and the cp later will fail
|
||||
stream.on('open', function () {
|
||||
storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
|
||||
debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
|
||||
retryCallback(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
}, iteratorCallback);
|
||||
}, concurrency, function (error) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
// this is not part of 'snapshotting' because we need root access to traverse
|
||||
async function saveFsMetadata(dataLayout, metadataFile) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof metadataFile, 'string');
|
||||
|
||||
// contains paths prefixed with './'
|
||||
let metadata = {
|
||||
emptyDirs: [],
|
||||
execFiles: [],
|
||||
symlinks: []
|
||||
};
|
||||
|
||||
// we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
|
||||
for (let lp of dataLayout.localPaths()) {
|
||||
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
|
||||
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));
|
||||
|
||||
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
|
||||
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
|
||||
|
||||
const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
|
||||
if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
|
||||
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
|
||||
const target = safe.fs.readlinkSync(sl);
|
||||
return { path: dataLayout.toRemotePath(sl), target };
|
||||
}));
|
||||
}
|
||||
|
||||
if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
|
||||
}
|
||||
|
||||
// this function is called via backupupload (since it needs root to traverse app's directory)
|
||||
function upload(backupId, format, dataLayoutString, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
async function upload(remotePath, format, dataLayoutString, progressCallback) {
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof format, 'string');
|
||||
assert.strictEqual(typeof dataLayoutString, 'string');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`upload: id ${backupId} format ${format} dataLayout ${dataLayoutString}`);
|
||||
debug(`upload: path ${remotePath} format ${format} dataLayout ${dataLayoutString}`);
|
||||
|
||||
const dataLayout = DataLayout.fromString(dataLayoutString);
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
await safe(storage.api(backupConfig.provider).checkPreconditions(backupConfig, dataLayout));
|
||||
|
||||
getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
storage.api(backupConfig.provider).checkPreconditions(backupConfig, dataLayout, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (format === 'tgz') {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error
|
||||
|
||||
tarPack(dataLayout, backupConfig.encryption, function (error, tarStream) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
tarStream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
tarStream.on('error', retryCallback); // already returns BoxError
|
||||
|
||||
storage.api(backupConfig.provider).upload(backupConfig, storage.getBackupFilePath(backupConfig, backupId, format), tarStream, retryCallback);
|
||||
});
|
||||
}, callback);
|
||||
} else {
|
||||
async.series([
|
||||
saveFsMetadata.bind(null, dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`),
|
||||
sync.bind(null, backupConfig, backupId, dataLayout, progressCallback)
|
||||
], callback);
|
||||
}
|
||||
});
|
||||
});
|
||||
await backupFormat.api(format).upload(backupConfig, remotePath, dataLayout, progressCallback);
|
||||
}
|
||||
|
||||
function tarExtract(inStream, dataLayout, encryption, callback) {
|
||||
assert.strictEqual(typeof inStream, 'object');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var gunzip = zlib.createGunzip({});
|
||||
var ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
|
||||
var extract = tar.extract('/', {
|
||||
map: function (header) {
|
||||
header.name = dataLayout.toLocalPath(header.name);
|
||||
return header;
|
||||
},
|
||||
dmode: 500 // ensure directory is writable
|
||||
});
|
||||
|
||||
const emitError = once((error) => {
|
||||
inStream.destroy();
|
||||
ps.emit('error', error);
|
||||
});
|
||||
|
||||
inStream.on('error', function (error) {
|
||||
debug('tarExtract: input stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
gunzip.on('error', function (error) {
|
||||
debug('tarExtract: gunzip stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
extract.on('error', function (error) {
|
||||
debug('tarExtract: extract stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
extract.on('finish', function () {
|
||||
debug('tarExtract: done.');
|
||||
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not extract
|
||||
ps.emit('done');
|
||||
});
|
||||
|
||||
if (encryption) {
|
||||
let decrypt = new DecryptStream(encryption);
|
||||
decrypt.on('error', function (error) {
|
||||
debug('tarExtract: decrypt stream error.', error);
|
||||
emitError(new BoxError(BoxError.EXTERNAL_ERROR, `Failed to decrypt: ${error.message}`));
|
||||
});
|
||||
inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
|
||||
} else {
|
||||
inStream.pipe(ps).pipe(gunzip).pipe(extract);
|
||||
}
|
||||
|
||||
callback(null, ps);
|
||||
}
|
||||
|
||||
async function restoreFsMetadata(dataLayout, metadataFile) {
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof metadataFile, 'string');
|
||||
|
||||
debug(`Recreating empty directories in ${dataLayout.toString()}`);
|
||||
|
||||
var metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
|
||||
if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message);
|
||||
var metadata = safe.JSON.parse(metadataJson);
|
||||
if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message);
|
||||
|
||||
for (const emptyDir of metadata.emptyDirs) {
|
||||
const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true }));
|
||||
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`);
|
||||
}
|
||||
|
||||
for (const execFile of metadata.execFiles) {
|
||||
const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8)));
|
||||
if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`);
|
||||
}
|
||||
|
||||
for (const symlink of (metadata.symlinks || [])) {
|
||||
if (!symlink.target) continue;
|
||||
// the path may not exist if we had a directory full of symlinks
|
||||
const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
|
||||
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
|
||||
const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
|
||||
if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
|
||||
async function download(backupConfig, remotePath, format, dataLayout, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backupFilePath, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);
|
||||
|
||||
function downloadFile(entry, done) {
|
||||
let relativePath = path.relative(backupFilePath, entry.fullPath);
|
||||
if (backupConfig.encryption) {
|
||||
const { error, result } = decryptFilePath(relativePath, backupConfig.encryption);
|
||||
if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
|
||||
relativePath = result;
|
||||
}
|
||||
const destFilePath = dataLayout.toLocalPath('./' + relativePath);
|
||||
|
||||
fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
|
||||
if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));
|
||||
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
|
||||
if (error) {
|
||||
progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
|
||||
return retryCallback(error);
|
||||
}
|
||||
|
||||
let destStream = createWriteStream(destFilePath, backupConfig.encryption);
|
||||
|
||||
// protect against multiple errors. must destroy the write stream so that a previous retry does not write
|
||||
let closeAndRetry = once((error) => {
|
||||
if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
|
||||
else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });
|
||||
sourceStream.destroy();
|
||||
destStream.destroy();
|
||||
retryCallback(error);
|
||||
});
|
||||
|
||||
destStream.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
destStream.on('error', closeAndRetry);
|
||||
|
||||
sourceStream.on('error', closeAndRetry);
|
||||
|
||||
progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });
|
||||
|
||||
sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
|
||||
});
|
||||
}, done);
|
||||
});
|
||||
}
|
||||
|
||||
storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
|
||||
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
|
||||
const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
|
||||
|
||||
async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
|
||||
}, callback);
|
||||
}
|
||||
|
||||
function download(backupConfig, backupId, format, dataLayout, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof format, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`download: Downloading ${backupId} of format ${format} to ${dataLayout.toString()}`);
|
||||
debug(`download: Downloading ${remotePath} of format ${format} to ${dataLayout.toString()}`);
|
||||
|
||||
const backupFilePath = storage.getBackupFilePath(backupConfig, backupId, format);
|
||||
|
||||
if (format === 'tgz') {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
progressCallback({ message: `Downloading backup ${backupId}` });
|
||||
|
||||
storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
tarExtract(sourceStream, dataLayout, backupConfig.encryption, function (error, ps) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
ps.on('progress', function (progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
|
||||
progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
|
||||
});
|
||||
ps.on('error', retryCallback);
|
||||
ps.on('done', retryCallback);
|
||||
});
|
||||
});
|
||||
}, callback);
|
||||
} else {
|
||||
downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, async function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
[error] = await safe(restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`));
|
||||
callback(error);
|
||||
});
|
||||
}
|
||||
await backupFormat.api(format).download(backupConfig, remotePath, dataLayout, progressCallback);
|
||||
}
|
||||
|
||||
async function restore(backupConfig, backupId, progressCallback) {
|
||||
async function restore(backupConfig, remotePath, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
|
||||
if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);
|
||||
const dataLayout = new DataLayout(boxDataDir, []);
|
||||
|
||||
await util.promisify(download)(backupConfig, backupId, backupConfig.format, dataLayout, progressCallback);
|
||||
await download(backupConfig, remotePath, backupConfig.format, dataLayout, progressCallback);
|
||||
|
||||
debug('restore: download completed, importing database');
|
||||
|
||||
@@ -676,24 +107,20 @@ async function downloadApp(app, restoreConfig, progressCallback) {
|
||||
const startTime = new Date();
|
||||
const backupConfig = restoreConfig.backupConfig || await settings.getBackupConfig();
|
||||
|
||||
const downloadAsync = util.promisify(download);
|
||||
await downloadAsync(backupConfig, restoreConfig.backupId, restoreConfig.backupFormat, dataLayout, progressCallback);
|
||||
await download(backupConfig, restoreConfig.remotePath, restoreConfig.backupFormat, dataLayout, progressCallback);
|
||||
debug('downloadApp: time: %s', (new Date() - startTime)/1000);
|
||||
}
|
||||
|
||||
function runBackupUpload(uploadConfig, progressCallback, callback) {
|
||||
async function runBackupUpload(uploadConfig, progressCallback) {
|
||||
assert.strictEqual(typeof uploadConfig, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const { backupId, backupConfig, dataLayout, progressTag } = uploadConfig;
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
const { remotePath, backupConfig, dataLayout, progressTag } = uploadConfig;
|
||||
assert.strictEqual(typeof remotePath, 'string');
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof progressTag, 'string');
|
||||
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
|
||||
|
||||
let result = ''; // the script communicates error result as a string
|
||||
|
||||
// https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
|
||||
const envCopy = Object.assign({}, process.env);
|
||||
if (backupConfig.memoryLimit && backupConfig.memoryLimit >= 2*1024*1024*1024) {
|
||||
@@ -702,19 +129,19 @@ function runBackupUpload(uploadConfig, progressCallback, callback) {
|
||||
envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
|
||||
}
|
||||
|
||||
shell.sudo(`backup-${backupId}`, [ BACKUP_UPLOAD_CMD, backupId, backupConfig.format, dataLayout.toString() ], { env: envCopy, preserveEnv: true, ipc: true }, function (error) {
|
||||
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
|
||||
return callback(new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed'));
|
||||
} else if (error && error.code === 50) { // exited with error
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, result));
|
||||
}
|
||||
|
||||
callback();
|
||||
}).on('message', function (progress) { // this is { message } or { result }
|
||||
let result = ''; // the script communicates error result as a string
|
||||
function onMessage(progress) { // this is { message } or { result }
|
||||
if ('message' in progress) return progressCallback({ message: `${progress.message} (${progressTag})` });
|
||||
debug(`runBackupUpload: result - ${JSON.stringify(progress)}`);
|
||||
result = progress.result;
|
||||
});
|
||||
}
|
||||
|
||||
const [error] = await safe(shell.promises.sudo(`backup-${remotePath}`, [ BACKUP_UPLOAD_CMD, remotePath, backupConfig.format, dataLayout.toString() ], { env: envCopy, preserveEnv: true, ipc: true, onMessage }));
|
||||
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
|
||||
throw new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed');
|
||||
} else if (error && error.code === 50) { // exited with error
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, result);
|
||||
}
|
||||
}
|
||||
|
||||
async function snapshotBox(progressCallback) {
|
||||
@@ -738,7 +165,7 @@ async function uploadBoxSnapshot(backupConfig, progressCallback) {
|
||||
if (!boxDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`);
|
||||
|
||||
const uploadConfig = {
|
||||
backupId: 'snapshot/box',
|
||||
remotePath: 'snapshot/box',
|
||||
backupConfig,
|
||||
dataLayout: new DataLayout(boxDataDir, []),
|
||||
progressTag: 'box'
|
||||
@@ -748,39 +175,26 @@ async function uploadBoxSnapshot(backupConfig, progressCallback) {
|
||||
|
||||
const startTime = new Date();
|
||||
|
||||
await util.promisify(runBackupUpload)(uploadConfig, progressCallback);
|
||||
await runBackupUpload(uploadConfig, progressCallback);
|
||||
|
||||
debug(`uploadBoxSnapshot: took ${(new Date() - startTime)/1000} seconds`);
|
||||
|
||||
await backups.setSnapshotInfo('box', { timestamp: new Date().toISOString(), format: backupConfig.format });
|
||||
}
|
||||
|
||||
async function copy(backupConfig, sourceBackupId, destBackupId, options, progressCallback) {
|
||||
async function copy(backupConfig, srcRemotePath, destRemotePath, progressCallback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof sourceBackupId, 'string');
|
||||
assert.strictEqual(typeof destBackupId, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof srcRemotePath, 'string');
|
||||
assert.strictEqual(typeof destRemotePath, 'string');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const format = backupConfig.format;
|
||||
const { provider, format } = backupConfig;
|
||||
const oldFilePath = backupFormat.api(format).getBackupFilePath(backupConfig, srcRemotePath);
|
||||
const newFilePath = backupFormat.api(format).getBackupFilePath(backupConfig, destRemotePath);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const startTime = new Date();
|
||||
|
||||
const copyEvents = storage.api(backupConfig.provider).copy(backupConfig, storage.getBackupFilePath(backupConfig, sourceBackupId, format), storage.getBackupFilePath(backupConfig, destBackupId, format));
|
||||
copyEvents.on('progress', (message) => progressCallback({ message }));
|
||||
copyEvents.on('done', async function (copyBackupError) {
|
||||
const state = copyBackupError ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
|
||||
|
||||
const [error] = await safe(backups.update(destBackupId, { preserveSecs: options.preserveSecs || 0, state }));
|
||||
if (copyBackupError) return reject(copyBackupError);
|
||||
if (error) return reject(error);
|
||||
|
||||
debug(`copy: copied successfully to id ${destBackupId}. Took ${(new Date() - startTime)/1000} seconds`);
|
||||
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
const startTime = new Date();
|
||||
await safe(storage.api(provider).copy(backupConfig, oldFilePath, newFilePath, progressCallback));
|
||||
debug(`copy: copied successfully to ${destRemotePath}. Took ${(new Date() - startTime)/1000} seconds`);
|
||||
}
|
||||
|
||||
async function rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback) {
|
||||
@@ -790,12 +204,13 @@ async function rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCa
|
||||
assert(Array.isArray(dependsOn));
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const backupId = `${tag}/box_v${constants.VERSION}`;
|
||||
const remotePath = `${tag}/box_v${constants.VERSION}`;
|
||||
const format = backupConfig.format;
|
||||
|
||||
debug(`rotateBoxBackup: rotating to id ${backupId}`);
|
||||
debug(`rotateBoxBackup: rotating to id ${remotePath}`);
|
||||
|
||||
const data = {
|
||||
remotePath,
|
||||
encryptionVersion: backupConfig.encryption ? 2 : null,
|
||||
packageVersion: constants.VERSION,
|
||||
type: backups.BACKUP_TYPE_BOX,
|
||||
@@ -803,12 +218,17 @@ async function rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCa
|
||||
identifier: backups.BACKUP_IDENTIFIER_BOX,
|
||||
dependsOn,
|
||||
manifest: null,
|
||||
format
|
||||
format,
|
||||
preserveSecs: options.preserveSecs || 0
|
||||
};
|
||||
|
||||
await backups.add(backupId, data);
|
||||
await copy(backupConfig, 'snapshot/box', backupId, options, progressCallback);
|
||||
return backupId;
|
||||
const id = await backups.add(data);
|
||||
const [error] = await safe(copy(backupConfig, 'snapshot/box', remotePath, progressCallback));
|
||||
const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
|
||||
await backups.setState(id, state);
|
||||
if (error) throw error;
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
async function backupBox(dependsOn, tag, options, progressCallback) {
|
||||
@@ -820,9 +240,7 @@ async function backupBox(dependsOn, tag, options, progressCallback) {
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
|
||||
await uploadBoxSnapshot(backupConfig, progressCallback);
|
||||
|
||||
const backupId = await rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback);
|
||||
return backupId;
|
||||
return await rotateBoxBackup(backupConfig, tag, options, dependsOn, progressCallback);
|
||||
}
|
||||
|
||||
async function rotateAppBackup(backupConfig, app, tag, options, progressCallback) {
|
||||
@@ -835,25 +253,31 @@ async function rotateAppBackup(backupConfig, app, tag, options, progressCallback
|
||||
const snapshotInfo = backups.getSnapshotInfo(app.id);
|
||||
|
||||
const manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest; // compat
|
||||
const backupId = `${tag}/app_${app.fqdn}_v${manifest.version}`;
|
||||
const remotePath = `${tag}/app_${app.fqdn}_v${manifest.version}`;
|
||||
const format = backupConfig.format;
|
||||
|
||||
debug(`rotateAppBackup: rotating ${app.fqdn} to id ${backupId}`);
|
||||
debug(`rotateAppBackup: rotating ${app.fqdn} to path ${remotePath}`);
|
||||
|
||||
const data = {
|
||||
remotePath,
|
||||
encryptionVersion: backupConfig.encryption ? 2 : null,
|
||||
packageVersion: manifest.version,
|
||||
type: backups.BACKUP_TYPE_APP,
|
||||
state: backups.BACKUP_STATE_CREATING,
|
||||
identifier: app.id,
|
||||
dependsOn: [ ],
|
||||
dependsOn: [],
|
||||
manifest,
|
||||
format: format
|
||||
format,
|
||||
preserveSecs: options.preserveSecs || 0
|
||||
};
|
||||
|
||||
await backups.add(backupId, data);
|
||||
await copy(backupConfig, `snapshot/app_${app.id}`, backupId, options, progressCallback);
|
||||
return backupId;
|
||||
const id = await backups.add(data);
|
||||
const [error] = await safe(copy(backupConfig, `snapshot/app_${app.id}`, remotePath, progressCallback));
|
||||
const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
|
||||
await backups.setState(id, state);
|
||||
if (error) throw error;
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
async function backupApp(app, options, progressCallback) {
|
||||
@@ -890,7 +314,7 @@ async function uploadAppSnapshot(backupConfig, app, progressCallback) {
|
||||
|
||||
await snapshotApp(app, progressCallback);
|
||||
|
||||
const backupId = util.format('snapshot/app_%s', app.id);
|
||||
const remotePath = `snapshot/app_${app.id}`;
|
||||
const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
|
||||
if (!appDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`);
|
||||
|
||||
@@ -899,7 +323,7 @@ async function uploadAppSnapshot(backupConfig, app, progressCallback) {
|
||||
progressCallback({ message: `Uploading app snapshot ${app.fqdn}`});
|
||||
|
||||
const uploadConfig = {
|
||||
backupId,
|
||||
remotePath,
|
||||
backupConfig,
|
||||
dataLayout,
|
||||
progressTag: app.fqdn
|
||||
@@ -907,9 +331,9 @@ async function uploadAppSnapshot(backupConfig, app, progressCallback) {
|
||||
|
||||
const startTime = new Date();
|
||||
|
||||
await util.promisify(runBackupUpload)(uploadConfig, progressCallback);
|
||||
await runBackupUpload(uploadConfig, progressCallback);
|
||||
|
||||
debug(`uploadAppSnapshot: ${app.fqdn} upload with id ${backupId}. ${(new Date() - startTime)/1000} seconds`);
|
||||
debug(`uploadAppSnapshot: ${app.fqdn} uploaded to ${remotePath}. ${(new Date() - startTime)/1000} seconds`);
|
||||
|
||||
await backups.setSnapshotInfo(app.id, { timestamp: new Date().toISOString(), manifest: app.manifest, format: backupConfig.format });
|
||||
}
|
||||
@@ -930,8 +354,7 @@ async function backupAppWithTag(app, tag, options, progressCallback) {
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
|
||||
await uploadAppSnapshot(backupConfig, app, progressCallback);
|
||||
const backupId = await rotateAppBackup(backupConfig, app, tag, options, progressCallback);
|
||||
return backupId;
|
||||
return await rotateAppBackup(backupConfig, app, tag, options, progressCallback);
|
||||
}
|
||||
|
||||
async function uploadMailSnapshot(backupConfig, progressCallback) {
|
||||
@@ -942,7 +365,7 @@ async function uploadMailSnapshot(backupConfig, progressCallback) {
|
||||
if (!mailDataDir) throw new BoxError(BoxError.FS_ERROR, `Error resolving maildata: ${safe.error.message}`);
|
||||
|
||||
const uploadConfig = {
|
||||
backupId: 'snapshot/mail',
|
||||
remotePath: 'snapshot/mail',
|
||||
backupConfig,
|
||||
dataLayout: new DataLayout(mailDataDir, []),
|
||||
progressTag: 'mail'
|
||||
@@ -952,7 +375,7 @@ async function uploadMailSnapshot(backupConfig, progressCallback) {
|
||||
|
||||
const startTime = new Date();
|
||||
|
||||
await util.promisify(runBackupUpload)(uploadConfig, progressCallback);
|
||||
await runBackupUpload(uploadConfig, progressCallback);
|
||||
|
||||
debug(`uploadMailSnapshot: took ${(new Date() - startTime)/1000} seconds`);
|
||||
|
||||
@@ -965,12 +388,13 @@ async function rotateMailBackup(backupConfig, tag, options, progressCallback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const backupId = `${tag}/mail_v${constants.VERSION}`;
|
||||
const remotePath = `${tag}/mail_v${constants.VERSION}`;
|
||||
const format = backupConfig.format;
|
||||
|
||||
debug(`rotateMailBackup: rotating to id ${backupId}`);
|
||||
debug(`rotateMailBackup: rotating to ${remotePath}`);
|
||||
|
||||
const data = {
|
||||
remotePath,
|
||||
encryptionVersion: backupConfig.encryption ? 2 : null,
|
||||
packageVersion: constants.VERSION,
|
||||
type: backups.BACKUP_TYPE_MAIL,
|
||||
@@ -978,12 +402,17 @@ async function rotateMailBackup(backupConfig, tag, options, progressCallback) {
|
||||
identifier: backups.BACKUP_IDENTIFIER_MAIL,
|
||||
dependsOn: [],
|
||||
manifest: null,
|
||||
format: format
|
||||
format,
|
||||
preserveSecs: options.preserveSecs || 0
|
||||
};
|
||||
|
||||
await backups.add(backupId, data);
|
||||
await copy(backupConfig, 'snapshot/mail', backupId, options, progressCallback);
|
||||
return backupId;
|
||||
const id = await backups.add(data);
|
||||
const [error] = await safe(copy(backupConfig, 'snapshot/mail', remotePath, progressCallback));
|
||||
const state = error ? backups.BACKUP_STATE_ERROR : backups.BACKUP_STATE_NORMAL;
|
||||
await backups.setState(id, state);
|
||||
if (error) throw error;
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
async function backupMailWithTag(tag, options, progressCallback) {
|
||||
@@ -995,8 +424,7 @@ async function backupMailWithTag(tag, options, progressCallback) {
|
||||
|
||||
const backupConfig = await settings.getBackupConfig();
|
||||
await uploadMailSnapshot(backupConfig, progressCallback);
|
||||
const backupId = await rotateMailBackup(backupConfig, tag, options, progressCallback);
|
||||
return backupId;
|
||||
return await rotateMailBackup(backupConfig, tag, options, progressCallback);
|
||||
}
|
||||
|
||||
async function backupMail(options, progressCallback) {
|
||||
@@ -1018,8 +446,7 @@ async function downloadMail(restoreConfig, progressCallback) {
|
||||
|
||||
const startTime = new Date();
|
||||
|
||||
const downloadAsync = util.promisify(download);
|
||||
await downloadAsync(restoreConfig.backupConfig, restoreConfig.backupId, restoreConfig.backupFormat, dataLayout, progressCallback);
|
||||
await download(restoreConfig.backupConfig, restoreConfig.remotePath, restoreConfig.backupFormat, dataLayout, progressCallback);
|
||||
debug('downloadMail: time: %s', (new Date() - startTime)/1000);
|
||||
}
|
||||
|
||||
@@ -1028,7 +455,7 @@ async function fullBackup(options, progressCallback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,''); // unique tag under which all apps/mail/box backs up
|
||||
|
||||
const allApps = await apps.list();
|
||||
|
||||
@@ -1042,24 +469,24 @@ async function fullBackup(options, progressCallback) {
|
||||
percent += step;
|
||||
|
||||
if (!app.enableBackup) {
|
||||
debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length})`);
|
||||
debug(`fullBackup: skipped backup ${app.fqdn} (${i+1}/${allApps.length}) since automatic backup disabled`);
|
||||
continue; // nothing to backup
|
||||
}
|
||||
|
||||
const startTime = new Date();
|
||||
const appBackupId = await backupAppWithTag(app, tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));
|
||||
const appBackupId = await backupAppWithTag(app, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
|
||||
debug(`fullBackup: app ${app.fqdn} backup finished. Took ${(new Date() - startTime)/1000} seconds`);
|
||||
if (appBackupId) appBackupIds.push(appBackupId); // backupId can be null if in BAD_STATE and never backed up
|
||||
}
|
||||
|
||||
progressCallback({ percent: percent, message: 'Backing up mail' });
|
||||
progressCallback({ percent, message: 'Backing up mail' });
|
||||
percent += step;
|
||||
const mailBackupId = await backupMailWithTag(tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));
|
||||
const mailBackupId = await backupMailWithTag(tag, options, (progress) => progressCallback({ percent, message: progress.message }));
|
||||
|
||||
progressCallback({ percent: percent, message: 'Backing up system data' });
|
||||
progressCallback({ percent, message: 'Backing up system data' });
|
||||
percent += step;
|
||||
|
||||
const dependsOn = appBackupIds.concat(mailBackupId);
|
||||
const backupId = await backupBox(dependsOn, tag, options, (progress) => progressCallback({ percent: percent, message: progress.message }));
|
||||
const backupId = await backupBox(dependsOn, tag, options, (progress) => progressCallback({ percent, message: progress.message }));
|
||||
return backupId;
|
||||
}
|
||||
|
||||
77
src/blobs.js
77
src/blobs.js
@@ -4,16 +4,16 @@
|
||||
|
||||
exports = module.exports = {
|
||||
get,
|
||||
getString,
|
||||
set,
|
||||
setString,
|
||||
del,
|
||||
|
||||
initSecrets,
|
||||
|
||||
ACME_ACCOUNT_KEY: 'acme_account_key',
|
||||
ADDON_TURN_SECRET: 'addon_turn_secret',
|
||||
DHPARAMS: 'dhparams',
|
||||
SFTP_PUBLIC_KEY: 'sftp_public_key',
|
||||
SFTP_PRIVATE_KEY: 'sftp_private_key',
|
||||
PROXY_AUTH_TOKEN_SECRET: 'proxy_auth_token_secret',
|
||||
|
||||
CERT_PREFIX: 'cert',
|
||||
|
||||
@@ -21,13 +21,7 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
constants = require('./constants.js'),
|
||||
crypto = require('crypto'),
|
||||
database = require('./database.js'),
|
||||
debug = require('debug')('box:blobs'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance');
|
||||
database = require('./database.js');
|
||||
|
||||
const BLOBS_FIELDS = [ 'id', 'value' ].join(',');
|
||||
|
||||
@@ -39,6 +33,14 @@ async function get(id) {
|
||||
return result[0].value;
|
||||
}
|
||||
|
||||
async function getString(id) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
|
||||
const result = await database.query(`SELECT ${BLOBS_FIELDS} FROM blobs WHERE id = ?`, [ id ]);
|
||||
if (result.length === 0) return null;
|
||||
return result[0].value.toString('utf8');
|
||||
}
|
||||
|
||||
async function set(id, value) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert(value === null || Buffer.isBuffer(value));
|
||||
@@ -46,6 +48,13 @@ async function set(id, value) {
|
||||
await database.query('INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', [ id, value ]);
|
||||
}
|
||||
|
||||
async function setString(id, value) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert(value === null || typeof value === 'string');
|
||||
|
||||
await database.query('INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', [ id, Buffer.from(value) ]);
|
||||
}
|
||||
|
||||
async function del(id) {
|
||||
await database.query('DELETE FROM blobs WHERE id=?', [ id ]);
|
||||
}
|
||||
@@ -53,51 +62,3 @@ async function del(id) {
|
||||
async function clear() {
|
||||
await database.query('DELETE FROM blobs');
|
||||
}
|
||||
|
||||
async function initSecrets() {
|
||||
let acmeAccountKey = await get(exports.ACME_ACCOUNT_KEY);
|
||||
if (!acmeAccountKey) {
|
||||
acmeAccountKey = safe.child_process.execSync('openssl genrsa 4096');
|
||||
if (!acmeAccountKey) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate acme account key: ${safe.error.message}`);
|
||||
await set(exports.ACME_ACCOUNT_KEY, acmeAccountKey);
|
||||
}
|
||||
|
||||
let turnSecret = await get(exports.ADDON_TURN_SECRET);
|
||||
if (!turnSecret) {
|
||||
turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
|
||||
await set(exports.ADDON_TURN_SECRET, Buffer.from(turnSecret));
|
||||
}
|
||||
|
||||
// TODO maybe skip this in tests if possible again
|
||||
let dhparams = await get(exports.DHPARAMS);
|
||||
if (!dhparams) {
|
||||
debug('initSecrets: generating dhparams.pem. this takes forever');
|
||||
if (constants.TEST) dhparams = safe.fs.readFileSync('/tmp/dhparams.pem');
|
||||
if (!dhparams) dhparams = safe.child_process.execSync('openssl dhparam 2048');
|
||||
if (!dhparams) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
|
||||
if (constants.TEST) safe.fs.writeFileSync('/tmp/dhparams.pem', dhparams);
|
||||
if (!safe.fs.writeFileSync(paths.DHPARAMS_FILE, dhparams)) throw new BoxError(BoxError.FS_ERROR, `Could not save dhparams.pem: ${safe.error.message}`);
|
||||
await set(exports.DHPARAMS, dhparams);
|
||||
} else if (!safe.fs.existsSync(paths.DHPARAMS_FILE)) {
|
||||
if (!safe.fs.writeFileSync(paths.DHPARAMS_FILE, dhparams)) throw new BoxError(BoxError.FS_ERROR, `Could not save dhparams.pem: ${safe.error.message}`);
|
||||
}
|
||||
|
||||
let sftpPrivateKey = await get(exports.SFTP_PRIVATE_KEY);
|
||||
let sftpPublicKey = await get(exports.SFTP_PUBLIC_KEY);
|
||||
|
||||
if (!sftpPrivateKey || !sftpPublicKey) {
|
||||
debug('initSecrets: generate sftp keys');
|
||||
if (constants.TEST) {
|
||||
safe.fs.unlinkSync(paths.SFTP_PUBLIC_KEY_FILE);
|
||||
safe.fs.unlinkSync(paths.SFTP_PRIVATE_KEY_FILE);
|
||||
}
|
||||
if (!safe.child_process.execSync(`ssh-keygen -m PEM -t rsa -f "${paths.SFTP_KEYS_DIR}/ssh_host_rsa_key" -q -N ""`)) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ssh keys: ${safe.error.message}`);
|
||||
sftpPublicKey = safe.fs.readFileSync(paths.SFTP_PUBLIC_KEY_FILE);
|
||||
await set(exports.SFTP_PUBLIC_KEY, sftpPublicKey);
|
||||
sftpPrivateKey = safe.fs.readFileSync(paths.SFTP_PRIVATE_KEY_FILE);
|
||||
await set(exports.SFTP_PRIVATE_KEY, sftpPrivateKey);
|
||||
} else if (!safe.fs.existsSync(paths.SFTP_PUBLIC_KEY_FILE) || !safe.fs.existsSync(paths.SFTP_PRIVATE_KEY_FILE)) {
|
||||
if (!safe.fs.writeFileSync(paths.SFTP_PUBLIC_KEY_FILE, sftpPublicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public key: ${safe.error.message}`);
|
||||
if (!safe.fs.writeFileSync(paths.SFTP_PRIVATE_KEY_FILE, sftpPrivateKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private key: ${safe.error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ function BoxError(reason, errorOrMessage, override) {
|
||||
}
|
||||
util.inherits(BoxError, Error);
|
||||
BoxError.ACCESS_DENIED = 'Access Denied';
|
||||
BoxError.ACME_ERROR = 'Acme Error';
|
||||
BoxError.ADDONS_ERROR = 'Addons Error';
|
||||
BoxError.ALREADY_EXISTS = 'Already Exists';
|
||||
BoxError.BAD_FIELD = 'Bad Field';
|
||||
@@ -51,7 +52,7 @@ BoxError.INACTIVE = 'Inactive'; // service/volume/mount
|
||||
BoxError.INTERNAL_ERROR = 'Internal Error';
|
||||
BoxError.INVALID_CREDENTIALS = 'Invalid Credentials';
|
||||
BoxError.IPTABLES_ERROR = 'IPTables Error';
|
||||
BoxError.LICENSE_ERROR = 'License Error';
|
||||
BoxError.LICENSE_ERROR = 'License Error'; // billing or subscription expired
|
||||
BoxError.LOGROTATE_ERROR = 'Logrotate Error';
|
||||
BoxError.MAIL_ERROR = 'Mail Error';
|
||||
BoxError.MOUNT_ERROR = 'Mount Error';
|
||||
@@ -91,6 +92,7 @@ BoxError.toHttpError = function (error) {
|
||||
case BoxError.INVALID_CREDENTIALS:
|
||||
return new HttpError(412, error);
|
||||
case BoxError.EXTERNAL_ERROR:
|
||||
case BoxError.ACME_ERROR:
|
||||
case BoxError.NETWORK_ERROR:
|
||||
case BoxError.FS_ERROR:
|
||||
case BoxError.MOUNT_ERROR:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
let assert = require('assert'),
|
||||
const assert = require('assert'),
|
||||
fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
@@ -11,7 +11,7 @@ exports = module.exports = {
|
||||
function getChanges(version) {
|
||||
assert.strictEqual(typeof version, 'string');
|
||||
|
||||
let changelog = [ ];
|
||||
const changelog = [];
|
||||
const lines = fs.readFileSync(path.join(__dirname, '../CHANGES'), 'utf8').split('\n');
|
||||
|
||||
version = version.replace(/[+-].*/, ''); // strip prerelease
|
||||
|
||||
@@ -32,8 +32,9 @@ const apps = require('./apps.js'),
|
||||
constants = require('./constants.js'),
|
||||
cron = require('./cron.js'),
|
||||
debug = require('debug')('box:cloudron'),
|
||||
delay = require('delay'),
|
||||
delay = require('./delay.js'),
|
||||
dns = require('./dns.js'),
|
||||
dockerProxy = require('./dockerproxy.js'),
|
||||
domains = require('./domains.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
@@ -63,6 +64,7 @@ async function initialize() {
|
||||
|
||||
async function uninitialize() {
|
||||
await cron.stopJobs();
|
||||
await dockerProxy.stop();
|
||||
await platform.stopAllTasks();
|
||||
}
|
||||
|
||||
@@ -76,6 +78,7 @@ async function onActivated(options) {
|
||||
// 2. the restore code path can run without sudo (since mail/ is non-root)
|
||||
await platform.start(options);
|
||||
await cron.startJobs();
|
||||
await dockerProxy.start(); // this relies on the 'cloudron' docker network interface to be available
|
||||
|
||||
// disable responding to api calls via IP to not leak domain info. this is carefully placed as the last item, so it buys
|
||||
// the UI some time to query the dashboard domain in the restore code path
|
||||
@@ -87,10 +90,13 @@ async function notifyUpdate() {
|
||||
const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
|
||||
if (version === constants.VERSION) return;
|
||||
|
||||
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
|
||||
|
||||
const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
|
||||
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
|
||||
if (!version) {
|
||||
await eventlog.add(eventlog.ACTION_INSTALL_FINISH, AuditSource.CRON, { version: constants.VERSION });
|
||||
} else {
|
||||
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
|
||||
const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
|
||||
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
|
||||
}
|
||||
|
||||
safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
|
||||
}
|
||||
@@ -112,7 +118,8 @@ async function runStartupTasks() {
|
||||
tasks.push(async function () {
|
||||
if (!settings.dashboardDomain()) return;
|
||||
|
||||
await reverseProxy.writeDashboardConfig(settings.dashboardDomain());
|
||||
const domainObject = await domains.get(settings.dashboardDomain());
|
||||
await reverseProxy.writeDashboardConfig(domainObject);
|
||||
});
|
||||
|
||||
tasks.push(async function () {
|
||||
@@ -148,6 +155,7 @@ async function getConfig() {
|
||||
return {
|
||||
apiServerOrigin: settings.apiServerOrigin(),
|
||||
webServerOrigin: settings.webServerOrigin(),
|
||||
consoleServerOrigin: settings.consoleServerOrigin(),
|
||||
adminDomain: settings.dashboardDomain(),
|
||||
adminFqdn: settings.dashboardFqdn(),
|
||||
mailFqdn: settings.mailFqdn(),
|
||||
@@ -157,8 +165,8 @@ async function getConfig() {
|
||||
cloudronName: allSettings[settings.CLOUDRON_NAME_KEY],
|
||||
footer: branding.renderFooter(allSettings[settings.FOOTER_KEY] || constants.FOOTER),
|
||||
features: appstore.getFeatures(),
|
||||
profileLocked: allSettings[settings.DIRECTORY_CONFIG_KEY].lockUserProfiles,
|
||||
mandatory2FA: allSettings[settings.DIRECTORY_CONFIG_KEY].mandatory2FA
|
||||
profileLocked: allSettings[settings.PROFILE_CONFIG_KEY].lockUserProfiles,
|
||||
mandatory2FA: allSettings[settings.PROFILE_CONFIG_KEY].mandatory2FA
|
||||
};
|
||||
}
|
||||
|
||||
@@ -211,7 +219,7 @@ async function getLogs(unit, options) {
|
||||
assert.strictEqual(typeof options.format, 'string');
|
||||
assert.strictEqual(typeof options.follow, 'boolean');
|
||||
|
||||
var lines = options.lines === -1 ? '+1' : options.lines,
|
||||
const lines = options.lines === -1 ? '+1' : options.lines,
|
||||
format = options.format || 'json',
|
||||
follow = options.follow;
|
||||
|
||||
@@ -223,7 +231,7 @@ async function getLogs(unit, options) {
|
||||
// need to handle box.log without subdir
|
||||
if (unit === 'box') args.push(path.join(paths.LOG_DIR, 'box.log'));
|
||||
else if (unit.startsWith('crash-')) args.push(path.join(paths.CRASH_LOG_DIR, unit.slice(6) + '.log'));
|
||||
else throw new BoxError(BoxError.BAD_FIELD, 'No such unit', { field: 'unit' });
|
||||
else throw new BoxError(BoxError.BAD_FIELD, `No such unit '${unit}'`);
|
||||
|
||||
const cp = spawn('/usr/bin/tail', args);
|
||||
|
||||
@@ -281,12 +289,12 @@ async function setDashboardDomain(domain, auditSource) {
|
||||
const domainObject = await domains.get(domain);
|
||||
if (!domain) throw new BoxError(BoxError.NOT_FOUND, 'No such domain');
|
||||
|
||||
await reverseProxy.writeDashboardConfig(domain);
|
||||
await reverseProxy.writeDashboardConfig(domainObject);
|
||||
const fqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
|
||||
|
||||
await settings.setDashboardLocation(domain, fqdn);
|
||||
|
||||
await safe(appstore.updateCloudron({ domain }));
|
||||
await safe(appstore.updateCloudron({ domain }), { debug });
|
||||
|
||||
await eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain, fqdn });
|
||||
}
|
||||
@@ -323,13 +331,16 @@ async function setupDnsAndCert(subdomain, domain, auditSource, progressCallback)
|
||||
const domainObject = await domains.get(domain);
|
||||
const dashboardFqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
|
||||
progressCallback({ message: `Updating DNS of ${dashboardFqdn}` });
|
||||
await dns.upsertDnsRecords(subdomain, domain, 'A', [ ip ]);
|
||||
progressCallback({ message: `Waiting for DNS of ${dashboardFqdn}` });
|
||||
await dns.waitForDnsRecord(subdomain, domain, 'A', ip, { interval: 30000, times: 50000 });
|
||||
progressCallback({ message: `Getting certificate of ${dashboardFqdn}` });
|
||||
progressCallback({ percent: 20, message: `Updating DNS of ${dashboardFqdn}` });
|
||||
await dns.upsertDnsRecords(subdomain, domain, 'A', [ ipv4 ]);
|
||||
if (ipv6) await dns.upsertDnsRecords(subdomain, domain, 'AAAA', [ ipv6 ]);
|
||||
progressCallback({ percent: 40, message: `Waiting for DNS of ${dashboardFqdn}` });
|
||||
await dns.waitForDnsRecord(subdomain, domain, 'A', ipv4, { interval: 30000, times: 50000 });
|
||||
if (ipv6) await dns.waitForDnsRecord(subdomain, domain, 'AAAA', ipv6, { interval: 30000, times: 50000 });
|
||||
progressCallback({ percent: 60, message: `Getting certificate of ${dashboardFqdn}` });
|
||||
await reverseProxy.ensureCertificate(dns.fqdn(subdomain, domainObject), domain, auditSource);
|
||||
}
|
||||
|
||||
|
||||
42
src/collectd/app_cgroup_v2.ejs
Normal file
42
src/collectd/app_cgroup_v2.ejs
Normal file
@@ -0,0 +1,42 @@
|
||||
LoadPlugin "table"
|
||||
<Plugin table>
|
||||
<Table "/sys/fs/cgroup/docker/<%= containerId %>/memory.stat">
|
||||
Instance "<%= appId %>-memory"
|
||||
Separator " \\n"
|
||||
<Result>
|
||||
Type gauge
|
||||
InstancesFrom 0
|
||||
ValuesFrom 1
|
||||
</Result>
|
||||
</Table>
|
||||
|
||||
<Table "/sys/fs/cgroup/docker/<%= containerId %>/memory.max">
|
||||
Instance "<%= appId %>-memory"
|
||||
Separator "\\n"
|
||||
<Result>
|
||||
Type gauge
|
||||
InstancePrefix "max_usage_in_bytes"
|
||||
ValuesFrom 0
|
||||
</Result>
|
||||
</Table>
|
||||
|
||||
<Table "/sys/fs/cgroup/docker/<%= containerId %>/cpu.stat">
|
||||
Instance "<%= appId %>-cpu"
|
||||
Separator " \\n"
|
||||
<Result>
|
||||
Type gauge
|
||||
InstancesFrom 0
|
||||
ValuesFrom 1
|
||||
</Result>
|
||||
</Table>
|
||||
</Plugin>
|
||||
|
||||
<Plugin python>
|
||||
<Module du>
|
||||
<Path>
|
||||
Instance "<%= appId %>"
|
||||
Dir "<%= appDataDir %>"
|
||||
</Path>
|
||||
</Module>
|
||||
</Plugin>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
let fs = require('fs'),
|
||||
const fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
const CLOUDRON = process.env.BOX_ENV === 'cloudron',
|
||||
@@ -29,6 +29,7 @@ exports = module.exports = {
|
||||
AUTHWALL_PORT: 3001,
|
||||
LDAP_PORT: 3002,
|
||||
DOCKER_PROXY_PORT: 3003,
|
||||
USER_DIRECTORY_LDAPS_PORT: 3004, // user directory LDAP with TLS rerouting in iptables, public port is 636
|
||||
|
||||
NGINX_DEFAULT_CONFIG_FILE_NAME: 'default.conf',
|
||||
|
||||
@@ -46,6 +47,7 @@ exports = module.exports = {
|
||||
'io.github.sickchill.cloudronapp',
|
||||
'to.couchpota.cloudronapp'
|
||||
],
|
||||
DEMO_APP_LIMIT: 20,
|
||||
|
||||
AUTOUPDATE_PATTERN_NEVER: 'never',
|
||||
|
||||
@@ -59,10 +61,17 @@ exports = module.exports = {
|
||||
CLOUDRON: CLOUDRON,
|
||||
TEST: TEST,
|
||||
|
||||
PORT25_CHECK_SERVER: 'port25check.cloudron.io',
|
||||
|
||||
FORUM_URL: 'https://forum.cloudron.io',
|
||||
|
||||
SUPPORT_USERNAME: 'cloudron-support',
|
||||
SUPPORT_EMAIL: 'support@cloudron.io',
|
||||
|
||||
USER_DIRECTORY_LDAP_DN: 'cn=admin,ou=system,dc=cloudron',
|
||||
|
||||
FOOTER: '© %YEAR% [Cloudron](https://cloudron.io) [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)',
|
||||
|
||||
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '6.3.0-test'
|
||||
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '7.0.0-test'
|
||||
};
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ const appHealthMonitor = require('./apphealthmonitor.js'),
|
||||
system = require('./system.js'),
|
||||
updater = require('./updater.js'),
|
||||
updateChecker = require('./updatechecker.js'),
|
||||
userdirectory = require('./userdirectory.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
const gJobs = {
|
||||
@@ -65,7 +66,7 @@ async function startJobs() {
|
||||
|
||||
const randomTick = Math.floor(60*Math.random());
|
||||
gJobs.systemChecks = new CronJob({
|
||||
cronTime: '00 30 2 * * *', // once a day. if you change this interval, change the notification messages with correct duration
|
||||
cronTime: `${randomTick} ${randomTick} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
|
||||
onTick: async () => await safe(cloudron.runSystemChecks(), { debug }),
|
||||
start: true
|
||||
});
|
||||
@@ -147,6 +148,10 @@ async function handleSettingsChanged(key, value) {
|
||||
await stopJobs();
|
||||
await startJobs();
|
||||
break;
|
||||
case settings.USER_DIRECTORY_KEY:
|
||||
if (value.enabled) await userdirectory.start();
|
||||
else await userdirectory.stop();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -143,9 +143,11 @@ async function importFromFile(file) {
|
||||
async function exportToFile(file) {
|
||||
assert.strictEqual(typeof file, 'string');
|
||||
|
||||
// latest mysqldump enables column stats by default which is not present in MySQL 5.7 server
|
||||
// this option must not be set in production cloudrons which still use the old mysqldump
|
||||
const colStats = (!constants.TEST && require('fs').readFileSync('/etc/lsb-release', 'utf-8').includes('20.04')) ? '--column-statistics=0' : '';
|
||||
// latest mysqldump enables column stats by default which is not present in 5.7 util
|
||||
const mysqlDumpHelp = safe.child_process.execSync('/usr/bin/mysqldump --help', { encoding: 'utf8' });
|
||||
if (!mysqlDumpHelp) throw new BoxError(BoxError.DATABASE_ERROR, safe.error);
|
||||
const hasColStats = mysqlDumpHelp.includes('column-statistics');
|
||||
const colStats = hasColStats ? '--column-statistics=0' : '';
|
||||
|
||||
const cmd = `/usr/bin/mysqldump -h "${gDatabase.hostname}" -u root -p${gDatabase.password} ${colStats} --single-transaction --routines --triggers ${gDatabase.name} > "${file}"`;
|
||||
|
||||
|
||||
13
src/delay.js
Normal file
13
src/delay.js
Normal file
@@ -0,0 +1,13 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = delay;
|
||||
|
||||
const assert = require('assert');
|
||||
|
||||
function delay(msecs) {
|
||||
assert.strictEqual(typeof msecs, 'number');
|
||||
|
||||
return new Promise(function (resolve) {
|
||||
setTimeout(resolve, msecs);
|
||||
});
|
||||
}
|
||||
40
src/dig.js
Normal file
40
src/dig.js
Normal file
@@ -0,0 +1,40 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
resolve,
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
constants = require('./constants.js'),
|
||||
dns = require('dns'),
|
||||
safe = require('safetydance'),
|
||||
_ = require('underscore');
|
||||
|
||||
// a note on TXT records. It doesn't have quotes ("") at the DNS level. Those quotes
|
||||
// are added for DNS server software to enclose spaces. Such quotes may also be returned
|
||||
// by the DNS REST API of some providers
|
||||
async function resolve(hostname, rrtype, options) {
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert.strictEqual(typeof rrtype, 'string');
|
||||
assert(options && typeof options === 'object');
|
||||
|
||||
const defaultOptions = { server: '127.0.0.1', timeout: 5000 }; // unbound runs on 127.0.0.1
|
||||
const resolver = new dns.promises.Resolver();
|
||||
options = _.extend({ }, defaultOptions, options);
|
||||
|
||||
// Only use unbound on a Cloudron
|
||||
if (constants.CLOUDRON) resolver.setServers([ options.server ]);
|
||||
|
||||
// should callback with ECANCELLED but looks like we might hit https://github.com/nodejs/node/issues/14814
|
||||
const timerId = setTimeout(resolver.cancel.bind(resolver), options.timeout || 5000);
|
||||
|
||||
const [error, result] = await safe(resolver.resolve(hostname, rrtype));
|
||||
clearTimeout(timerId);
|
||||
|
||||
if (error && error.code === 'ECANCELLED') error.code = 'TIMEOUT';
|
||||
if (error) throw error;
|
||||
|
||||
// when you query a random record, it errors with ENOTFOUND. But, if you query a record which has a different type
|
||||
// we sometimes get empty array and sometimes ENODATA. for TXT records, result is 2d array of strings
|
||||
return result;
|
||||
}
|
||||
239
src/dns.js
239
src/dns.js
@@ -19,12 +19,6 @@ module.exports = exports = {
|
||||
|
||||
checkDnsRecords,
|
||||
syncDnsRecords,
|
||||
|
||||
resolve,
|
||||
|
||||
promises: {
|
||||
resolve: require('util').promisify(resolve)
|
||||
}
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
@@ -32,16 +26,14 @@ const apps = require('./apps.js'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:dns'),
|
||||
dns = require('dns'),
|
||||
domains = require('./domains.js'),
|
||||
ipaddr = require('ipaddr.js'),
|
||||
mail = require('./mail.js'),
|
||||
promiseRetry = require('./promise-retry.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
tld = require('tldjs');
|
||||
|
||||
// choose which subdomain backend we use for test purpose we use route53
|
||||
function api(provider) {
|
||||
@@ -59,6 +51,7 @@ function api(provider) {
|
||||
case 'namecom': return require('./dns/namecom.js');
|
||||
case 'namecheap': return require('./dns/namecheap.js');
|
||||
case 'netcup': return require('./dns/netcup.js');
|
||||
case 'hetzner': return require('./dns/hetzner.js');
|
||||
case 'noop': return require('./dns/noop.js');
|
||||
case 'manual': return require('./dns/manual.js');
|
||||
case 'wildcard': return require('./dns/wildcard.js');
|
||||
@@ -66,42 +59,42 @@ function api(provider) {
|
||||
}
|
||||
}
|
||||
|
||||
function fqdn(location, domainObject) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
function fqdn(subdomain, domainObject) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
|
||||
return location + (location ? '.' : '') + domainObject.domain;
|
||||
return subdomain + (subdomain ? '.' : '') + domainObject.domain;
|
||||
}
|
||||
|
||||
// Hostname validation comes from RFC 1123 (section 2.1)
|
||||
// Domain name validation comes from RFC 2181 (Name syntax)
|
||||
// https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
|
||||
// We are validating the validity of the location-fqdn as host name (and not dns name)
|
||||
function validateHostname(location, domainObject) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
function validateHostname(subdomain, domainObject) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
|
||||
const hostname = fqdn(location, domainObject);
|
||||
const hostname = fqdn(subdomain, domainObject);
|
||||
|
||||
const RESERVED_LOCATIONS = [
|
||||
constants.SMTP_LOCATION,
|
||||
constants.IMAP_LOCATION
|
||||
];
|
||||
if (RESERVED_LOCATIONS.indexOf(location) !== -1) return new BoxError(BoxError.BAD_FIELD, location + ' is reserved', { field: 'location' });
|
||||
if (RESERVED_LOCATIONS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
|
||||
|
||||
if (hostname === settings.dashboardFqdn()) return new BoxError(BoxError.BAD_FIELD, location + ' is reserved', { field: 'location' });
|
||||
if (hostname === settings.dashboardFqdn()) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
|
||||
|
||||
// workaround https://github.com/oncletom/tld.js/issues/73
|
||||
var tmp = hostname.replace('_', '-');
|
||||
if (!tld.isValid(tmp)) return new BoxError(BoxError.BAD_FIELD, 'Hostname is not a valid domain name', { field: 'location' });
|
||||
const tmp = hostname.replace('_', '-');
|
||||
if (!tld.isValid(tmp)) return new BoxError(BoxError.BAD_FIELD, 'Hostname is not a valid domain name');
|
||||
|
||||
if (hostname.length > 253) return new BoxError(BoxError.BAD_FIELD, 'Hostname length exceeds 253 characters', { field: 'location' });
|
||||
if (hostname.length > 253) return new BoxError(BoxError.BAD_FIELD, 'Hostname length exceeds 253 characters');
|
||||
|
||||
if (location) {
|
||||
if (subdomain) {
|
||||
// label validation
|
||||
if (location.split('.').some(function (p) { return p.length > 63 || p.length < 1; })) return new BoxError(BoxError.BAD_FIELD, 'Invalid subdomain length', { field: 'location' });
|
||||
if (location.match(/^[A-Za-z0-9-.]+$/) === null) return new BoxError(BoxError.BAD_FIELD, 'Subdomain can only contain alphanumeric, hyphen and dot', { field: 'location' });
|
||||
if (/^[-.]/.test(location)) return new BoxError(BoxError.BAD_FIELD, 'Subdomain cannot start or end with hyphen or dot', { field: 'location' });
|
||||
if (subdomain.split('.').some(function (p) { return p.length > 63 || p.length < 1; })) return new BoxError(BoxError.BAD_FIELD, 'Invalid subdomain length');
|
||||
if (subdomain.match(/^[A-Za-z0-9-.]+$/) === null) return new BoxError(BoxError.BAD_FIELD, 'Subdomain can only contain alphanumeric, hyphen and dot');
|
||||
if (/^[-.]/.test(subdomain)) return new BoxError(BoxError.BAD_FIELD, 'Subdomain cannot start or end with hyphen or dot');
|
||||
}
|
||||
|
||||
return null;
|
||||
@@ -109,72 +102,77 @@ function validateHostname(location, domainObject) {
|
||||
|
||||
// returns the 'name' that needs to be inserted into zone
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
function getName(domain, location, type) {
|
||||
function getName(domain, subdomain, type) {
|
||||
const part = domain.domain.slice(0, -domain.zoneName.length - 1);
|
||||
|
||||
if (location === '') return part;
|
||||
if (subdomain === '') return part;
|
||||
|
||||
return part ? `${location}.${part}` : location;
|
||||
return part ? `${subdomain}.${part}` : subdomain;
|
||||
}
|
||||
|
||||
function maybePromisify(func) {
|
||||
if (util.types.isAsyncFunction(func)) return func;
|
||||
return util.promisify(func);
|
||||
}
|
||||
|
||||
async function getDnsRecords(location, domain, type) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
async function getDnsRecords(subdomain, domain, type) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
|
||||
const domainObject = await domains.get(domain);
|
||||
return await maybePromisify(api(domainObject.provider).get)(domainObject, location, type);
|
||||
return await api(domainObject.provider).get(domainObject, subdomain, type);
|
||||
}
|
||||
|
||||
async function checkDnsRecords(location, domain) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
async function checkDnsRecords(subdomain, domain) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
|
||||
const values = await getDnsRecords(location, domain, 'A');
|
||||
const cnameRecords = await getDnsRecords(subdomain, domain, 'CNAME');
|
||||
if (cnameRecords.length !== 0) return { needsOverwrite: true };
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ipv4Records = await getDnsRecords(subdomain, domain, 'A');
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
|
||||
if (values.length === 0) return { needsOverwrite: false }; // does not exist
|
||||
if (values[0] === ip) return { needsOverwrite: false }; // exists but in sync
|
||||
// if empty OR exactly one record with the ip, we don't need to overwrite
|
||||
if (ipv4Records.length !== 0 && (ipv4Records.length !== 1 || ipv4Records[0] !== ipv4)) return { needsOverwrite: true };
|
||||
|
||||
return { needsOverwrite: true };
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
if (ipv6) {
|
||||
const ipv6Records = await getDnsRecords(subdomain, domain, 'AAAA');
|
||||
|
||||
// if empty OR exactly one record with the ip, we don't need to overwrite
|
||||
if (ipv6Records.length !== 0 && (ipv6Records.length !== 1 || ipaddr.parse(ipv6Records[0]).toRFC5952String() !== ipv6)) return { needsOverwrite: true };
|
||||
}
|
||||
|
||||
return { needsOverwrite: false }; // one record exists and in sync
|
||||
}
|
||||
|
||||
// note: for TXT records the values must be quoted
|
||||
async function upsertDnsRecords(location, domain, type, values) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
async function upsertDnsRecords(subdomain, domain, type, values) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
|
||||
debug(`upsertDNSRecord: location ${location} on domain ${domain} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
debug(`upsertDNSRecord: location ${subdomain} on domain ${domain} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
const domainObject = await domains.get(domain);
|
||||
await maybePromisify(api(domainObject.provider).upsert)(domainObject, location, type, values);
|
||||
await api(domainObject.provider).upsert(domainObject, subdomain, type, values);
|
||||
}
|
||||
|
||||
async function removeDnsRecords(location, domain, type, values) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
async function removeDnsRecords(subdomain, domain, type, values) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
|
||||
debug('removeDNSRecord: %s on %s type %s values', location, domain, type, values);
|
||||
debug('removeDNSRecords: %s on %s type %s values', subdomain, domain, type, values);
|
||||
|
||||
const domainObject = await domains.get(domain);
|
||||
const [error] = await safe(maybePromisify(api(domainObject.provider).del)(domainObject, location, type, values));
|
||||
if (error && error.reason !== BoxError.NOT_FOUND) throw error;
|
||||
const [error] = await safe(api(domainObject.provider).del(domainObject, subdomain, type, values));
|
||||
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // this is never returned afaict
|
||||
}
|
||||
|
||||
async function waitForDnsRecord(location, domain, type, value, options) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
async function waitForDnsRecord(subdomain, domain, type, value, options) {
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert(type === 'A' || type === 'TXT');
|
||||
assert(type === 'A' || type === 'AAAA' || type === 'TXT');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
|
||||
@@ -183,7 +181,7 @@ async function waitForDnsRecord(location, domain, type, value, options) {
|
||||
// linode DNS takes ~15mins
|
||||
if (!options.interval) options.interval = domainObject.provider === 'linode' ? 20000 : 5000;
|
||||
|
||||
await maybePromisify(api(domainObject.provider).wait)(domainObject, location, type, value, options);
|
||||
await api(domainObject.provider).wait(domainObject, subdomain, type, value, options);
|
||||
}
|
||||
|
||||
function makeWildcard(vhost) {
|
||||
@@ -195,6 +193,30 @@ function makeWildcard(vhost) {
|
||||
return parts.join('.');
|
||||
}
|
||||
|
||||
async function registerLocation(location, options, recordType, recordValue) {
|
||||
const overwriteDns = options.overwriteDns || false;
|
||||
|
||||
// get the current record before updating it
|
||||
const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, recordType));
|
||||
if (getError) {
|
||||
const retryable = getError.reason !== BoxError.ACCESS_DENIED && getError.reason !== BoxError.NOT_FOUND; // NOT_FOUND is when zone is not found
|
||||
debug(`registerLocation: Get error. retryable: ${retryable}. ${getError.message}`);
|
||||
throw new BoxError(getError.reason, getError.message, { domain: location, retryable });
|
||||
}
|
||||
|
||||
if (values.length === 1 && values[0] === recordValue) return; // up-to-date
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
if (values.length !== 0 && !overwriteDns) throw new BoxError(BoxError.ALREADY_EXISTS, `DNS ${recordType} record already exists`, { domain: location, retryable: false });
|
||||
|
||||
const [upsertError] = await safe(upsertDnsRecords(location.subdomain, location.domain, recordType, [ recordValue ]));
|
||||
if (upsertError) {
|
||||
const retryable = upsertError.reason === BoxError.BUSY || upsertError.reason === BoxError.EXTERNAL_ERROR;
|
||||
debug(`registerLocation: Upsert error. retryable: ${retryable}. ${upsertError.message}`);
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, upsertError.message, { domain: location, retryable });
|
||||
}
|
||||
}
|
||||
|
||||
async function registerLocations(locations, options, progressCallback) {
|
||||
assert(Array.isArray(locations));
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
@@ -202,61 +224,52 @@ async function registerLocations(locations, options, progressCallback) {
|
||||
|
||||
debug(`registerLocations: Will register ${JSON.stringify(locations)} with options ${JSON.stringify(options)}`);
|
||||
|
||||
const overwriteDns = options.overwriteDns || false;
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
|
||||
for (const location of locations) {
|
||||
const error = await promiseRetry({ times: 200, interval: 5000 }, async function () {
|
||||
progressCallback({ message: `Registering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
|
||||
const fqdn = `${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}`;
|
||||
progressCallback({ message: `Registering location: ${fqdn}` });
|
||||
|
||||
// get the current record before updating it
|
||||
const [error, values] = await safe(getDnsRecords(location.subdomain, location.domain, 'A'));
|
||||
if (error && error.reason === BoxError.EXTERNAL_ERROR) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location }); // try again
|
||||
// give up for other errors
|
||||
if (error && error.reason === BoxError.ACCESS_DENIED) return new BoxError(BoxError.ACCESS_DENIED, error.message, { domain: location });
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return new BoxError(BoxError.NOT_FOUND, error.message, { domain: location });
|
||||
if (error) return new BoxError(BoxError.EXTERNAL_ERROR, error.message, location);
|
||||
|
||||
if (values.length !== 0 && values[0] === ip) return null; // up-to-date
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
if (values.length !== 0 && !overwriteDns) return new BoxError(BoxError.ALREADY_EXISTS, 'DNS Record already exists', { domain: location });
|
||||
|
||||
const [upsertError] = await safe(upsertDnsRecords(location.subdomain, location.domain, 'A', [ ip ]));
|
||||
if (upsertError && (upsertError.reason === BoxError.BUSY || upsertError.reason === BoxError.EXTERNAL_ERROR)) {
|
||||
progressCallback({ message: `registerSubdomains: Upsert error. Will retry. ${upsertError.message}` });
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, upsertError.message, { domain: location }); // try again
|
||||
await promiseRetry({ times: 200, interval: 5000, debug, retry: (error) => error.retryable }, async function () {
|
||||
// cname records cannot co-exist with other records
|
||||
const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, 'CNAME'));
|
||||
if (!getError && values.length === 1) {
|
||||
if (!options.overwriteDns) throw new BoxError(BoxError.ALREADY_EXISTS, 'DNS CNAME record already exists', { domain: location, retryable: false });
|
||||
debug(`registerLocations: removing CNAME record of ${fqdn}`);
|
||||
await removeDnsRecords(location.subdomain, location.domain, 'CNAME', values);
|
||||
}
|
||||
|
||||
return upsertError ? new BoxError(BoxError.EXTERNAL_ERROR, upsertError.message, location) : null;
|
||||
await registerLocation(location, options, 'A', ipv4);
|
||||
if (ipv6) await registerLocation(location, options, 'AAAA', ipv6);
|
||||
});
|
||||
|
||||
if (error) throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async function unregisterLocation(location, recordType, recordValue) {
|
||||
const [error] = await safe(removeDnsRecords(location.subdomain, location.domain, recordType, [ recordValue ]));
|
||||
if (!error || error.reason === BoxError.NOT_FOUND) return;
|
||||
|
||||
const retryable = error.reason === BoxError.BUSY || error.reason === BoxError.EXTERNAL_ERROR;
|
||||
debug(`unregisterLocation: Error unregistering location ${recordType}. retryable: ${retryable}. ${error.message}`);
|
||||
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location, retryable });
|
||||
}
|
||||
|
||||
async function unregisterLocations(locations, progressCallback) {
|
||||
assert(Array.isArray(locations));
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
|
||||
for (const location of locations) {
|
||||
const error = await promiseRetry({ times: 30, interval: 5000 }, async function () {
|
||||
progressCallback({ message: `Unregistering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
|
||||
progressCallback({ message: `Unregistering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
|
||||
|
||||
const [error] = await safe(removeDnsRecords(location.subdomain, location.domain, 'A', [ ip ]));
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return;
|
||||
if (error && (error.reason === BoxError.BUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
|
||||
progressCallback({ message: `Error unregistering location. Will retry. ${error.message}`});
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location }); // try again
|
||||
}
|
||||
|
||||
return error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain: location }) : null; // give up for other errors
|
||||
await promiseRetry({ times: 30, interval: 5000, debug, retry: (error) => error.retryable }, async function () {
|
||||
await unregisterLocation(location, 'A', ipv4);
|
||||
if (ipv6) await unregisterLocation(location, 'AAAA', ipv6);
|
||||
});
|
||||
|
||||
if (error) throw error;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,10 +298,10 @@ async function syncDnsRecords(options, progressCallback) {
|
||||
if (domain.domain === settings.dashboardDomain()) locations.push({ subdomain: constants.DASHBOARD_LOCATION, domain: settings.dashboardDomain() });
|
||||
if (domain.domain === settings.mailDomain() && settings.mailFqdn() !== settings.dashboardFqdn()) locations.push({ subdomain: mailSubdomain, domain: settings.mailDomain() });
|
||||
|
||||
allApps.forEach(function (app) {
|
||||
const appLocations = [{ subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains);
|
||||
for (const app of allApps) {
|
||||
const appLocations = [{ subdomain: app.subdomain, domain: app.domain }].concat(app.redirectDomains).concat(app.aliasDomains);
|
||||
locations = locations.concat(appLocations.filter(al => al.domain === domain.domain));
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
await registerLocations(locations, { overwriteDns: true }, progressCallback);
|
||||
@@ -301,35 +314,3 @@ async function syncDnsRecords(options, progressCallback) {
|
||||
|
||||
return { errors };
|
||||
}
|
||||
|
||||
// a note on TXT records. It doesn't have quotes ("") at the DNS level. Those quotes
|
||||
// are added for DNS server software to enclose spaces. Such quotes may also be returned
|
||||
// by the DNS REST API of some providers
|
||||
function resolve(hostname, rrtype, options, callback) {
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert.strictEqual(typeof rrtype, 'string');
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const defaultOptions = { server: '127.0.0.1', timeout: 5000 }; // unbound runs on 127.0.0.1
|
||||
const resolver = new dns.Resolver();
|
||||
options = _.extend({ }, defaultOptions, options);
|
||||
|
||||
// Only use unbound on a Cloudron
|
||||
if (constants.CLOUDRON) resolver.setServers([ options.server ]);
|
||||
|
||||
// should callback with ECANCELLED but looks like we might hit https://github.com/nodejs/node/issues/14814
|
||||
const timerId = setTimeout(resolver.cancel.bind(resolver), options.timeout || 5000);
|
||||
|
||||
resolver.resolve(hostname, rrtype, function (error, result) {
|
||||
clearTimeout(timerId);
|
||||
|
||||
if (error && error.code === 'ECANCELLED') error.code = 'TIMEOUT';
|
||||
|
||||
// result is an empty array if there was no error but there is no record. when you query a random
|
||||
// domain, it errors with ENOTFOUND. But if you query an existing domain (A record) but with different
|
||||
// type (CNAME) it is not an error and empty array
|
||||
// for TXT records, result is 2d array of strings
|
||||
callback(error, result);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -7,22 +7,23 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/cloudflare'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
// we are using latest v4 stable API https://api.cloudflare.com/#getting-started-endpoints
|
||||
var CLOUDFLARE_ENDPOINT = 'https://api.cloudflare.com/client/v4';
|
||||
const CLOUDFLARE_ENDPOINT = 'https://api.cloudflare.com/client/v4';
|
||||
|
||||
function removePrivateFields(domainObject) {
|
||||
domainObject.config.token = constants.SECRET_PLACEHOLDER;
|
||||
@@ -33,12 +34,11 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function translateRequestError(result, callback) {
|
||||
function translateRequestError(result) {
|
||||
assert.strictEqual(typeof result, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (result.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND, util.format('%s %j', result.statusCode, 'API does not exist')));
|
||||
if (result.statusCode === 422) return callback(new BoxError(BoxError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode === 404) return new BoxError(BoxError.NOT_FOUND, util.format('%s %j', result.statusCode, 'API does not exist'));
|
||||
if (result.statusCode === 422) return new BoxError(BoxError.BAD_FIELD, result.body.message);
|
||||
if (result.statusCode === 400 || result.statusCode === 401 || result.statusCode === 403) {
|
||||
let message = 'Unknown error';
|
||||
if (typeof result.body.error === 'string') {
|
||||
@@ -47,288 +47,230 @@ function translateRequestError(result, callback) {
|
||||
let error = result.body.errors[0];
|
||||
message = `message: ${error.message} statusCode: ${result.statusCode} code:${error.code}`;
|
||||
}
|
||||
return callback(new BoxError(BoxError.ACCESS_DENIED, message));
|
||||
return new BoxError(BoxError.ACCESS_DENIED, message);
|
||||
}
|
||||
|
||||
callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
return new BoxError(BoxError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body));
|
||||
}
|
||||
|
||||
function createRequest(method, url, dnsConfig) {
|
||||
function createRequest(method, url, domainConfig) {
|
||||
assert.strictEqual(typeof method, 'string');
|
||||
assert.strictEqual(typeof url, 'string');
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
|
||||
let request = superagent(method, url)
|
||||
.timeout(30 * 1000);
|
||||
const request = superagent(method, url).timeout(30 * 1000).ok(() => true);
|
||||
|
||||
if (dnsConfig.tokenType === 'GlobalApiKey') {
|
||||
request.set('X-Auth-Key', dnsConfig.token).set('X-Auth-Email', dnsConfig.email);
|
||||
if (domainConfig.tokenType === 'GlobalApiKey') {
|
||||
request.set('X-Auth-Key', domainConfig.token).set('X-Auth-Email', domainConfig.email);
|
||||
} else {
|
||||
request.set('Authorization', 'Bearer ' + dnsConfig.token);
|
||||
request.set('Authorization', 'Bearer ' + domainConfig.token);
|
||||
}
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneByName(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
createRequest('GET', CLOUDFLARE_ENDPOINT + '/zones?name=' + zoneName + '&status=active', dnsConfig)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
if (!result.body.result.length) return callback(new BoxError(BoxError.NOT_FOUND, util.format('%s %j', result.statusCode, result.body)));
|
||||
const [error, response] = await safe(createRequest('GET', `${CLOUDFLARE_ENDPOINT}/zones?name=${zoneName}&status=active`, domainConfig));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200 || response.body.success !== true) throw translateRequestError(response);
|
||||
if (!response.body.result.length) throw new BoxError(BoxError.NOT_FOUND, util.format('%s %j', response.statusCode, response.body));
|
||||
|
||||
callback(null, result.body.result[0]);
|
||||
});
|
||||
return response.body.result[0];
|
||||
}
|
||||
|
||||
// gets records filtered by zone, type and fqdn
|
||||
function getDnsRecords(dnsConfig, zoneId, fqdn, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getDnsRecords(domainConfig, zoneId, fqdn, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneId, 'string');
|
||||
assert.strictEqual(typeof fqdn, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
createRequest('GET', CLOUDFLARE_ENDPOINT + '/zones/' + zoneId + '/dns_records', dnsConfig)
|
||||
.query({ type: type, name: fqdn })
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
const [error, response] = await safe(createRequest('GET', `${CLOUDFLARE_ENDPOINT}/zones/${zoneId}/dns_records`, domainConfig)
|
||||
.query({ type: type, name: fqdn }));
|
||||
|
||||
var tmp = result.body.result;
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200 || response.body.success !== true) throw translateRequestError(response);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
return response.body.result;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZoneByName(domainConfig, zoneName);
|
||||
const zoneId = result.id;
|
||||
|
||||
let zoneId = result.id;
|
||||
const records = await getDnsRecords(domainConfig, zoneId, fqdn, type);
|
||||
|
||||
getDnsRecords(dnsConfig, zoneId, fqdn, type, function (error, dnsRecords) {
|
||||
if (error) return callback(error);
|
||||
let i = 0; // // used to track available records to update instead of create
|
||||
|
||||
let i = 0; // // used to track available records to update instead of create
|
||||
for (let value of values) {
|
||||
let priority = null;
|
||||
|
||||
async.eachSeries(values, function (value, iteratorCallback) {
|
||||
var priority = null;
|
||||
if (type === 'MX') {
|
||||
priority = parseInt(value.split(' ')[0], 10);
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
|
||||
if (type === 'MX') {
|
||||
priority = parseInt(value.split(' ')[0], 10);
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
const data = {
|
||||
type: type,
|
||||
name: fqdn,
|
||||
content: value,
|
||||
priority: priority,
|
||||
proxied: false,
|
||||
ttl: 120 // 1 means "automatic" (meaning 300ms) and 120 is the lowest supported
|
||||
};
|
||||
|
||||
var data = {
|
||||
type: type,
|
||||
name: fqdn,
|
||||
content: value,
|
||||
priority: priority,
|
||||
proxied: false,
|
||||
ttl: 120 // 1 means "automatic" (meaning 300ms) and 120 is the lowest supported
|
||||
};
|
||||
if (i >= records.length) { // create a new record
|
||||
debug(`upsert: Adding new record fqdn: ${fqdn}, zoneName: ${zoneName} proxied: false`);
|
||||
|
||||
if (i >= dnsRecords.length) { // create a new record
|
||||
debug(`upsert: Adding new record fqdn: ${fqdn}, zoneName: ${zoneName} proxied: false`);
|
||||
const [error, response] = await safe(createRequest('POST', `${CLOUDFLARE_ENDPOINT}/zones/${zoneId}/dns_records`, domainConfig)
|
||||
.send(data));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200 || response.body.success !== true) throw translateRequestError(response);
|
||||
} else { // replace existing record
|
||||
data.proxied = records[i].proxied; // preserve proxied parameter
|
||||
|
||||
createRequest('POST', CLOUDFLARE_ENDPOINT + '/zones/' + zoneId + '/dns_records', dnsConfig)
|
||||
.send(data)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorCallback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, iteratorCallback);
|
||||
debug(`upsert: Updating existing record fqdn: ${fqdn}, zoneName: ${zoneName} proxied: ${data.proxied}`);
|
||||
|
||||
iteratorCallback(null);
|
||||
});
|
||||
} else { // replace existing record
|
||||
data.proxied = dnsRecords[i].proxied; // preserve proxied parameter
|
||||
const [error, response] = await safe(createRequest('PUT', `${CLOUDFLARE_ENDPOINT}/zones/${zoneId}/dns_records/${records[i].id}`, domainConfig)
|
||||
.send(data));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200 || response.body.success !== true) throw translateRequestError(response);
|
||||
++i; // increment, as we have consumed the record
|
||||
}
|
||||
}
|
||||
|
||||
debug(`upsert: Updating existing record fqdn: ${fqdn}, zoneName: ${zoneName} proxied: ${data.proxied}`);
|
||||
|
||||
createRequest('PUT', CLOUDFLARE_ENDPOINT + '/zones/' + zoneId + '/dns_records/' + dnsRecords[i].id, dnsConfig)
|
||||
.send(data)
|
||||
.end(function (error, result) {
|
||||
++i; // increment, as we have consumed the record
|
||||
|
||||
if (error && !error.response) return iteratorCallback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, iteratorCallback);
|
||||
|
||||
iteratorCallback(null);
|
||||
});
|
||||
}
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
for (let j = values.length + 1; j < records.length; j++) {
|
||||
const [error] = await safe(createRequest('DELETE', `${CLOUDFLARE_ENDPOINT}/zones/${zoneId}/dns_records/${records[j].id}`, domainConfig));
|
||||
if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, zone) {
|
||||
if (error) return callback(error);
|
||||
|
||||
getDnsRecords(dnsConfig, zone.id, fqdn, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var tmp = result.map(function (record) { return record.content; });
|
||||
debug('get: %j', tmp);
|
||||
|
||||
callback(null, tmp);
|
||||
});
|
||||
});
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
const result = await getDnsRecords(domainConfig, zone.id, fqdn, type);
|
||||
const tmp = result.map(function (record) { return record.content; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
getDnsRecords(dnsConfig, zone.id, fqdn, type, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
if (result.length === 0) return callback(null);
|
||||
const result = await getDnsRecords(domainConfig, zone.id, fqdn, type);
|
||||
if (result.length === 0) return;
|
||||
|
||||
var zoneId = result[0].zone_id;
|
||||
const zoneId = result[0].zone_id;
|
||||
|
||||
var tmp = result.filter(function (record) { return values.some(function (value) { return value === record.content; }); });
|
||||
debug('del: %j', tmp);
|
||||
const tmp = result.filter(function (record) { return values.some(function (value) { return value === record.content; }); });
|
||||
debug('del: %j', tmp);
|
||||
|
||||
if (tmp.length === 0) return callback(null);
|
||||
if (tmp.length === 0) return;
|
||||
|
||||
async.eachSeries(tmp, function (record, callback) {
|
||||
createRequest('DELETE', CLOUDFLARE_ENDPOINT + '/zones/'+ zoneId + '/dns_records/' + record.id, dnsConfig)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
|
||||
debug('del: done');
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, 'unused');
|
||||
});
|
||||
});
|
||||
});
|
||||
for (const r of tmp) {
|
||||
const [error, response] = await safe(createRequest('DELETE', `${CLOUDFLARE_ENDPOINT}/zones/${zoneId}/dns_records/${r.id}`, domainConfig));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200 || response.body.success !== true) throw translateRequestError(response);
|
||||
}
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
debug('wait: %s for zone %s of type %s', fqdn, zoneName, type);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZoneByName(domainConfig, zoneName);
|
||||
const zoneId = result.id;
|
||||
|
||||
let zoneId = result.id;
|
||||
const dnsRecords = await getDnsRecords(domainConfig, zoneId, fqdn, type);
|
||||
if (dnsRecords.length === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
|
||||
|
||||
getDnsRecords(dnsConfig, zoneId, fqdn, type, function (error, dnsRecords) {
|
||||
if (error) return callback(error);
|
||||
if (dnsRecords.length === 0) return callback(new BoxError(BoxError.NOT_FOUND, 'Domain not found'));
|
||||
if (!dnsRecords[0].proxied) return await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
|
||||
if (!dnsRecords[0].proxied) return waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
debug('wait: skipping wait of proxied domain');
|
||||
|
||||
debug('wait: skipping wait of proxied domain');
|
||||
|
||||
callback(null); // maybe we can check for dns to be cloudflare IPs? https://api.cloudflare.com/#cloudflare-ips-cloudflare-ip-details
|
||||
});
|
||||
});
|
||||
// maybe we can check for dns to be cloudflare IPs? https://api.cloudflare.com/#cloudflare-ips-cloudflare-ip-details
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
// token can be api token or global api key
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
if (dnsConfig.tokenType !== 'GlobalApiKey' && dnsConfig.tokenType !== 'ApiToken') return callback(new BoxError(BoxError.BAD_FIELD, 'tokenType is required', { field: 'tokenType' }));
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
if (domainConfig.tokenType !== 'GlobalApiKey' && domainConfig.tokenType !== 'ApiToken') throw new BoxError(BoxError.BAD_FIELD, 'tokenType is required');
|
||||
|
||||
if (dnsConfig.tokenType === 'GlobalApiKey') {
|
||||
if (typeof dnsConfig.email !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'email must be a non-empty string', { field: 'email' }));
|
||||
if (domainConfig.tokenType === 'GlobalApiKey') {
|
||||
if (typeof domainConfig.email !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'email must be a non-empty string');
|
||||
}
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
token: dnsConfig.token,
|
||||
tokenType: dnsConfig.tokenType,
|
||||
email: dnsConfig.email || null
|
||||
const credentials = {
|
||||
token: domainConfig.token,
|
||||
tokenType: domainConfig.tokenType,
|
||||
email: domainConfig.email || null
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
if (!_.isEqual(zone.name_servers.sort(), nameservers.sort())) {
|
||||
debug('verifyDnsConfig: %j and %j do not match', nameservers, zone.name_servers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Cloudflare', { field: 'nameservers' }));
|
||||
}
|
||||
if (!_.isEqual(zone.name_servers.sort(), nameservers.sort())) {
|
||||
debug('verifyDomainConfig: %j and %j do not match', nameservers, zone.name_servers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Cloudflare');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -7,24 +7,23 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/digitalocean'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
|
||||
const DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
|
||||
|
||||
function formatError(response) {
|
||||
return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
|
||||
return `DigitalOcean DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
|
||||
}
|
||||
|
||||
function removePrivateFields(domainObject) {
|
||||
@@ -36,244 +35,208 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function getInternal(dnsConfig, zoneName, name, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneRecords(domainConfig, zoneName, name, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var nextPage = null, matchingRecords = [];
|
||||
let nextPage = null, matchingRecords = [];
|
||||
|
||||
debug(`getInternal: getting dns records of ${zoneName} with ${name} and type ${type}`);
|
||||
|
||||
async.doWhilst(function (iteratorDone) {
|
||||
var url = nextPage ? nextPage : DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records';
|
||||
do {
|
||||
const url = nextPage ? nextPage : DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records';
|
||||
|
||||
superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorDone(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return iteratorDone(new BoxError(BoxError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorDone(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorDone(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
matchingRecords = matchingRecords.concat(result.body.domain_records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
nextPage = (result.body.links && result.body.links.pages) ? result.body.links.pages.next : null;
|
||||
matchingRecords = matchingRecords.concat(response.body.domain_records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (testDone) { return testDone(null, !!nextPage); }, function (error) {
|
||||
debug('getInternal:', error, JSON.stringify(matchingRecords));
|
||||
nextPage = (response.body.links && response.body.links.pages) ? response.body.links.pages.next : null;
|
||||
} while (nextPage);
|
||||
|
||||
if (error) return callback(error);
|
||||
|
||||
return callback(null, matchingRecords);
|
||||
});
|
||||
return matchingRecords;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const records = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
|
||||
// used to track available records to update instead of create
|
||||
var i = 0, recordIds = [];
|
||||
// used to track available records to update instead of create
|
||||
let i = 0, recordIds = [];
|
||||
|
||||
async.eachSeries(values, function (value, iteratorCallback) {
|
||||
var priority = null;
|
||||
for (let value of values) {
|
||||
let priority = null;
|
||||
|
||||
if (type === 'MX') {
|
||||
priority = value.split(' ')[0];
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
if (type === 'MX') {
|
||||
priority = value.split(' ')[0];
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
|
||||
var data = {
|
||||
type: type,
|
||||
name: name,
|
||||
data: value,
|
||||
priority: priority,
|
||||
ttl: 30 // Recent DO DNS API break means this value must atleast be 30
|
||||
};
|
||||
const data = {
|
||||
type: type,
|
||||
name: name,
|
||||
data: value,
|
||||
priority: priority,
|
||||
ttl: 30 // Recent DO DNS API break means this value must atleast be 30
|
||||
};
|
||||
|
||||
if (i >= result.length) {
|
||||
superagent.post(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records')
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 422) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 201) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (i >= records.length) {
|
||||
const [error, response] = await safe(superagent.post(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records')
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
recordIds.push(safe.query(result.body, 'domain_record.id'));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
|
||||
if (response.statusCode !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
} else {
|
||||
superagent.put(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + result[i].id)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
// increment, as we have consumed the record
|
||||
++i;
|
||||
recordIds.push(safe.query(records.body, 'domain_record.id'));
|
||||
} else {
|
||||
const [error, response] = await safe(superagent.put(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + records[i].id)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 422) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 200) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
++i;
|
||||
|
||||
recordIds.push(safe.query(result.body, 'domain_record.id'));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
}
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
recordIds.push(safe.query(records.body, 'domain_record.id'));
|
||||
}
|
||||
}
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
for (let j = values.length + 1; j < records.length; j++) {
|
||||
const [error] = await safe(superagent.del(`${DIGITALOCEAN_ENDPOINT}/v2/domains/${zoneName}/records/${records[j].id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
|
||||
}
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
|
||||
// We only return the value string
|
||||
var tmp = result.map(function (record) { return record.data; });
|
||||
|
||||
debug('get: %j', tmp);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
const tmp = result.map(function (record) { return record.data; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
if (result.length === 0) return;
|
||||
|
||||
if (result.length === 0) return callback(null);
|
||||
const tmp = result.filter(function (record) { return values.some(function (value) { return value === record.data; }); });
|
||||
if (tmp.length === 0) return;
|
||||
|
||||
var tmp = result.filter(function (record) { return values.some(function (value) { return value === record.data; }); });
|
||||
|
||||
debug('del: %j', tmp);
|
||||
|
||||
if (tmp.length === 0) return callback(null);
|
||||
|
||||
// FIXME we only handle the first one currently
|
||||
|
||||
superagent.del(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + tmp[0].id)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
for (const r of tmp) {
|
||||
const [error, response] = await safe(superagent.del(`${DIGITALOCEAN_ENDPOINT}/v2/domains/${zoneName}/records/${r.id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) return;
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
token: dnsConfig.token
|
||||
const credentials = {
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.digitalocean.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains DO NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to DigitalOcean', { field: 'nameservers' }));
|
||||
}
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.digitalocean.com') === -1) {
|
||||
debug('verifyDomainConfig: %j does not contains DO NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to DigitalOcean');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
128
src/dns/gandi.js
128
src/dns/gandi.js
@@ -7,14 +7,16 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/gandi'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
@@ -34,146 +36,126 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`upsert: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
var data = {
|
||||
const data = {
|
||||
'rrset_ttl': 300, // this is the minimum allowed
|
||||
'rrset_values': values // for mx records, value is already of the '<priority> <server>' format
|
||||
};
|
||||
|
||||
superagent.put(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.put(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.send(data)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 400) return callback(new BoxError(BoxError.BAD_FIELD, formatError(result)));
|
||||
if (result.statusCode !== 201) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response));
|
||||
if (response.statusCode !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`get: ${name} in zone ${zoneName} of type ${type}`);
|
||||
|
||||
superagent.get(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.get(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 404) return callback(null, [ ]);
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
debug('get: %j', result.body);
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 404) return [];
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
return callback(null, result.body.rrset_values);
|
||||
});
|
||||
return response.body.rrset_values;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
superagent.del(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.del(`${GANDI_API}/domains/${zoneName}/records/${name}/${type}`)
|
||||
.set('X-Api-Key', domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) return;
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
|
||||
var credentials = {
|
||||
token: dnsConfig.token
|
||||
const credentials = {
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.gandi.net') !== -1; })) {
|
||||
debug('verifyDnsConfig: %j does not contain Gandi NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Gandi', { field: 'nameservers' }));
|
||||
}
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.gandi.net') !== -1; })) {
|
||||
debug('verifyDomainConfig: %j does not contain Gandi NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Gandi');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
221
src/dns/gcdns.js
221
src/dns/gcdns.js
@@ -1,5 +1,7 @@
|
||||
'use strict';
|
||||
|
||||
const safe = require('safetydance');
|
||||
|
||||
exports = module.exports = {
|
||||
removePrivateFields,
|
||||
injectPrivateFields,
|
||||
@@ -7,13 +9,14 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/gcdns'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
GCDNS = require('@google-cloud/dns').DNS,
|
||||
waitForDns = require('./waitfordns.js'),
|
||||
@@ -28,210 +31,166 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.credentials.private_key === constants.SECRET_PLACEHOLDER && currentConfig.credentials) newConfig.credentials.private_key = currentConfig.credentials.private_key;
|
||||
}
|
||||
|
||||
function getDnsCredentials(dnsConfig) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
function getDnsCredentials(domainConfig) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
|
||||
return {
|
||||
projectId: dnsConfig.projectId,
|
||||
projectId: domainConfig.projectId,
|
||||
credentials: {
|
||||
client_email: dnsConfig.credentials.client_email,
|
||||
private_key: dnsConfig.credentials.private_key
|
||||
client_email: domainConfig.credentials.client_email,
|
||||
private_key: domainConfig.credentials.private_key
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneByName(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var gcdns = new GCDNS(getDnsCredentials(dnsConfig));
|
||||
const gcdns = new GCDNS(getDnsCredentials(domainConfig));
|
||||
|
||||
gcdns.getZones(function (error, zones) {
|
||||
if (error && error.message === 'invalid_grant') return callback(new BoxError(BoxError.ACCESS_DENIED, 'The key was probably revoked'));
|
||||
if (error && error.reason === 'No such domain') return callback(new BoxError(BoxError.NOT_FOUND, error.message));
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 404) return callback(new BoxError(BoxError.NOT_FOUND, error.message));
|
||||
if (error) {
|
||||
debug('gcdns.getZones', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
}
|
||||
const [error, result] = await safe(gcdns.getZones());
|
||||
if (error && error.message === 'invalid_grant') throw new BoxError(BoxError.ACCESS_DENIED, 'The key was probably revoked');
|
||||
if (error && error.reason === 'No such domain') throw new BoxError(BoxError.NOT_FOUND, error.message);
|
||||
if (error && error.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 404) throw new BoxError(BoxError.NOT_FOUND, error.message);
|
||||
if (error) {
|
||||
debug('gcdns.getZones', error);
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, error);
|
||||
}
|
||||
|
||||
var zone = zones.filter(function (zone) {
|
||||
return zone.metadata.dnsName.slice(0, -1) === zoneName; // the zone name contains a '.' at the end
|
||||
})[0];
|
||||
const zone = result[0].filter(function (zone) {
|
||||
return zone.metadata.dnsName.slice(0, -1) === zoneName; // the zone name contains a '.' at the end
|
||||
})[0];
|
||||
|
||||
if (!zone) return callback(new BoxError(BoxError.NOT_FOUND, 'no such zone'));
|
||||
if (!zone) throw new BoxError(BoxError.NOT_FOUND, 'no such zone');
|
||||
|
||||
callback(null, zone); //zone.metadata ~= {name="", dnsName="", nameServers:[]}
|
||||
});
|
||||
return zone; //zone.metadata ~= {name="", dnsName="", nameServers:[]}
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
debug('add: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
|
||||
|
||||
getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(getDnsCredentials(domainConfig), zoneName);
|
||||
|
||||
zone.getRecords({ type: type, name: fqdn + '.' }, function (error, oldRecords) {
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) {
|
||||
debug('upsert->zone.getRecords', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
}
|
||||
const [error, result] = await safe(zone.getRecords({ type: type, name: fqdn + '.' }));
|
||||
if (error && error.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
|
||||
var newRecord = zone.record(type, {
|
||||
name: fqdn + '.',
|
||||
data: values,
|
||||
ttl: 1
|
||||
});
|
||||
|
||||
zone.createChange({ delete: oldRecords, add: newRecord }, function(error /*, change */) {
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 412) return callback(new BoxError(BoxError.BUSY, error.message));
|
||||
if (error) {
|
||||
debug('upsert->zone.createChange', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
const newRecord = zone.record(type, {
|
||||
name: fqdn + '.',
|
||||
data: values,
|
||||
ttl: 1
|
||||
});
|
||||
|
||||
const [changeError] = await safe(zone.createChange({ delete: result[0], add: newRecord }));
|
||||
if (changeError && changeError.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, changeError.message);
|
||||
if (changeError && changeError.code === 412) throw new BoxError(BoxError.BUSY, changeError.message);
|
||||
if (changeError) throw new BoxError(BoxError.EXTERNAL_ERROR, changeError.message);
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(getDnsCredentials(domainConfig), zoneName);
|
||||
|
||||
var params = {
|
||||
name: fqdn + '.',
|
||||
type: type
|
||||
};
|
||||
const params = {
|
||||
name: fqdn + '.',
|
||||
type: type
|
||||
};
|
||||
|
||||
zone.getRecords(params, function (error, records) {
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
if (records.length === 0) return callback(null, [ ]);
|
||||
const [error, result] = await safe(zone.getRecords(params));
|
||||
if (error && error.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
if (result[0].length === 0) return [];
|
||||
|
||||
return callback(null, records[0].data);
|
||||
});
|
||||
});
|
||||
return result[0][0].data;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(getDnsCredentials(domainConfig), zoneName);
|
||||
|
||||
zone.getRecords({ type: type, name: fqdn + '.' }, function(error, oldRecords) {
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) {
|
||||
debug('del->zone.getRecords', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
}
|
||||
const [error, result] = await safe(zone.getRecords({ type: type, name: fqdn + '.' }));
|
||||
if (error && error.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
|
||||
zone.deleteRecords(oldRecords, function (error, change) {
|
||||
if (error && error.code === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 412) return callback(new BoxError(BoxError.BUSY, error.message));
|
||||
if (error) {
|
||||
debug('del->zone.createChange', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
}
|
||||
|
||||
callback(null, change.id);
|
||||
});
|
||||
});
|
||||
});
|
||||
const [delError] = await safe(zone.deleteRecords(result[0]));
|
||||
if (delError && delError.code === 403) throw new BoxError(BoxError.ACCESS_DENIED, delError.message);
|
||||
if (delError && delError.code === 412) throw new BoxError(BoxError.BUSY, delError.message);
|
||||
if (delError) throw new BoxError(BoxError.EXTERNAL_ERROR, delError.message);
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (typeof dnsConfig.projectId !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'projectId must be a string', { field: 'projectId' }));
|
||||
if (!dnsConfig.credentials || typeof dnsConfig.credentials !== 'object') return callback(new BoxError(BoxError.BAD_FIELD, 'credentials must be an object', { field: 'credentials' }));
|
||||
if (typeof dnsConfig.credentials.client_email !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'credentials.client_email must be a string', { field: 'client_email' }));
|
||||
if (typeof dnsConfig.credentials.private_key !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'credentials.private_key must be a string', { field: 'private_key' }));
|
||||
if (typeof domainConfig.projectId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'projectId must be a string');
|
||||
if (!domainConfig.credentials || typeof domainConfig.credentials !== 'object') throw new BoxError(BoxError.BAD_FIELD, 'credentials must be an object');
|
||||
if (typeof domainConfig.credentials.client_email !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'credentials.client_email must be a string');
|
||||
if (typeof domainConfig.credentials.private_key !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'credentials.private_key must be a string');
|
||||
|
||||
var credentials = getDnsCredentials(dnsConfig);
|
||||
const credentials = getDnsCredentials(domainConfig);
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
getZoneByName(credentials, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(credentials, zoneName);
|
||||
|
||||
var definedNS = zone.metadata.nameServers.sort().map(function(r) { return r.replace(/\.$/, ''); });
|
||||
if (!_.isEqual(definedNS, nameservers.sort())) {
|
||||
debug('verifyDnsConfig: %j and %j do not match', nameservers, definedNS);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Google Cloud DNS', { field: 'nameservers' }));
|
||||
}
|
||||
const definedNS = zone.metadata.nameServers.sort().map(function(r) { return r.replace(/\.$/, ''); });
|
||||
if (!_.isEqual(definedNS, nameservers.sort())) {
|
||||
debug('verifyDomainConfig: %j and %j do not match', nameservers, definedNS);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Google Cloud DNS');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -7,14 +7,16 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/godaddy'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
@@ -22,11 +24,6 @@ const assert = require('assert'),
|
||||
// const GODADDY_API_OTE = 'https://api.ote-godaddy.com/v1/domains';
|
||||
const GODADDY_API = 'https://api.godaddy.com/v1/domains';
|
||||
|
||||
// this is a workaround for godaddy not having a delete API
|
||||
// https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
|
||||
const GODADDY_INVALID_IP = '0.0.0.0';
|
||||
const GODADDY_INVALID_TXT = '""';
|
||||
|
||||
function formatError(response) {
|
||||
return util.format(`GoDaddy DNS error [${response.statusCode}] ${response.body.message}`);
|
||||
}
|
||||
@@ -40,22 +37,21 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.apiSecret === constants.SECRET_PLACEHOLDER) newConfig.apiSecret = currentConfig.apiSecret;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`upsert: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
var records = [ ];
|
||||
values.forEach(function (value) {
|
||||
var record = { ttl: 600 }; // 600 is the min ttl
|
||||
const records = [];
|
||||
for (const value of values) {
|
||||
const record = { ttl: 600 }; // 600 is the min ttl
|
||||
|
||||
if (type === 'MX') {
|
||||
record.priority = parseInt(value.split(' ')[0], 10);
|
||||
@@ -65,152 +61,135 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
}
|
||||
|
||||
records.push(record);
|
||||
});
|
||||
}
|
||||
|
||||
superagent.put(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${dnsConfig.apiKey}:${dnsConfig.apiSecret}`)
|
||||
const [error, response] = await safe(superagent.put(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${domainConfig.apiKey}:${domainConfig.apiSecret}`)
|
||||
.timeout(30 * 1000)
|
||||
.send(records)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 400) return callback(new BoxError(BoxError.BAD_FIELD, formatError(result))); // no such zone
|
||||
if (result.statusCode === 422) return callback(new BoxError(BoxError.BAD_FIELD, formatError(result))); // conflict
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response)); // no such zone
|
||||
if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, formatError(response)); // conflict
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`get: ${name} in zone ${zoneName} of type ${type}`);
|
||||
|
||||
superagent.get(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${dnsConfig.apiKey}:${dnsConfig.apiSecret}`)
|
||||
const [error, response] = await safe(superagent.get(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${domainConfig.apiKey}:${domainConfig.apiSecret}`)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 404) return callback(null, [ ]);
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
debug('get: %j', result.body);
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode === 404) return [];
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
var values = result.body.map(function (record) { return record.data; });
|
||||
const values = response.body.map(function (record) { return record.data; });
|
||||
|
||||
if (values.length === 1 && values[0] === GODADDY_INVALID_IP) return callback(null, [ ]); // pretend this record doesn't exist
|
||||
if (values.length === 1) {
|
||||
// legacy: this was a workaround for godaddy not having a delete API
|
||||
// https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
|
||||
const GODADDY_INVALID_IP = '0.0.0.0';
|
||||
const GODADDY_INVALID_IPv6 = '0:0:0:0:0:0:0:0';
|
||||
const GODADDY_INVALID_TXT = '""';
|
||||
|
||||
return callback(null, values);
|
||||
});
|
||||
if ((type === 'A' && values[0] === GODADDY_INVALID_IP)
|
||||
|| (type === 'AAAA' && values[0] === GODADDY_INVALID_IPv6)
|
||||
|| (type === 'TXT' && values[0] === GODADDY_INVALID_TXT)) return []; // pretend this record doesn't exist
|
||||
}
|
||||
|
||||
return values;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug(`get: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
if (type !== 'A' && type !== 'TXT') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Record deletion is not supported by GoDaddy API'));
|
||||
const result = await get(domainObject, location, type);
|
||||
if (result.length === 0) return;
|
||||
|
||||
// check if the record exists at all so that we don't insert the "Dead" record for no reason
|
||||
get(domainObject, location, type, function (error, values) {
|
||||
if (error) return callback(error);
|
||||
if (values.length === 0) return callback();
|
||||
const tmp = result.filter(r => !values.includes(r));
|
||||
|
||||
// godaddy does not have a delete API. so fill it up with an invalid IP that we can ignore in future get()
|
||||
var records = [{
|
||||
ttl: 600,
|
||||
data: type === 'A' ? GODADDY_INVALID_IP : GODADDY_INVALID_TXT
|
||||
}];
|
||||
if (tmp.length) return await upsert(domainObject, location, type, tmp); // only remove 'values'
|
||||
|
||||
superagent.put(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${dnsConfig.apiKey}:${dnsConfig.apiSecret}`)
|
||||
.send(records)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.del(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
|
||||
.set('Authorization', `sso-key ${domainConfig.apiKey}:${domainConfig.apiSecret}`)
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) return;
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.apiKey || typeof dnsConfig.apiKey !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiKey must be a non-empty string', { field: 'apiKey' }));
|
||||
if (!dnsConfig.apiSecret || typeof dnsConfig.apiSecret !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiSecret must be a non-empty string', { field: 'apiSecret' }));
|
||||
if (!domainConfig.apiKey || typeof domainConfig.apiKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'apiKey must be a non-empty string');
|
||||
if (!domainConfig.apiSecret || typeof domainConfig.apiSecret !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'apiSecret must be a non-empty string');
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
apiKey: dnsConfig.apiKey,
|
||||
apiSecret: dnsConfig.apiSecret
|
||||
const credentials = {
|
||||
apiKey: domainConfig.apiKey,
|
||||
apiSecret: domainConfig.apiSecret
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1; })) {
|
||||
debug('verifyDnsConfig: %j does not contain GoDaddy NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to GoDaddy', { field: 'nameservers' }));
|
||||
}
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1; })) {
|
||||
debug('verifyDomainConfig: %j does not contain GoDaddy NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to GoDaddy');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
259
src/dns/hetzner.js
Normal file
259
src/dns/hetzner.js
Normal file
@@ -0,0 +1,259 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
removePrivateFields,
|
||||
injectPrivateFields,
|
||||
upsert,
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/digitalocean'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
|
||||
const ENDPOINT = 'https://dns.hetzner.com/api/v1';
|
||||
|
||||
function formatError(response) {
|
||||
return `Hetzner DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
|
||||
}
|
||||
|
||||
function removePrivateFields(domainObject) {
|
||||
domainObject.config.token = constants.SECRET_PLACEHOLDER;
|
||||
return domainObject;
|
||||
}
|
||||
|
||||
function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
async function getZone(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
|
||||
const [error, response] = await safe(superagent.get(`${ENDPOINT}/zones`)
|
||||
.set('Auth-API-Token', domainConfig.token)
|
||||
.query({ search_name: zoneName })
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
if (!Array.isArray(response.body.zones)) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
const zone = response.body.zones.filter(z => z.name === zoneName);
|
||||
if (zone.length === 0) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
|
||||
return zone[0];
|
||||
}
|
||||
|
||||
async function getZoneRecords(domainConfig, zone, name, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zone, 'object');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
|
||||
let page = 1, matchingRecords = [];
|
||||
|
||||
debug(`getInternal: getting dns records of ${zone.name} with ${name} and type ${type}`);
|
||||
|
||||
const perPage = 50;
|
||||
|
||||
// eslint-disable-next-line no-constant-condition
|
||||
while (true) {
|
||||
const [error, response] = await safe(superagent.get(`${ENDPOINT}/records`)
|
||||
.set('Auth-API-Token', domainConfig.token)
|
||||
.query({ zone_id: zone.id, page, per_page: perPage })
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
|
||||
if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
matchingRecords = matchingRecords.concat(response.body.records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
|
||||
if (response.body.records.length < perPage) break;
|
||||
|
||||
++page;
|
||||
}
|
||||
|
||||
return matchingRecords;
|
||||
}
|
||||
|
||||
// Creates or updates DNS records so that exactly `values` exist for the given
// location/type. Existing matching records are updated in place; extra values
// are created; surplus existing records are removed (best-effort).
// Throws BoxError on network/auth/validation/API errors.
async function upsert(domainObject, location, type, values) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(Array.isArray(values));

    const domainConfig = domainObject.config,
        zoneName = domainObject.zoneName,
        name = dns.getName(domainObject, location, type) || '@';

    debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);

    const zone = await getZone(domainConfig, zoneName);
    const records = await getZoneRecords(domainConfig, zone, name, type);

    // used to track available records to update instead of create
    let i = 0;

    for (let value of values) {
        const data = {
            type,
            name,
            value,
            ttl: 60,
            zone_id: zone.id
        };

        if (i >= records.length) { // no existing record left to reuse; create a new one
            const [error, response] = await safe(superagent.post(`${ENDPOINT}/records`)
                .set('Auth-API-Token', domainConfig.token)
                .send(data)
                .timeout(30 * 1000)
                .retry(5)
                .ok(() => true));

            if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
            if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
            if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
            if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
        } else { // reuse an existing record by overwriting it
            const [error, response] = await safe(superagent.put(`${ENDPOINT}/records/${records[i].id}`)
                .set('Auth-API-Token', domainConfig.token)
                .send(data)
                .timeout(30 * 1000)
                .retry(5)
                .ok(() => true));

            ++i; // record consumed regardless of outcome

            if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
            if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
            if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
            if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
        }
    }

    // Remove surplus pre-existing records. Records at indices [0, values.length)
    // were updated above, so everything from index values.length onwards is stale.
    // BUGFIX: the loop previously started at values.length + 1, which always left
    // one stale record behind whenever records.length > values.length.
    for (let j = values.length; j < records.length; j++) {
        const [error] = await safe(superagent.del(`${ENDPOINT}/records/${records[j].id}`)
            .set('Auth-API-Token', domainConfig.token)
            .timeout(30 * 1000)
            .retry(5)
            .ok(() => true));

        // best-effort cleanup: log and continue, do not fail the upsert
        if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
    }

    debug('upsert: completed');
}
|
||||
|
||||
// Returns the values (as an array of strings) of all DNS records matching the
// given location and type in the domain's zone.
async function get(domainObject, location, type) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');

    const domainConfig = domainObject.config;
    const zoneName = domainObject.zoneName;
    const name = dns.getName(domainObject, location, type) || '@';

    const zone = await getZone(domainConfig, zoneName);
    const matching = await getZoneRecords(domainConfig, zone, name, type);

    return matching.map((record) => record.value);
}
|
||||
|
||||
// Deletes the DNS records matching location/type whose value is one of `values`.
// A 404 from the API (record already gone) is treated as success.
async function del(domainObject, location, type, values) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(Array.isArray(values));

    const domainConfig = domainObject.config;
    const zoneName = domainObject.zoneName;
    const name = dns.getName(domainObject, location, type) || '@';

    const zone = await getZone(domainConfig, zoneName);
    const records = await getZoneRecords(domainConfig, zone, name, type);
    if (records.length === 0) return;

    // only remove records whose value was explicitly requested
    const toRemove = records.filter((record) => values.includes(record.value));
    if (toRemove.length === 0) return;

    for (const record of toRemove) {
        const [error, response] = await safe(superagent.del(`${ENDPOINT}/records/${record.id}`)
            .set('Auth-API-Token', domainConfig.token)
            .timeout(30 * 1000)
            .retry(5)
            .ok(() => true));

        if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
        if (response.statusCode === 404) return; // already gone
        if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
        if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
    }
}
|
||||
|
||||
// Waits until the DNS record for subdomain resolves to `value`, polling per
// `options` ({ interval, times }). Delegates the actual polling to waitForDns.
async function wait(domainObject, subdomain, type, value, options) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof type, 'string');
    assert.strictEqual(typeof value, 'string');
    assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }

    await waitForDns(dns.fqdn(subdomain, domainObject), domainObject.zoneName, type, value, options);
}
|
||||
|
||||
// Validates the domain's DNS configuration for the Hetzner backend:
// checks the token, verifies the zone's nameservers point at Hetzner, and
// round-trips a test A record. Returns the sanitized credentials object.
// Throws BoxError.BAD_FIELD on any validation failure.
async function verifyDomainConfig(domainObject) {
    assert.strictEqual(typeof domainObject, 'object');

    const domainConfig = domainObject.config;
    const zoneName = domainObject.zoneName;

    if (typeof domainConfig.token !== 'string' || !domainConfig.token) throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');

    const credentials = {
        token: domainConfig.token
    };

    if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here

    const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
    if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
    if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');

    // https://docs.hetzner.com/dns-console/dns/general/dns-overview#the-hetzner-online-name-servers-are
    const lowered = nameservers.map((n) => n.toLowerCase());
    if (!lowered.includes('oxygen.ns.hetzner.com')) {
        debug('verifyDomainConfig: %j does not contain Hetzner NS', nameservers);
        throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Hetzner');
    }

    // prove write access by creating and removing a throwaway A record
    const location = 'cloudrontestdns';
    const ip = '127.0.0.1';

    await upsert(domainObject, location, 'A', [ ip ]);
    debug('verifyDomainConfig: Test A record added');

    await del(domainObject, location, 'A', [ ip ]);
    debug('verifyDomainConfig: Test A record removed again');

    return credentials;
}
|
||||
@@ -13,7 +13,7 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
@@ -29,57 +29,50 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
// in-place injection of tokens and api keys which came in with constants.SECRET_PLACEHOLDER
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, subdomain, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: none
|
||||
|
||||
callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'upsert is not implemented'));
|
||||
throw new BoxError(BoxError.NOT_IMPLEMENTED, 'upsert is not implemented');
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, subdomain, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: Array of matching DNS records in string format
|
||||
|
||||
callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'get is not implemented'));
|
||||
throw new BoxError(BoxError.NOT_IMPLEMENTED, 'get is not implemented');
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, subdomain, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: none
|
||||
|
||||
callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'del is not implemented'));
|
||||
throw new BoxError(BoxError.NOT_IMPLEMENTED, 'del is not implemented');
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: dnsConfig object
|
||||
// Result: domainConfig object
|
||||
|
||||
callback(new BoxError(BoxError.NOT_IMPLEMENTED, 'verifyDnsConfig is not implemented'));
|
||||
throw new BoxError(BoxError.NOT_IMPLEMENTED, 'verifyDomainConfig is not implemented');
|
||||
}
|
||||
|
||||
@@ -7,15 +7,16 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const async = require('async'),
|
||||
assert = require('assert'),
|
||||
const assert = require('assert'),
|
||||
constants = require('../constants.js'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
debug = require('debug')('box:dns/linode'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
@@ -35,278 +36,232 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function getZoneId(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneId(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// returns 100 at a time
|
||||
superagent.get(`${LINODE_ENDPOINT}/domains`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.get(`${LINODE_ENDPOINT}/domains`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
if (!Array.isArray(result.body.data)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
const zone = result.body.data.find(d => d.domain === zoneName);
|
||||
if (!Array.isArray(response.body.data)) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response');
|
||||
|
||||
if (!zone || !zone.id) return callback(new BoxError(BoxError.NOT_FOUND, 'Zone not found'));
|
||||
const zone = response.body.data.find(d => d.domain === zoneName);
|
||||
|
||||
debug(`getZoneId: zone id of ${zoneName} is ${zone.id}`);
|
||||
if (!zone || !zone.id) throw new BoxError(BoxError.NOT_FOUND, 'Zone not found');
|
||||
|
||||
callback(null, zone.id);
|
||||
});
|
||||
debug(`getZoneId: zone id of ${zoneName} is ${zone.id}`);
|
||||
|
||||
return zone.id;
|
||||
}
|
||||
|
||||
function getZoneRecords(dnsConfig, zoneName, name, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneRecords(domainConfig, zoneName, name, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`getInternal: getting dns records of ${zoneName} with ${name} and type ${type}`);
|
||||
|
||||
getZoneId(dnsConfig, zoneName, function (error, zoneId) {
|
||||
if (error) return callback(error);
|
||||
const zoneId = await getZoneId(domainConfig, zoneName);
|
||||
|
||||
let page = 0, more = false;
|
||||
let records = [];
|
||||
let page = 0, more = false;
|
||||
let records = [];
|
||||
|
||||
async.doWhilst(function (iteratorDone) {
|
||||
const url = `${LINODE_ENDPOINT}/domains/${zoneId}/records?page=${++page}`;
|
||||
do {
|
||||
const url = `${LINODE_ENDPOINT}/domains/${zoneId}/records?page=${++page}`;
|
||||
|
||||
superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorDone(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return iteratorDone(new BoxError(BoxError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorDone(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorDone(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
records = records.concat(result.body.data.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
more = result.body.page !== result.body.pages;
|
||||
records = records.concat(response.body.data.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (testDone) { return testDone(null, more); }, function (error) {
|
||||
debug('getZoneRecords:', error, JSON.stringify(records));
|
||||
more = response.body.page !== response.body.pages;
|
||||
} while (more);
|
||||
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, { zoneId, records });
|
||||
});
|
||||
});
|
||||
return { zoneId, records };
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const { records } = result;
|
||||
var tmp = records.map(function (record) { return record.target; });
|
||||
|
||||
debug('get: %j', tmp);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
const { records } = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
const tmp = records.map(function (record) { return record.target; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const { zoneId, records } = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
let i = 0, recordIds = []; // used to track available records to update instead of create
|
||||
|
||||
const { zoneId, records } = result;
|
||||
let i = 0, recordIds = []; // used to track available records to update instead of create
|
||||
for (const value of values) {
|
||||
const data = {
|
||||
type: type,
|
||||
ttl_sec: 300 // lowest
|
||||
};
|
||||
|
||||
async.eachSeries(values, function (value, iteratorCallback) {
|
||||
let data = {
|
||||
type: type,
|
||||
ttl_sec: 300 // lowest
|
||||
};
|
||||
if (type === 'MX') {
|
||||
data.priority = parseInt(value.split(' ')[0], 10);
|
||||
data.target = value.split(' ')[1];
|
||||
} else if (type === 'TXT') {
|
||||
data.target = value.replace(/^"(.*)"$/, '$1'); // strip any double quotes
|
||||
} else {
|
||||
data.target = value;
|
||||
}
|
||||
|
||||
if (type === 'MX') {
|
||||
data.priority = parseInt(value.split(' ')[0], 10);
|
||||
data.target = value.split(' ')[1];
|
||||
} else if (type === 'TXT') {
|
||||
data.target = value.replace(/^"(.*)"$/, '$1'); // strip any double quotes
|
||||
} else {
|
||||
data.target = value;
|
||||
}
|
||||
if (i >= records.length) {
|
||||
data.name = name; // only set for new records
|
||||
|
||||
if (i >= records.length) {
|
||||
data.name = name; // only set for new records
|
||||
const [error, response] = await safe(superagent.post(`${LINODE_ENDPOINT}/domains/${zoneId}/records`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
superagent.post(`${LINODE_ENDPOINT}/domains/${zoneId}/records`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 400) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
recordIds.push(result.body.id);
|
||||
recordIds.push(response.body.id);
|
||||
} else {
|
||||
const [error, response] = await safe(superagent.put(`${LINODE_ENDPOINT}/domains/${zoneId}/records/${records[i].id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
} else {
|
||||
superagent.put(`${LINODE_ENDPOINT}/domains/${zoneId}/records/${records[i].id}`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
// increment, as we have consumed the record
|
||||
++i;
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 400) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
++i;
|
||||
|
||||
recordIds.push(result.body.id);
|
||||
recordIds.push(response.body.id);
|
||||
}
|
||||
}
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
}
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
for (let j = values.length + 1; j < records.length; j++) {
|
||||
const [error] = await safe(superagent.del(`${LINODE_ENDPOINT}/domains/${zoneId}/records/${records[j].id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const { zoneId, records } = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
if (records.length === 0) return;
|
||||
|
||||
const { zoneId, records } = result;
|
||||
if (records.length === 0) return callback(null);
|
||||
const tmp = records.filter(function (record) { return values.some(function (value) { return value === record.target; }); });
|
||||
if (tmp.length === 0) return;
|
||||
|
||||
var tmp = records.filter(function (record) { return values.some(function (value) { return value === record.target; }); });
|
||||
|
||||
debug('del: %j', tmp);
|
||||
|
||||
if (tmp.length === 0) return callback(null);
|
||||
|
||||
// FIXME we only handle the first one currently
|
||||
|
||||
superagent.del(`${LINODE_ENDPOINT}/domains/${zoneId}/records/${tmp[0].id}`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
for (const r of tmp) {
|
||||
const [error, response] = await safe(superagent.del(`${LINODE_ENDPOINT}/domains/${zoneId}/records/${r.id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
.ok(() => true));
|
||||
if (error && !error.response) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) return;
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
token: dnsConfig.token
|
||||
const credentials = {
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.linode.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains linode NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Linode', { field: 'nameservers' }));
|
||||
}
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.linode.com') === -1) {
|
||||
debug('verifyDomainConfig: %j does not contains linode NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Linode');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -1,19 +1,21 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
removePrivateFields: removePrivateFields,
|
||||
injectPrivateFields: injectPrivateFields,
|
||||
upsert: upsert,
|
||||
get: get,
|
||||
del: del,
|
||||
wait: wait,
|
||||
verifyDnsConfig: verifyDnsConfig
|
||||
removePrivateFields,
|
||||
injectPrivateFields,
|
||||
upsert,
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
|
||||
function removePrivateFields(domainObject) {
|
||||
@@ -25,61 +27,55 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', location, domainObject.zoneName, type, values);
|
||||
|
||||
return callback(null);
|
||||
return;
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback(null, [ ]); // returning ip confuses apptask into thinking the entry already exists
|
||||
return []; // returning ip confuses apptask into thinking the entry already exists
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
return callback();
|
||||
return;
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const zoneName = domainObject.zoneName;
|
||||
|
||||
// Very basic check if the nameservers can be fetched
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
callback(null, {});
|
||||
});
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ exports = module.exports = {
|
||||
upsert,
|
||||
get,
|
||||
del,
|
||||
verifyDnsConfig,
|
||||
verifyDomainConfig,
|
||||
wait
|
||||
};
|
||||
|
||||
@@ -14,8 +14,8 @@ const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/namecheap'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
querystring = require('querystring'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('../sysinfo.js'),
|
||||
@@ -34,286 +34,247 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
async function getQuery(dnsConfig) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getQuery(domainConfig) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ip = await sysinfo.getServerIPv4(); // only supports ipv4
|
||||
|
||||
return {
|
||||
ApiUser: dnsConfig.username,
|
||||
ApiKey: dnsConfig.token,
|
||||
UserName: dnsConfig.username,
|
||||
ApiUser: domainConfig.username,
|
||||
ApiKey: domainConfig.token,
|
||||
UserName: domainConfig.username,
|
||||
ClientIp: ip
|
||||
};
|
||||
}
|
||||
|
||||
function getZone(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZone(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
util.callbackify(getQuery)(dnsConfig, function (error, query) {
|
||||
if (error) return callback(error);
|
||||
const query = await getQuery(domainConfig);
|
||||
query.Command = 'namecheap.domains.dns.getHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
|
||||
query.Command = 'namecheap.domains.dns.getHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
const [error, response] = await safe(superagent.get(ENDPOINT).query(query).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
|
||||
superagent.get(ENDPOINT).query(query).end(function (error, result) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
const parser = new xml2js.Parser();
|
||||
const [parserError, result] = await safe(util.promisify(parser.parseString)(response.text));
|
||||
if (parserError) throw new BoxError(BoxError.EXTERNAL_ERROR, parserError);
|
||||
|
||||
var parser = new xml2js.Parser();
|
||||
parser.parseString(result.text, function (error, result) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
const tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
const errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') throw new BoxError(BoxError.ACCESS_DENIED, errorMessage);
|
||||
|
||||
var tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
var errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') return callback(new BoxError(BoxError.ACCESS_DENIED, errorMessage));
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, errorMessage);
|
||||
}
|
||||
const host = safe.query(tmp, 'CommandResponse[0].DomainDNSGetHostsResult[0].host');
|
||||
if (!host) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response: ${JSON.stringify(tmp)}`);
|
||||
if (!Array.isArray(host)) throw new BoxError(BoxError.EXTERNAL_ERROR, `host is not an array: ${JSON.stringify(tmp)}`);
|
||||
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, errorMessage));
|
||||
}
|
||||
const host = safe.query(tmp, 'CommandResponse[0].DomainDNSGetHostsResult[0].host');
|
||||
if (!host) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response: ${JSON.stringify(tmp)}`));
|
||||
if (!Array.isArray(host)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `host is not an array: ${JSON.stringify(tmp)}`));
|
||||
|
||||
const hosts = host.map(h => h['$']);
|
||||
callback(null, hosts);
|
||||
});
|
||||
});
|
||||
});
|
||||
const hosts = host.map(h => h['$']);
|
||||
return hosts;
|
||||
}
|
||||
|
||||
function setZone(dnsConfig, zoneName, hosts, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function setZone(domainConfig, zoneName, hosts) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert(Array.isArray(hosts));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
util.callbackify(getQuery)(dnsConfig, function (error, query) {
|
||||
if (error) return callback(error);
|
||||
const query = await getQuery(domainConfig);
|
||||
query.Command = 'namecheap.domains.dns.setHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
|
||||
query.Command = 'namecheap.domains.dns.setHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
// Map to query params https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
|
||||
hosts.forEach(function (host, i) {
|
||||
const n = i+1; // api starts with 1 not 0
|
||||
query['TTL' + n] = '300'; // keep it low
|
||||
query['HostName' + n] = host.HostName || host.Name;
|
||||
query['RecordType' + n] = host.RecordType || host.Type;
|
||||
query['Address' + n] = host.Address;
|
||||
|
||||
// Map to query params https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
|
||||
hosts.forEach(function (host, i) {
|
||||
var n = i+1; // api starts with 1 not 0
|
||||
query['TTL' + n] = '300'; // keep it low
|
||||
query['HostName' + n] = host.HostName || host.Name;
|
||||
query['RecordType' + n] = host.RecordType || host.Type;
|
||||
query['Address' + n] = host.Address;
|
||||
|
||||
if (host.Type === 'MX') {
|
||||
query['EmailType' + n] = 'MX';
|
||||
if (host.MXPref) query['MXPref' + n] = host.MXPref;
|
||||
}
|
||||
});
|
||||
|
||||
// namecheap recommends sending as POSTDATA with > 10 records
|
||||
const qs = querystring.stringify(query);
|
||||
|
||||
superagent.post(ENDPOINT).set('Content-Type', 'application/x-www-form-urlencoded').send(qs).end(function (error, result) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
|
||||
var parser = new xml2js.Parser();
|
||||
parser.parseString(result.text, function (error, result) {
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
|
||||
|
||||
var tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
var errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') return callback(new BoxError(BoxError.ACCESS_DENIED, errorMessage));
|
||||
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, errorMessage));
|
||||
}
|
||||
if (!tmp.CommandResponse[0]) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (!tmp.CommandResponse[0].DomainDNSSetHostsResult[0]) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (tmp.CommandResponse[0].DomainDNSSetHostsResult[0]['$'].IsSuccess !== 'true') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
if (host.Type === 'MX') {
|
||||
query['EmailType' + n] = 'MX';
|
||||
if (host.MXPref) query['MXPref' + n] = host.MXPref;
|
||||
}
|
||||
});
|
||||
|
||||
// namecheap recommends sending as POSTDATA with > 10 records
|
||||
const qs = new URLSearchParams(query).toString();
|
||||
|
||||
const [error, response] = await safe(superagent.post(ENDPOINT).set('Content-Type', 'application/x-www-form-urlencoded').send(qs).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error);
|
||||
|
||||
const parser = new xml2js.Parser();
|
||||
const [parserError, result] = await safe(util.promisify(parser.parseString)(response.text));
|
||||
if (parserError) throw new BoxError(BoxError.EXTERNAL_ERROR, parserError.message);
|
||||
|
||||
const tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
const errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') throw new BoxError(BoxError.ACCESS_DENIED, errorMessage);
|
||||
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, errorMessage);
|
||||
}
|
||||
if (!tmp.CommandResponse[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response');
|
||||
if (!tmp.CommandResponse[0].DomainDNSSetHostsResult[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response');
|
||||
if (tmp.CommandResponse[0].DomainDNSSetHostsResult[0]['$'].IsSuccess !== 'true') throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response');
|
||||
}
|
||||
|
||||
function upsert(domainObject, subdomain, type, values, callback) {
|
||||
async function upsert(domainObject, subdomain, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config;
|
||||
const domainConfig = domainObject.config;
|
||||
const zoneName = domainObject.zoneName;
|
||||
|
||||
subdomain = dns.getName(domainObject, subdomain, type) || '@';
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
|
||||
|
||||
getZone(dnsConfig, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZone(domainConfig, zoneName);
|
||||
|
||||
// Array to keep track of records that need to be inserted
|
||||
let toInsert = [];
|
||||
// Array to keep track of records that need to be inserted
|
||||
let toInsert = [];
|
||||
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
let curValue = values[i];
|
||||
let wasUpdate = false;
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
let curValue = values[i];
|
||||
let wasUpdate = false;
|
||||
|
||||
for (let j = 0; j < result.length; j++) {
|
||||
let curHost = result[j];
|
||||
for (let j = 0; j < result.length; j++) {
|
||||
let curHost = result[j];
|
||||
|
||||
if (curHost.Type === type && curHost.Name === subdomain) {
|
||||
// Updating an already existing host
|
||||
wasUpdate = true;
|
||||
if (type === 'MX') {
|
||||
curHost.MXPref = curValue.split(' ')[0];
|
||||
curHost.Address = curValue.split(' ')[1];
|
||||
} else {
|
||||
curHost.Address = curValue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We don't have this host at all yet, let's push to toInsert array
|
||||
if (!wasUpdate) {
|
||||
let newRecord = {
|
||||
RecordType: type,
|
||||
HostName: subdomain,
|
||||
Address: curValue
|
||||
};
|
||||
|
||||
// Special case for MX records
|
||||
if (curHost.Type === type && curHost.Name === subdomain) {
|
||||
// Updating an already existing host
|
||||
wasUpdate = true;
|
||||
if (type === 'MX') {
|
||||
newRecord.MXPref = curValue.split(' ')[0];
|
||||
newRecord.Address = curValue.split(' ')[1];
|
||||
curHost.MXPref = curValue.split(' ')[0];
|
||||
curHost.Address = curValue.split(' ')[1];
|
||||
} else {
|
||||
curHost.Address = curValue;
|
||||
}
|
||||
|
||||
toInsert.push(newRecord);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
const hosts = result.concat(toInsert);
|
||||
// We don't have this host at all yet, let's push to toInsert array
|
||||
if (!wasUpdate) {
|
||||
let newRecord = {
|
||||
RecordType: type,
|
||||
HostName: subdomain,
|
||||
Address: curValue
|
||||
};
|
||||
|
||||
setZone(dnsConfig, zoneName, hosts, callback);
|
||||
});
|
||||
// Special case for MX records
|
||||
if (type === 'MX') {
|
||||
newRecord.MXPref = curValue.split(' ')[0];
|
||||
newRecord.Address = curValue.split(' ')[1];
|
||||
}
|
||||
|
||||
toInsert.push(newRecord);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
const hosts = result.concat(toInsert);
|
||||
|
||||
return await setZone(domainConfig, zoneName, hosts);
|
||||
}
|
||||
|
||||
function get(domainObject, subdomain, type, callback) {
|
||||
async function get(domainObject, subdomain, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config;
|
||||
const domainConfig = domainObject.config;
|
||||
const zoneName = domainObject.zoneName;
|
||||
|
||||
subdomain = dns.getName(domainObject, subdomain, type) || '@';
|
||||
|
||||
getZone(dnsConfig, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getZone(domainConfig, zoneName);
|
||||
|
||||
// We need to filter hosts to ones with this subdomain and type
|
||||
const actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
|
||||
// We need to filter hosts to ones with this subdomain and type
|
||||
const actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
|
||||
|
||||
// We only return the value string
|
||||
const tmp = actualHosts.map(function (record) { return record.Address; });
|
||||
|
||||
debug(`get: subdomain: ${subdomain} type:${type} value:${JSON.stringify(tmp)}`);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
const tmp = actualHosts.map(function (record) { return record.Address; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function del(domainObject, subdomain, type, values, callback) {
|
||||
async function del(domainObject, subdomain, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config;
|
||||
const domainConfig = domainObject.config;
|
||||
const zoneName = domainObject.zoneName;
|
||||
|
||||
subdomain = dns.getName(domainObject, subdomain, type) || '@';
|
||||
|
||||
debug('del: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
|
||||
|
||||
getZone(dnsConfig, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
let result = await getZone(domainConfig, zoneName);
|
||||
if (result.length === 0) return;
|
||||
const originalLength = result.length;
|
||||
|
||||
if (result.length === 0) return callback();
|
||||
const originalLength = result.length;
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
let curValue = values[i];
|
||||
|
||||
for (let i = 0; i < values.length; i++) {
|
||||
let curValue = values[i];
|
||||
result = result.filter(curHost => curHost.Type !== type || curHost.Name !== subdomain || curHost.Address !== curValue);
|
||||
}
|
||||
|
||||
result = result.filter(curHost => curHost.Type !== type || curHost.Name !== subdomain || curHost.Address !== curValue);
|
||||
}
|
||||
|
||||
if (result.length !== originalLength) return setZone(dnsConfig, zoneName, result, callback);
|
||||
|
||||
callback();
|
||||
});
|
||||
if (result.length !== originalLength) return await setZone(domainConfig, zoneName, result);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config;
|
||||
const zoneName = domainObject.zoneName;
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (!dnsConfig.username || typeof dnsConfig.username !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'username must be a non-empty string', { field: 'username' }));
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
|
||||
let credentials = {
|
||||
username: dnsConfig.username,
|
||||
token: dnsConfig.token
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
|
||||
if (nameservers.some(function (n) { return n.toLowerCase().indexOf('.registrar-servers.com') === -1; })) {
|
||||
debug('verifyDnsConfig: %j does not contains NC NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to NameCheap', { field: 'nameservers' }));
|
||||
}
|
||||
|
||||
const testSubdomain = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, testSubdomain, 'A', [ip], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record added with change id %s', changeId);
|
||||
|
||||
del(domainObject, testSubdomain, 'A', [ip], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function wait(domainObject, subdomain, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
|
||||
const domainConfig = domainObject.config;
|
||||
const zoneName = domainObject.zoneName;
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (!domainConfig.username || typeof domainConfig.username !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'username must be a non-empty string');
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
|
||||
const credentials = {
|
||||
username: domainConfig.username,
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (nameservers.some(function (n) { return n.toLowerCase().indexOf('.registrar-servers.com') === -1; })) {
|
||||
debug('verifyDomainConfig: %j does not contains NC NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to NameCheap');
|
||||
}
|
||||
|
||||
const testSubdomain = 'cloudrontestdns';
|
||||
|
||||
await upsert(domainObject, testSubdomain, 'A', [ip]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
await del(domainObject, testSubdomain, 'A', [ip]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -7,13 +7,14 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/namecom'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
@@ -34,17 +35,16 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function addRecord(dnsConfig, zoneName, name, type, values, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function addRecord(domainConfig, zoneName, name, type, values) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`add: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
var data = {
|
||||
const data = {
|
||||
host: name,
|
||||
type: type,
|
||||
ttl: 300 // 300 is the lowest
|
||||
@@ -61,31 +61,28 @@ function addRecord(dnsConfig, zoneName, name, type, values, callback) {
|
||||
data.answer = values[0];
|
||||
}
|
||||
|
||||
superagent.post(`${NAMECOM_API}/domains/${zoneName}/records`)
|
||||
.auth(dnsConfig.username, dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.post(`${NAMECOM_API}/domains/${zoneName}/records`)
|
||||
.auth(domainConfig.username, domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.send(data)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
return callback(null, 'unused-id');
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function updateRecord(dnsConfig, zoneName, recordId, name, type, values, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function updateRecord(domainConfig, zoneName, recordId, name, type, values) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof recordId, 'number');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`update:${recordId} on ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
var data = {
|
||||
const data = {
|
||||
host: name,
|
||||
type: type,
|
||||
ttl: 300 // 300 is the lowest
|
||||
@@ -102,184 +99,152 @@ function updateRecord(dnsConfig, zoneName, recordId, name, type, values, callbac
|
||||
data.answer = values[0];
|
||||
}
|
||||
|
||||
superagent.put(`${NAMECOM_API}/domains/${zoneName}/records/${recordId}`)
|
||||
.auth(dnsConfig.username, dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.put(`${NAMECOM_API}/domains/${zoneName}/records/${recordId}`)
|
||||
.auth(domainConfig.username, domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.send(data)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function getInternal(dnsConfig, zoneName, name, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getInternal(domainConfig, zoneName, name, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`getInternal: ${name} in zone ${zoneName} of type ${type}`);
|
||||
|
||||
superagent.get(`${NAMECOM_API}/domains/${zoneName}/records`)
|
||||
.auth(dnsConfig.username, dnsConfig.token)
|
||||
const [error, response] = await safe(superagent.get(`${NAMECOM_API}/domains/${zoneName}/records`)
|
||||
.auth(domainConfig.username, domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
// name.com does not return the correct content-type
|
||||
result.body = safe.JSON.parse(result.text);
|
||||
if (!result.body.records) result.body.records = [];
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
result.body.records.forEach(function (r) {
|
||||
// name.com api simply strips empty properties
|
||||
r.host = r.host || '';
|
||||
});
|
||||
// name.com does not return the correct content-type
|
||||
response.body = safe.JSON.parse(response.text);
|
||||
if (!response.body.records) response.body.records = [];
|
||||
|
||||
var results = result.body.records.filter(function (r) {
|
||||
return (r.host === name && r.type === type);
|
||||
});
|
||||
response.body.records.forEach(function (r) {
|
||||
// name.com api simply strips empty properties
|
||||
r.host = r.host || '';
|
||||
});
|
||||
|
||||
debug('getInternal: %j', results);
|
||||
const results = response.body.records.filter(function (r) {
|
||||
return (r.host === name && r.type === type);
|
||||
});
|
||||
|
||||
return callback(null, results);
|
||||
});
|
||||
return results;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
debug(`upsert: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getInternal(domainConfig, zoneName, name, type);
|
||||
if (result.length === 0) return await addRecord(domainConfig, zoneName, name, type, values);
|
||||
|
||||
if (result.length === 0) return addRecord(dnsConfig, zoneName, name, type, values, callback);
|
||||
|
||||
return updateRecord(dnsConfig, zoneName, result[0].id, name, type, values, callback);
|
||||
});
|
||||
return await updateRecord(domainConfig, zoneName, result[0].id, name, type, values);
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var tmp = result.map(function (record) { return record.answer; });
|
||||
|
||||
debug('get: %j', tmp);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
const result = await getInternal(domainConfig, zoneName, name, type);
|
||||
const tmp = result.map(function (record) { return record.answer; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getInternal(domainConfig, zoneName, name, type);
|
||||
if (result.length === 0) return;
|
||||
|
||||
if (result.length === 0) return callback();
|
||||
|
||||
superagent.del(`${NAMECOM_API}/domains/${zoneName}/records/${result[0].id}`)
|
||||
.auth(dnsConfig.username, dnsConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
const [error, response] = await safe(superagent.del(`${NAMECOM_API}/domains/${zoneName}/records/${result[0].id}`)
|
||||
.auth(domainConfig.username, domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (typeof dnsConfig.username !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'username must be a string', { field: 'username' }));
|
||||
if (typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a string', { field: 'token' }));
|
||||
if (typeof domainConfig.username !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'username must be a string');
|
||||
if (typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a string');
|
||||
|
||||
var credentials = {
|
||||
username: dnsConfig.username,
|
||||
token: dnsConfig.token
|
||||
const credentials = {
|
||||
username: domainConfig.username,
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.name.com') !== -1; })) {
|
||||
debug('verifyDnsConfig: %j does not contain Name.com NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to name.com', { field: 'nameservers' }));
|
||||
}
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.name.com') !== -1; })) {
|
||||
debug('verifyDomainConfig: %j does not contain Name.com NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to name.com');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -7,14 +7,16 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/netcup'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
@@ -36,267 +38,226 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
}
|
||||
|
||||
// returns a api session id
|
||||
function login(dnsConfig, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
async function login(domainConfig) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
|
||||
const data = {
|
||||
action: 'login',
|
||||
param:{
|
||||
apikey: dnsConfig.apiKey,
|
||||
apipassword: dnsConfig.apiPassword,
|
||||
customernumber: dnsConfig.customerNumber
|
||||
apikey: domainConfig.apiKey,
|
||||
apipassword: domainConfig.apiPassword,
|
||||
customernumber: domainConfig.customerNumber
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.post(API_ENDPOINT).send(data).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
if (!response.body.responsedata.apisessionid) throw new BoxError(BoxError.ACCESS_DENIED, 'invalid api password');
|
||||
|
||||
callback(null, result.body.responsedata.apisessionid);
|
||||
});
|
||||
return response.body.responsedata.apisessionid;
|
||||
}
|
||||
|
||||
function getAllRecords(dnsConfig, apiSessionId, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getAllRecords(domainConfig, apiSessionId, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof apiSessionId, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`getAllRecords: getting dns records of ${zoneName}`);
|
||||
|
||||
const data = {
|
||||
action: 'infoDnsRecords',
|
||||
param:{
|
||||
apikey: dnsConfig.apiKey,
|
||||
apikey: domainConfig.apiKey,
|
||||
apisessionid: apiSessionId,
|
||||
customernumber: dnsConfig.customerNumber,
|
||||
customernumber: domainConfig.customerNumber,
|
||||
domainname: zoneName,
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.post(API_ENDPOINT).send(data).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
debug('getAllRecords:', JSON.stringify(result.body.responsedata.dnsrecords || []));
|
||||
|
||||
callback(null, result.body.responsedata.dnsrecords || []);
|
||||
});
|
||||
return response.body.responsedata.dnsrecords || [];
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
|
||||
|
||||
login(dnsConfig, function (error, apiSessionId) {
|
||||
if (error) return callback(error);
|
||||
const apiSessionId = await login(domainConfig);
|
||||
|
||||
getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getAllRecords(domainConfig, apiSessionId, zoneName);
|
||||
|
||||
let records = [];
|
||||
let records = [];
|
||||
|
||||
values.forEach(function (value) {
|
||||
// remove possible quotation
|
||||
if (value.charAt(0) === '"') value = value.slice(1);
|
||||
if (value.charAt(value.length -1) === '"') value = value.slice(0, -1);
|
||||
values.forEach(function (value) {
|
||||
// remove possible quotation
|
||||
if (value.charAt(0) === '"') value = value.slice(1);
|
||||
if (value.charAt(value.length -1) === '"') value = value.slice(0, -1);
|
||||
|
||||
let priority = null;
|
||||
if (type === 'MX') {
|
||||
priority = parseInt(value.split(' ')[0], 10);
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
let priority = null;
|
||||
if (type === 'MX') {
|
||||
priority = parseInt(value.split(' ')[0], 10);
|
||||
value = value.split(' ')[1];
|
||||
}
|
||||
|
||||
let record = result.find(function (r) { return r.hostname === name && r.type === type; });
|
||||
if (!record) record = { hostname: name, type: type, destination: value, deleterecord: false };
|
||||
else record.destination = value;
|
||||
let record = result.find(function (r) { return r.hostname === name && r.type === type; });
|
||||
if (!record) record = { hostname: name, type: type, destination: value, deleterecord: false };
|
||||
else record.destination = value;
|
||||
|
||||
if (priority !== null) record.priority = priority;
|
||||
if (priority !== null) record.priority = priority;
|
||||
|
||||
records.push(record);
|
||||
});
|
||||
|
||||
const data = {
|
||||
action: 'updateDnsRecords',
|
||||
param:{
|
||||
apikey: dnsConfig.apiKey,
|
||||
apisessionid: apiSessionId,
|
||||
customernumber: dnsConfig.customerNumber,
|
||||
domainname: zoneName,
|
||||
dnsrecordset: {
|
||||
dnsrecords: records
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
debug('upserting', JSON.stringify(data));
|
||||
|
||||
superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.body.statuscode !== 2000) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
debug('upsert:', result.body);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
records.push(record);
|
||||
});
|
||||
|
||||
const data = {
|
||||
action: 'updateDnsRecords',
|
||||
param:{
|
||||
apikey: domainConfig.apiKey,
|
||||
apisessionid: apiSessionId,
|
||||
customernumber: domainConfig.customerNumber,
|
||||
domainname: zoneName,
|
||||
dnsrecordset: {
|
||||
dnsrecords: records
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const [error, response] = await safe(superagent.post(API_ENDPOINT).send(data).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
if (response.body.statuscode !== 2000) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug('get: %s for zone %s of type %s', name, zoneName, type);
|
||||
|
||||
login(dnsConfig, function (error, apiSessionId) {
|
||||
if (error) return callback(error);
|
||||
const apiSessionId = await login(domainConfig);
|
||||
|
||||
getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getAllRecords(domainConfig, apiSessionId, zoneName);
|
||||
|
||||
// We only return the value string
|
||||
callback(null, result.filter(function (r) { return r.hostname === name && r.type === type; }).map(function (r) { return r.destination; }));
|
||||
});
|
||||
});
|
||||
// We only return the value string
|
||||
return result.filter(function (r) { return r.hostname === name && r.type === type; }).map(function (r) { return r.destination; });
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '@';
|
||||
|
||||
debug('del: %s for zone %s of type %s with values %j', name, zoneName, type, values);
|
||||
|
||||
login(dnsConfig, function (error, apiSessionId) {
|
||||
if (error) return callback(error);
|
||||
const apiSessionId = await login(domainConfig);
|
||||
|
||||
getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
const result = await getAllRecords(domainConfig, apiSessionId, zoneName);
|
||||
|
||||
let records = [];
|
||||
let records = [];
|
||||
|
||||
values.forEach(function (value) {
|
||||
// remove possible quotation
|
||||
if (value.charAt(0) === '"') value = value.slice(1);
|
||||
if (value.charAt(value.length -1) === '"') value = value.slice(0, -1);
|
||||
values.forEach(function (value) {
|
||||
// remove possible quotation
|
||||
if (value.charAt(0) === '"') value = value.slice(1);
|
||||
if (value.charAt(value.length -1) === '"') value = value.slice(0, -1);
|
||||
|
||||
let record = result.find(function (r) { return r.hostname === name && r.type === type && r.destination === value; });
|
||||
if (!record) return;
|
||||
let record = result.find(function (r) { return r.hostname === name && r.type === type && r.destination === value; });
|
||||
if (!record) return;
|
||||
|
||||
record.deleterecord = true;
|
||||
record.deleterecord = true;
|
||||
|
||||
records.push(record);
|
||||
});
|
||||
|
||||
if (records.length === 0) return callback(null);
|
||||
|
||||
const data = {
|
||||
action: 'updateDnsRecords',
|
||||
param:{
|
||||
apikey: dnsConfig.apiKey,
|
||||
apisessionid: apiSessionId,
|
||||
customernumber: dnsConfig.customerNumber,
|
||||
domainname: zoneName,
|
||||
dnsrecordset: {
|
||||
dnsrecords: records
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.body.statuscode !== 2000) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
debug('del:', result.body.responsedata);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
records.push(record);
|
||||
});
|
||||
|
||||
if (records.length === 0) return;
|
||||
|
||||
const data = {
|
||||
action: 'updateDnsRecords',
|
||||
param:{
|
||||
apikey: domainConfig.apiKey,
|
||||
apisessionid: apiSessionId,
|
||||
customernumber: domainConfig.customerNumber,
|
||||
domainname: zoneName,
|
||||
dnsrecordset: {
|
||||
dnsrecords: records
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const [error, response] = await safe(superagent.post(API_ENDPOINT).send(data).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
if (response.body.statuscode !== 2000) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.customerNumber || typeof dnsConfig.customerNumber !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'customerNumber must be a non-empty string', { field: 'customerNumber' }));
|
||||
if (!dnsConfig.apiKey || typeof dnsConfig.apiKey !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiKey must be a non-empty string', { field: 'apiKey' }));
|
||||
if (!dnsConfig.apiPassword || typeof dnsConfig.apiPassword !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiPassword must be a non-empty string', { field: 'apiPassword' }));
|
||||
if (!domainConfig.customerNumber || typeof domainConfig.customerNumber !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'customerNumber must be a non-empty string');
|
||||
if (!domainConfig.apiKey || typeof domainConfig.apiKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'apiKey must be a non-empty string');
|
||||
if (!domainConfig.apiPassword || typeof domainConfig.apiPassword !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'apiPassword must be a non-empty string');
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
customerNumber: dnsConfig.customerNumber,
|
||||
apiKey: dnsConfig.apiKey,
|
||||
apiPassword: dnsConfig.apiPassword,
|
||||
const credentials = {
|
||||
customerNumber: domainConfig.customerNumber,
|
||||
apiKey: domainConfig.apiKey,
|
||||
apiPassword: domainConfig.apiPassword,
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('dns.netcup.net') !== -1; })) {
|
||||
debug('verifyDnsConfig: %j does not contains Netcup NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Netcup', { field: 'nameservers' }));
|
||||
}
|
||||
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('dns.netcup.net') !== -1; })) {
|
||||
debug('verifyDomainConfig: %j does not contains Netcup NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Netcup');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
@@ -21,51 +21,46 @@ function removePrivateFields(domainObject) {
|
||||
function injectPrivateFields(newConfig, currentConfig) {
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', location, domainObject.zoneName, type, values);
|
||||
|
||||
return callback(null);
|
||||
return;
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback(null, [ ]); // returning ip confuses apptask into thinking the entry already exists
|
||||
return []; // returning ip confuses apptask into thinking the entry already exists
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
return callback();
|
||||
return;
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, location, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback();
|
||||
// do nothing
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
return callback(null, { });
|
||||
return {};
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
@@ -15,7 +15,9 @@ const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/route53'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
waitForDns = require('./waitfordns.js'),
|
||||
_ = require('underscore');
|
||||
|
||||
@@ -28,272 +30,236 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.secretAccessKey === constants.SECRET_PLACEHOLDER) newConfig.secretAccessKey = currentConfig.secretAccessKey;
|
||||
}
|
||||
|
||||
function getDnsCredentials(dnsConfig) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
function getDnsCredentials(domainConfig) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
|
||||
var credentials = {
|
||||
accessKeyId: dnsConfig.accessKeyId,
|
||||
secretAccessKey: dnsConfig.secretAccessKey,
|
||||
region: dnsConfig.region
|
||||
const credentials = {
|
||||
accessKeyId: domainConfig.accessKeyId,
|
||||
secretAccessKey: domainConfig.secretAccessKey,
|
||||
region: domainConfig.region
|
||||
};
|
||||
|
||||
if (dnsConfig.endpoint) credentials.endpoint = new AWS.Endpoint(dnsConfig.endpoint);
|
||||
if (domainConfig.endpoint) credentials.endpoint = new AWS.Endpoint(domainConfig.endpoint);
|
||||
|
||||
return credentials;
|
||||
}
|
||||
|
||||
function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneByName(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
const route53 = new AWS.Route53(getDnsCredentials(domainConfig));
|
||||
|
||||
// backward compat for 2.2, where we only required access to "listHostedZones"
|
||||
let listHostedZones;
|
||||
if (dnsConfig.listHostedZonesByName) {
|
||||
listHostedZones = route53.listHostedZonesByName.bind(route53, { MaxItems: '1', DNSName: zoneName + '.' });
|
||||
if (domainConfig.listHostedZonesByName) {
|
||||
listHostedZones = route53.listHostedZonesByName({ MaxItems: '1', DNSName: zoneName + '.' }).promise();
|
||||
} else {
|
||||
listHostedZones = route53.listHostedZones.bind(route53, {}); // currently, this route does not support > 100 zones
|
||||
listHostedZones = route53.listHostedZones({}).promise(); // currently, this route does not support > 100 zones
|
||||
}
|
||||
|
||||
listHostedZones(function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
const [error, result] = await safe(listHostedZones);
|
||||
if (error && error.code === 'AccessDenied') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'InvalidClientTokenId') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
|
||||
var zone = result.HostedZones.filter(function (zone) {
|
||||
return zone.Name.slice(0, -1) === zoneName; // aws zone name contains a '.' at the end
|
||||
})[0];
|
||||
const zone = result.HostedZones.filter(function (zone) {
|
||||
return zone.Name.slice(0, -1) === zoneName; // aws zone name contains a '.' at the end
|
||||
})[0];
|
||||
|
||||
if (!zone) return callback(new BoxError(BoxError.NOT_FOUND, 'no such zone'));
|
||||
if (!zone) throw new BoxError(BoxError.NOT_FOUND, 'no such zone');
|
||||
|
||||
callback(null, zone);
|
||||
});
|
||||
return zone;
|
||||
}
|
||||
|
||||
function getHostedZone(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getHostedZone(domainConfig, zoneName) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.getHostedZone({ Id: zone.Id }, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
const route53 = new AWS.Route53(getDnsCredentials(domainConfig));
|
||||
const [error, result] = await safe(route53.getHostedZone({ Id: zone.Id }).promise());
|
||||
if (error && error.code === 'AccessDenied') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'InvalidClientTokenId') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
debug('add: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
var records = values.map(function (v) { return { Value: v }; }); // for mx records, value is already of the '<priority> <server>' format
|
||||
const records = values.map(function (v) { return { Value: v }; }); // for mx records, value is already of the '<priority> <server>' format
|
||||
|
||||
var params = {
|
||||
ChangeBatch: {
|
||||
Changes: [{
|
||||
Action: 'UPSERT',
|
||||
ResourceRecordSet: {
|
||||
Type: type,
|
||||
Name: fqdn,
|
||||
ResourceRecords: records,
|
||||
TTL: 1
|
||||
}
|
||||
}]
|
||||
},
|
||||
HostedZoneId: zone.Id
|
||||
};
|
||||
const params = {
|
||||
ChangeBatch: {
|
||||
Changes: [{
|
||||
Action: 'UPSERT',
|
||||
ResourceRecordSet: {
|
||||
Type: type,
|
||||
Name: fqdn,
|
||||
ResourceRecords: records,
|
||||
TTL: 1
|
||||
}
|
||||
}]
|
||||
},
|
||||
HostedZoneId: zone.Id
|
||||
};
|
||||
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'PriorRequestNotComplete') return callback(new BoxError(BoxError.BUSY, error.message));
|
||||
if (error && error.code === 'InvalidChangeBatch') return callback(new BoxError(BoxError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
const route53 = new AWS.Route53(getDnsCredentials(domainConfig));
|
||||
const [error] = await safe(route53.changeResourceRecordSets(params).promise());
|
||||
if (error && error.code === 'AccessDenied') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'InvalidClientTokenId') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'PriorRequestNotComplete') throw new BoxError(BoxError.BUSY, error.message);
|
||||
if (error && error.code === 'InvalidChangeBatch') throw new BoxError(BoxError.BAD_FIELD, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
var params = {
|
||||
HostedZoneId: zone.Id,
|
||||
MaxItems: '1',
|
||||
StartRecordName: fqdn + '.',
|
||||
StartRecordType: type
|
||||
};
|
||||
const params = {
|
||||
HostedZoneId: zone.Id,
|
||||
MaxItems: '1',
|
||||
StartRecordName: fqdn + '.',
|
||||
StartRecordType: type
|
||||
};
|
||||
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.listResourceRecordSets(params, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
if (result.ResourceRecordSets.length === 0) return callback(null, [ ]);
|
||||
if (result.ResourceRecordSets[0].Name !== params.StartRecordName || result.ResourceRecordSets[0].Type !== params.StartRecordType) return callback(null, [ ]);
|
||||
const route53 = new AWS.Route53(getDnsCredentials(domainConfig));
|
||||
const [error, result] = await safe(route53.listResourceRecordSets(params).promise());
|
||||
if (error && error.code === 'AccessDenied') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'InvalidClientTokenId') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
if (result.ResourceRecordSets.length === 0) return [];
|
||||
if (result.ResourceRecordSets[0].Name !== params.StartRecordName || result.ResourceRecordSets[0].Type !== params.StartRecordType) return [];
|
||||
|
||||
var values = result.ResourceRecordSets[0].ResourceRecords.map(function (record) { return record.Value; });
|
||||
|
||||
callback(null, values);
|
||||
});
|
||||
});
|
||||
const values = result.ResourceRecordSets[0].ResourceRecords.map(function (record) { return record.Value; });
|
||||
return values;
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getZoneByName(domainConfig, zoneName);
|
||||
|
||||
var records = values.map(function (v) { return { Value: v }; });
|
||||
const records = values.map(function (v) { return { Value: v }; });
|
||||
|
||||
var resourceRecordSet = {
|
||||
Name: fqdn,
|
||||
Type: type,
|
||||
ResourceRecords: records,
|
||||
TTL: 1
|
||||
};
|
||||
const resourceRecordSet = {
|
||||
Name: fqdn,
|
||||
Type: type,
|
||||
ResourceRecords: records,
|
||||
TTL: 1
|
||||
};
|
||||
|
||||
var params = {
|
||||
ChangeBatch: {
|
||||
Changes: [{
|
||||
Action: 'DELETE',
|
||||
ResourceRecordSet: resourceRecordSet
|
||||
}]
|
||||
},
|
||||
HostedZoneId: zone.Id
|
||||
};
|
||||
const params = {
|
||||
ChangeBatch: {
|
||||
Changes: [{
|
||||
Action: 'DELETE',
|
||||
ResourceRecordSet: resourceRecordSet
|
||||
}]
|
||||
},
|
||||
HostedZoneId: zone.Id
|
||||
};
|
||||
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
if (error && error.message && error.message.indexOf('it was not found') !== -1) {
|
||||
debug('del: resource record set not found.', error);
|
||||
return callback(new BoxError(BoxError.NOT_FOUND, error.message));
|
||||
} else if (error && error.code === 'NoSuchHostedZone') {
|
||||
debug('del: hosted zone not found.', error);
|
||||
return callback(new BoxError(BoxError.NOT_FOUND, error.message));
|
||||
} else if (error && error.code === 'PriorRequestNotComplete') {
|
||||
debug('del: resource is still busy', error);
|
||||
return callback(new BoxError(BoxError.BUSY, error.message));
|
||||
} else if (error && error.code === 'InvalidChangeBatch') {
|
||||
debug('del: invalid change batch. No such record to be deleted.');
|
||||
return callback(new BoxError(BoxError.NOT_FOUND, error.message));
|
||||
} else if (error) {
|
||||
debug('del: error', error);
|
||||
return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
const route53 = new AWS.Route53(getDnsCredentials(domainConfig));
|
||||
const [error] = await safe(route53.changeResourceRecordSets(params).promise());
|
||||
if (error && error.code === 'AccessDenied') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.code === 'InvalidClientTokenId') throw new BoxError(BoxError.ACCESS_DENIED, error.message);
|
||||
if (error && error.message && error.message.indexOf('it was not found') !== -1) {
|
||||
throw new BoxError(BoxError.NOT_FOUND, error.message);
|
||||
} else if (error && error.code === 'NoSuchHostedZone') {
|
||||
throw new BoxError(BoxError.NOT_FOUND, error.message);
|
||||
} else if (error && error.code === 'PriorRequestNotComplete') {
|
||||
throw new BoxError(BoxError.BUSY, error.message);
|
||||
} else if (error && error.code === 'InvalidChangeBatch') {
|
||||
throw new BoxError(BoxError.NOT_FOUND, error.message);
|
||||
} else if (error) {
|
||||
throw new BoxError(BoxError.EXTERNAL_ERROR, error.message);
|
||||
}
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.accessKeyId || typeof dnsConfig.accessKeyId !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a non-empty string', { field: 'accessKeyId' }));
|
||||
if (!dnsConfig.secretAccessKey || typeof dnsConfig.secretAccessKey !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a non-empty string', { field: 'secretAccessKey' }));
|
||||
if (!domainConfig.accessKeyId || typeof domainConfig.accessKeyId !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'accessKeyId must be a non-empty string');
|
||||
if (!domainConfig.secretAccessKey || typeof domainConfig.secretAccessKey !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'secretAccessKey must be a non-empty string');
|
||||
|
||||
var credentials = {
|
||||
accessKeyId: dnsConfig.accessKeyId,
|
||||
secretAccessKey: dnsConfig.secretAccessKey,
|
||||
region: dnsConfig.region || 'us-east-1',
|
||||
endpoint: dnsConfig.endpoint || null,
|
||||
const credentials = {
|
||||
accessKeyId: domainConfig.accessKeyId,
|
||||
secretAccessKey: domainConfig.secretAccessKey,
|
||||
region: domainConfig.region || 'us-east-1',
|
||||
endpoint: domainConfig.endpoint || null,
|
||||
listHostedZonesByName: true, // new/updated creds require this perm
|
||||
};
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
getHostedZone(credentials, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
const zone = await getHostedZone(credentials, zoneName);
|
||||
|
||||
if (!_.isEqual(zone.DelegationSet.NameServers.sort(), nameservers.sort())) {
|
||||
debug('verifyDnsConfig: %j and %j do not match', nameservers, zone.DelegationSet.NameServers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Route53', { field: 'nameservers' }));
|
||||
}
|
||||
if (!_.isEqual(zone.DelegationSet.NameServers.sort(), nameservers.sort())) {
|
||||
debug('verifyDomainConfig: %j and %j do not match', nameservers, zone.DelegationSet.NameServers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Route53');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const newDomainObject = Object.assign({ }, domainObject, { config: credentials });
|
||||
const location = 'cloudrontestdns';
|
||||
const newDomainObject = Object.assign({ }, domainObject, { config: credentials });
|
||||
|
||||
upsert(newDomainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(newDomainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await get(newDomainObject, location, 'A');
|
||||
debug('verifyDomainConfig: Can list record sets');
|
||||
|
||||
del(newDomainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await del(newDomainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
272
src/dns/vultr.js
272
src/dns/vultr.js
@@ -7,14 +7,14 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const async = require('async'),
|
||||
assert = require('assert'),
|
||||
const assert = require('assert'),
|
||||
constants = require('../constants.js'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
debug = require('debug')('box:dns/vultr'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
@@ -36,244 +36,202 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
function getZoneRecords(dnsConfig, zoneName, name, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
async function getZoneRecords(domainConfig, zoneName, name, type) {
|
||||
assert.strictEqual(typeof domainConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`getInternal: getting dns records of ${zoneName} with ${name} and type ${type}`);
|
||||
|
||||
let per_page = 100, cursor= null;
|
||||
let per_page = 100, cursor = null;
|
||||
let records = [];
|
||||
|
||||
async.doWhilst(function (iteratorDone) {
|
||||
do {
|
||||
const url = `${VULTR_ENDPOINT}/domains/${zoneName}/records?per_page=${per_page}` + (cursor ? `&cursor=${cursor}` : '');
|
||||
|
||||
superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorDone(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return iteratorDone(new BoxError(BoxError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorDone(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorDone(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.get(url).set('Authorization', 'Bearer ' + domainConfig.token).timeout(30 * 1000).retry(5).ok(() => true));
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
records = records.concat(result.body.records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
records = records.concat(response.body.records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
}));
|
||||
|
||||
cursor = safe.query(result.body, 'meta.links.next');
|
||||
cursor = safe.query(response.body, 'meta.links.next');
|
||||
} while (cursor);
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (testDone) { return testDone(null, !!cursor); }, function (error) {
|
||||
debug('getZoneRecords: error:', error, JSON.stringify(records));
|
||||
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, records);
|
||||
});
|
||||
return records;
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, records) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var tmp = records.map(function (record) { return record.data; });
|
||||
|
||||
debug('get: %j', tmp);
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
const records = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
const tmp = records.map(function (record) { return record.data; });
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, records) {
|
||||
if (error) return callback(error);
|
||||
const records = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
|
||||
let i = 0, recordIds = []; // used to track available records to update instead of create
|
||||
let i = 0, recordIds = []; // used to track available records to update instead of create
|
||||
|
||||
async.eachSeries(values, function (value, iteratorCallback) {
|
||||
let data = {
|
||||
type,
|
||||
ttl: 300 // lowest
|
||||
};
|
||||
for (const value of values) {
|
||||
const data = {
|
||||
type,
|
||||
ttl: 120 // lowest
|
||||
};
|
||||
|
||||
if (type === 'MX') {
|
||||
data.priority = parseInt(value.split(' ')[0], 10);
|
||||
data.data = value.split(' ')[1];
|
||||
} else if (type === 'TXT') {
|
||||
data.data = value.replace(/^"(.*)"$/, '$1'); // strip any double quotes
|
||||
} else {
|
||||
data.data = value;
|
||||
}
|
||||
if (type === 'MX') {
|
||||
data.priority = parseInt(value.split(' ')[0], 10);
|
||||
data.data = value.split(' ')[1];
|
||||
} else if (type === 'TXT') {
|
||||
data.data = value.replace(/^"(.*)"$/, '$1'); // strip any double quotes
|
||||
} else {
|
||||
data.data = value;
|
||||
}
|
||||
|
||||
if (i >= records.length) {
|
||||
data.name = name; // only set for new records
|
||||
if (i >= records.length) {
|
||||
data.name = name; // only set for new records
|
||||
|
||||
superagent.post(`${VULTR_ENDPOINT}/domains/${zoneName}/records`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 400) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 201) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
const [error, response] = await safe(superagent.post(`${VULTR_ENDPOINT}/domains/${zoneName}/records`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
recordIds.push(result.body.record.id);
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
} else {
|
||||
superagent.patch(`${VULTR_ENDPOINT}/domains/${zoneName}/records/${records[i].id}`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
// increment, as we have consumed the record
|
||||
++i;
|
||||
recordIds.push(response.body.record.id);
|
||||
} else {
|
||||
const [error, response] = await safe(superagent.patch(`${VULTR_ENDPOINT}/domains/${zoneName}/records/${records[i].id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.ok(() => true));
|
||||
|
||||
if (error && !error.response) return iteratorCallback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 400) return iteratorCallback(new BoxError(BoxError.BAD_FIELD, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return iteratorCallback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
++i;
|
||||
|
||||
recordIds.push(records[i-1].id);
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 400) throw new BoxError(BoxError.BAD_FIELD, formatError(response));
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
}
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
recordIds.push(records[i-1].id);
|
||||
}
|
||||
}
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
for (let j = values.length + 1; j < records.length; j++) {
|
||||
const [error] = await safe(superagent.del(`${VULTR_ENDPOINT}/domains/${zoneName}/records/${records[j].id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5));
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
|
||||
}
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = dns.getName(domainObject, location, type) || '';
|
||||
|
||||
getZoneRecords(dnsConfig, zoneName, name, type, function (error, records) {
|
||||
if (error) return callback(error);
|
||||
const records = await getZoneRecords(domainConfig, zoneName, name, type);
|
||||
if (records.length === 0) return;
|
||||
|
||||
if (records.length === 0) return callback(null);
|
||||
const tmp = records.filter(function (record) { return values.some(function (value) { return value === record.data; }); });
|
||||
if (tmp.length === 0) return;
|
||||
|
||||
const tmp = records.filter(function (record) { return values.some(function (value) { return value === record.data; }); });
|
||||
|
||||
debug('del: %j', tmp);
|
||||
|
||||
if (tmp.length === 0) return callback(null);
|
||||
|
||||
// FIXME we only handle the first one currently
|
||||
|
||||
superagent.del(`${VULTR_ENDPOINT}/domains/${zoneName}/records/${tmp[0].id}`)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
for (const r of tmp) {
|
||||
const [error, response] = await safe(superagent.del(`${VULTR_ENDPOINT}/domains/${zoneName}/records/${r.id}`)
|
||||
.set('Authorization', 'Bearer ' + domainConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
|
||||
.ok(() => true));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
|
||||
if (response.statusCode === 404) continue;
|
||||
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
|
||||
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
|
||||
}
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
function verifyDnsConfig(domainObject, callback) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
const domainConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName;
|
||||
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string', { field: 'token' }));
|
||||
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
|
||||
|
||||
const ip = '127.0.0.1';
|
||||
|
||||
var credentials = {
|
||||
token: dnsConfig.token
|
||||
const credentials = {
|
||||
token: domainConfig.token
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
if (process.env.BOX_ENV === 'test') credentials; // this shouldn't be here
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
|
||||
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.vultr.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains vultr NS', nameservers);
|
||||
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Vultr', { field: 'nameservers' }));
|
||||
}
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.vultr.com') === -1) {
|
||||
debug('verifyDomainConfig: %j does not contains vultr NS', nameservers);
|
||||
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Vultr');
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
upsert(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
await upsert(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record added');
|
||||
|
||||
debug('verifyDnsConfig: Test A record added');
|
||||
await del(domainObject, location, 'A', [ ip ]);
|
||||
debug('verifyDomainConfig: Test A record removed again');
|
||||
|
||||
del(domainObject, location, 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: Test A record removed again');
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
return credentials;
|
||||
}
|
||||
|
||||
@@ -3,108 +3,99 @@
|
||||
exports = module.exports = waitForDns;
|
||||
|
||||
const assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
debug = require('debug')('box:dns/waitfordns'),
|
||||
dns = require('../dns.js');
|
||||
dig = require('../dig.js'),
|
||||
promiseRetry = require('../promise-retry.js'),
|
||||
safe = require('safetydance');
|
||||
|
||||
function resolveIp(hostname, options, callback) {
|
||||
async function resolveIp(hostname, type, options) {
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert(type === 'A' || type === 'AAAA');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// try A record at authoritative server
|
||||
debug(`resolveIp: Checking if ${hostname} has A record at ${options.server}`);
|
||||
dns.resolve(hostname, 'A', options, function (error, results) {
|
||||
if (!error && results.length !== 0) return callback(null, results);
|
||||
debug(`resolveIp: Checking if ${hostname} has ${type} record at ${options.server}`);
|
||||
const [error, results] = await safe(dig.resolve(hostname, type, options));
|
||||
if (!error && results.length !== 0) return results;
|
||||
|
||||
// try CNAME record at authoritative server
|
||||
debug(`resolveIp: Checking if ${hostname} has CNAME record at ${options.server}`);
|
||||
dns.resolve(hostname, 'CNAME', options, function (error, results) {
|
||||
if (error || results.length === 0) return callback(error, results);
|
||||
// try CNAME record at authoritative server
|
||||
debug(`resolveIp: Checking if ${hostname} has CNAME record at ${options.server}`);
|
||||
const cnameResults = await dig.resolve(hostname, 'CNAME', options);
|
||||
if (cnameResults.length === 0) return cnameResults;
|
||||
|
||||
// recurse lookup the CNAME record
|
||||
debug(`resolveIp: Resolving ${hostname}'s CNAME record ${results[0]}`);
|
||||
dns.resolve(results[0], 'A', { server: '127.0.0.1', timeout: options.timeout }, callback);
|
||||
});
|
||||
});
|
||||
// recurse lookup the CNAME record
|
||||
debug(`resolveIp: Resolving ${hostname}'s CNAME record ${cnameResults[0]}`);
|
||||
return await dig.resolve(cnameResults[0], type, options);
|
||||
}
|
||||
|
||||
function isChangeSynced(hostname, type, value, nameserver, callback) {
|
||||
async function isChangeSynced(hostname, type, value, nameserver) {
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert.strictEqual(typeof nameserver, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// ns records cannot have cname
|
||||
dns.resolve(nameserver, 'A', { timeout: 5000 }, function (error, nsIps) {
|
||||
if (error || !nsIps || nsIps.length === 0) {
|
||||
debug(`isChangeSynced: cannot resolve NS ${nameserver}`); // it's fine if one or more ns are dead
|
||||
return callback(null, true);
|
||||
const [error, nsIps] = await safe(dig.resolve(nameserver, 'A', { timeout: 5000 }));
|
||||
if (error || !nsIps || nsIps.length === 0) {
|
||||
debug(`isChangeSynced: cannot resolve NS ${nameserver}`); // it's fine if one or more ns are dead
|
||||
return true;
|
||||
}
|
||||
|
||||
const status = [];
|
||||
for (let i = 0; i < nsIps.length; i++) {
|
||||
const nsIp = nsIps[i];
|
||||
const resolveOptions = { server: nsIp, timeout: 5000 };
|
||||
const resolver = type === 'A' || type === 'AAAA' ? resolveIp(hostname, type, resolveOptions) : dig.resolve(hostname, 'TXT', resolveOptions);
|
||||
|
||||
const [error, answer] = await safe(resolver);
|
||||
if (error && error.code === 'TIMEOUT') {
|
||||
debug(`isChangeSynced: NS ${nameserver} (${nsIp}) timed out when resolving ${hostname} (${type})`);
|
||||
status[i] = true; // should be ok if dns server is down
|
||||
continue;
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, iteratorCallback) {
|
||||
const resolveOptions = { server: nsIp, timeout: 5000 };
|
||||
const resolver = type === 'A' ? resolveIp.bind(null, hostname) : dns.resolve.bind(null, hostname, 'TXT');
|
||||
if (error) {
|
||||
debug(`isChangeSynced: NS ${nameserver} (${nsIp}) errored when resolve ${hostname} (${type}): ${error}`);
|
||||
status[i] = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
resolver(resolveOptions, function (error, answer) {
|
||||
if (error && error.code === 'TIMEOUT') {
|
||||
debug(`isChangeSynced: NS ${nameserver} (${nsIp}) timed out when resolving ${hostname} (${type})`);
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
}
|
||||
let match;
|
||||
if (type === 'A' || type === 'AAAA') {
|
||||
match = answer.length === 1 && answer[0] === value;
|
||||
} else if (type === 'TXT') { // answer is a 2d array of strings
|
||||
match = answer.some(function (a) { return value === a.join(''); });
|
||||
}
|
||||
|
||||
if (error) {
|
||||
debug(`isChangeSynced: NS ${nameserver} (${nsIp}) errored when resolve ${hostname} (${type}): ${error}`);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
debug(`isChangeSynced: ${hostname} (${type}) was resolved to ${answer} at NS ${nameserver} (${nsIp}). Expecting ${value}. Match ${match}`);
|
||||
status[i] = match;
|
||||
}
|
||||
|
||||
let match;
|
||||
if (type === 'A') {
|
||||
match = answer.length === 1 && answer[0] === value;
|
||||
} else if (type === 'TXT') { // answer is a 2d array of strings
|
||||
match = answer.some(function (a) { return value === a.join(''); });
|
||||
}
|
||||
|
||||
debug(`isChangeSynced: ${hostname} (${type}) was resolved to ${answer} at NS ${nameserver} (${nsIp}). Expecting ${value}. Match ${match}`);
|
||||
|
||||
iteratorCallback(null, match);
|
||||
});
|
||||
}, callback);
|
||||
|
||||
});
|
||||
return status.every(s => s === true);
|
||||
}
|
||||
|
||||
// check if IP change has propagated to every nameserver
|
||||
function waitForDns(hostname, zoneName, type, value, options, callback) {
|
||||
async function waitForDns(hostname, zoneName, type, value, options) {
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert(type === 'A' || type === 'TXT');
|
||||
assert(type === 'A' || type === 'AAAA' || type === 'TXT');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('waitForDns: hostname %s to be %s in zone %s.', hostname, value, zoneName);
|
||||
debug(`waitForDns: waiting for ${hostname} to be ${value} in zone ${zoneName}`);
|
||||
|
||||
var attempt = 0;
|
||||
async.retry(options, function (retryCallback) {
|
||||
++attempt;
|
||||
debug(`waitForDns (try ${attempt}): ${hostname} to be ${value} in zone ${zoneName}`);
|
||||
await promiseRetry(Object.assign({ debug }, options), async function () {
|
||||
const nameservers = await dig.resolve(zoneName, 'NS', { timeout: 5000 });
|
||||
if (!nameservers) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Unable to get nameservers');
|
||||
debug(`waitForDns: nameservers are ${JSON.stringify(nameservers)}`);
|
||||
|
||||
dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
|
||||
if (error || !nameservers) return retryCallback(error || new BoxError(BoxError.EXTERNAL_ERROR, 'Unable to get nameservers'));
|
||||
|
||||
async.every(nameservers, isChangeSynced.bind(null, hostname, type, value), function (error, synced) {
|
||||
debug('waitForDns: %s %s ns: %j', hostname, synced ? 'done' : 'not done', nameservers);
|
||||
|
||||
retryCallback(synced ? null : new BoxError(BoxError.EXTERNAL_ERROR, 'ETRYAGAIN'));
|
||||
});
|
||||
});
|
||||
}, function retryDone(error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug(`waitForDns: ${hostname} has propagated`);
|
||||
|
||||
callback(null);
|
||||
for (const nameserver of nameservers) {
|
||||
const synced = await isChangeSynced(hostname, type, value, nameserver);
|
||||
debug(`waitForDns: ${hostname} at ns ${nameserver}: ${synced ? 'done' : 'not done'} `);
|
||||
if (!synced) throw new BoxError(BoxError.EXTERNAL_ERROR, 'ETRYAGAIN');
|
||||
}
|
||||
});
|
||||
|
||||
debug(`waitForDns: ${hostname} has propagated`);
|
||||
}
|
||||
|
||||
@@ -7,12 +7,13 @@ exports = module.exports = {
|
||||
get,
|
||||
del,
|
||||
wait,
|
||||
verifyDnsConfig
|
||||
verifyDomainConfig
|
||||
};
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('../dns.js'),
|
||||
safe = require('safetydance'),
|
||||
sysinfo = require('../sysinfo.js'),
|
||||
@@ -26,71 +27,75 @@ function removePrivateFields(domainObject) {
|
||||
function injectPrivateFields(newConfig, currentConfig) {
|
||||
}
|
||||
|
||||
function upsert(domainObject, location, type, values, callback) {
|
||||
async function upsert(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', location, domainObject.zoneName, type, values);
|
||||
|
||||
return callback(null);
|
||||
return;
|
||||
}
|
||||
|
||||
function get(domainObject, location, type, callback) {
|
||||
async function get(domainObject, location, type) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback(null, [ ]); // returning ip confuses apptask into thinking the entry already exists
|
||||
return []; // returning ip confuses apptask into thinking the entry already exists
|
||||
}
|
||||
|
||||
function del(domainObject, location, type, values, callback) {
|
||||
async function del(domainObject, location, type, values) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(Array.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
return callback();
|
||||
return;
|
||||
}
|
||||
|
||||
function wait(domainObject, location, type, value, options, callback) {
|
||||
async function wait(domainObject, subdomain, type, value, options) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
const fqdn = dns.fqdn(subdomain, domainObject);
|
||||
|
||||
waitForDns(fqdn, domainObject.zoneName, type, value, options, callback);
|
||||
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
|
||||
}
|
||||
|
||||
async function verifyDnsConfig(domainObject) {
|
||||
async function verifyDomainConfig(domainObject) {
|
||||
assert.strictEqual(typeof domainObject, 'object');
|
||||
|
||||
const zoneName = domainObject.zoneName;
|
||||
|
||||
// Very basic check if the nameservers can be fetched
|
||||
const [error, nameservers] = await safe(dns.promises.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' });
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' });
|
||||
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
|
||||
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
const fqdn = dns.fqdn(location, domainObject);
|
||||
|
||||
const [error2, result] = await safe(dns.promises.resolve(fqdn, 'A', { server: '127.0.0.1', timeout: 5000 }));
|
||||
if (error2 && error2.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve ${fqdn}`, { field: 'nameservers' });
|
||||
if (error2 || !result) throw new BoxError(BoxError.BAD_FIELD, error2 ? error2.message : `Unable to resolve ${fqdn}`, { field: 'nameservers' });
|
||||
const [ipv4Error, ipv4Result] = await safe(dig.resolve(fqdn, 'A', { server: '127.0.0.1', timeout: 5000 }));
|
||||
if (ipv4Error && (ipv4Error.code === 'ENOTFOUND' || ipv4Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}. Please check if you have set up *.${domainObject.domain} to point to this server's IP`);
|
||||
if (ipv4Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}: ${ipv4Error.message}`);
|
||||
if (!ipv4Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}`);
|
||||
|
||||
const [error3, ip] = await safe(sysinfo.getServerIp());
|
||||
if (error3) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to detect IP of this server: ${error3.message}`);
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
if (ipv4Result.length !== 1 || ipv4 !== ipv4Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv4Result)} instead of IPv4 ${ipv4}`);
|
||||
|
||||
if (result.length !== 1 || ip !== result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(result)} instead of ${ip}`);
|
||||
const ipv6 = await sysinfo.getServerIPv6(); // both should be RFC 5952 format
|
||||
if (ipv6) {
|
||||
const [ipv6Error, ipv6Result] = await safe(dig.resolve(fqdn, 'AAAA', { server: '127.0.0.1', timeout: 5000 }));
|
||||
if (ipv6Error && (ipv6Error.code === 'ENOTFOUND' || ipv6Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
|
||||
if (ipv6Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}: ${ipv6Error.message}`);
|
||||
if (!ipv6Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
|
||||
|
||||
if (ipv6Result.length !== 1 || ipv6 !== ipv6Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv6Result)} instead of IPv6 ${ipv6}`);
|
||||
}
|
||||
|
||||
return {};
|
||||
}
|
||||
|
||||
100
src/docker.js
100
src/docker.js
@@ -20,13 +20,19 @@ exports = module.exports = {
|
||||
createSubcontainer,
|
||||
inspect,
|
||||
getContainerIp,
|
||||
execContainer,
|
||||
getEvents,
|
||||
memoryUsage,
|
||||
|
||||
createVolume,
|
||||
removeVolume,
|
||||
clearVolume,
|
||||
|
||||
update,
|
||||
|
||||
createExec,
|
||||
startExec,
|
||||
getExec,
|
||||
resizeExec
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
@@ -34,7 +40,7 @@ const apps = require('./apps.js'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:docker'),
|
||||
delay = require('delay'),
|
||||
delay = require('./delay.js'),
|
||||
Docker = require('dockerode'),
|
||||
path = require('path'),
|
||||
reverseProxy = require('./reverseproxy.js'),
|
||||
@@ -58,7 +64,7 @@ async function testRegistryConfig(config) {
|
||||
if (config.provider === 'noop') return;
|
||||
|
||||
const [error] = await safe(gConnection.checkAuth(config)); // this returns a 500 even for auth errors
|
||||
if (error) throw new BoxError(BoxError.BAD_FIELD, error, { field: 'serverAddress' });
|
||||
if (error) throw new BoxError(BoxError.BAD_FIELD, `Invalid serverAddress: ${error.message}`);
|
||||
}
|
||||
|
||||
function injectPrivateFields(newConfig, currentConfig) {
|
||||
@@ -117,7 +123,7 @@ async function pullImage(manifest) {
|
||||
// https://github.com/dotcloud/docker/issues/1074 says each status message
|
||||
// is emitted as a chunk
|
||||
stream.on('data', function (chunk) {
|
||||
var data = safe.JSON.parse(chunk) || { };
|
||||
const data = safe.JSON.parse(chunk) || { };
|
||||
debug('pullImage: %j', data);
|
||||
|
||||
// The data.status here is useless because this is per layer as opposed to per image
|
||||
@@ -247,9 +253,16 @@ function getAddresses() {
|
||||
|
||||
const addresses = [];
|
||||
for (const phy of physicalDevices) {
|
||||
const result = safe.JSON.parse(safe.child_process.execSync(`ip -f inet -j addr show ${phy.name}`, { encoding: 'utf8' }));
|
||||
const address = safe.query(result, '[0].addr_info[0].local');
|
||||
if (address) addresses.push(address);
|
||||
const inet = safe.JSON.parse(safe.child_process.execSync(`ip -f inet -j addr show dev ${phy.name} scope global`, { encoding: 'utf8' }));
|
||||
for (const r of inet) {
|
||||
const address = safe.query(r, 'addr_info[0].local');
|
||||
if (address) addresses.push(address);
|
||||
}
|
||||
const inet6 = safe.JSON.parse(safe.child_process.execSync(`ip -f inet6 -j addr show dev ${phy.name} scope global`, { encoding: 'utf8' }));
|
||||
for (const r of inet6) {
|
||||
const address = safe.query(r, 'addr_info[0].local');
|
||||
if (address) addresses.push(address);
|
||||
}
|
||||
}
|
||||
|
||||
return addresses;
|
||||
@@ -266,18 +279,19 @@ async function createSubcontainer(app, name, cmd, options) {
|
||||
const manifest = app.manifest;
|
||||
const exposedPorts = {}, dockerPortBindings = { };
|
||||
const domain = app.fqdn;
|
||||
const envPrefix = manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
const stdEnv = [
|
||||
'CLOUDRON=1',
|
||||
'CLOUDRON_PROXY_IP=172.18.0.1',
|
||||
`CLOUDRON_APP_HOSTNAME=${app.id}`,
|
||||
`${envPrefix}WEBADMIN_ORIGIN=${settings.dashboardOrigin()}`,
|
||||
`${envPrefix}API_ORIGIN=${settings.dashboardOrigin()}`,
|
||||
`${envPrefix}APP_ORIGIN=https://${domain}`,
|
||||
`${envPrefix}APP_DOMAIN=${domain}`
|
||||
`CLOUDRON_WEBADMIN_ORIGIN=${settings.dashboardOrigin()}`,
|
||||
`CLOUDRON_API_ORIGIN=${settings.dashboardOrigin()}`,
|
||||
`CLOUDRON_APP_ORIGIN=https://${domain}`,
|
||||
`CLOUDRON_APP_DOMAIN=${domain}`
|
||||
];
|
||||
|
||||
const secondaryDomainsEnv = app.secondaryDomains.map(sd => `${sd.environmentVariable}=${sd.fqdn}`);
|
||||
|
||||
const portEnv = [];
|
||||
for (const portName in app.portBindings) {
|
||||
const hostPort = app.portBindings[portName];
|
||||
@@ -290,7 +304,7 @@ async function createSubcontainer(app, name, cmd, options) {
|
||||
exposedPorts[`${containerPort}/${portType}`] = {};
|
||||
portEnv.push(`${portName}=${hostPort}`);
|
||||
|
||||
const hostIps = hostPort === 53 ? getAddresses() : [ '0.0.0.0' ]; // port 53 is special because it is possibly taken by systemd-resolved
|
||||
const hostIps = hostPort === 53 ? getAddresses() : [ '0.0.0.0', '::0' ]; // port 53 is special because it is possibly taken by systemd-resolved
|
||||
dockerPortBindings[`${containerPort}/${portType}`] = hostIps.map(hip => { return { HostIp: hip, HostPort: hostPort + '' }; });
|
||||
}
|
||||
|
||||
@@ -312,7 +326,7 @@ async function createSubcontainer(app, name, cmd, options) {
|
||||
Tty: isAppContainer,
|
||||
Image: app.manifest.dockerImage,
|
||||
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
|
||||
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv),
|
||||
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv).concat(secondaryDomainsEnv),
|
||||
ExposedPorts: isAppContainer ? exposedPorts : { },
|
||||
Volumes: { // see also ReadonlyRootfs
|
||||
'/tmp': {},
|
||||
@@ -347,7 +361,8 @@ async function createSubcontainer(app, name, cmd, options) {
|
||||
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
|
||||
SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
|
||||
CapAdd: [],
|
||||
CapDrop: []
|
||||
CapDrop: [],
|
||||
Sysctls: {}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -380,7 +395,13 @@ async function createSubcontainer(app, name, cmd, options) {
|
||||
const capabilities = manifest.capabilities || [];
|
||||
|
||||
// https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
|
||||
if (capabilities.includes('net_admin')) containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
|
||||
if (capabilities.includes('net_admin')) {
|
||||
containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
|
||||
// ipv6 for new interfaces is disabled in the container. this prevents the openvpn tun device having ipv6
|
||||
// See https://github.com/moby/moby/issues/20569 and https://github.com/moby/moby/issues/33099
|
||||
containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.disable_ipv6'] = '0';
|
||||
containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.forwarding'] = '1';
|
||||
}
|
||||
if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping
|
||||
if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker
|
||||
|
||||
@@ -498,7 +519,7 @@ async function deleteImage(manifest) {
|
||||
|
||||
const dockerImage = manifest ? manifest.dockerImage : null;
|
||||
if (!dockerImage) return;
|
||||
if (dockerImage.includes('//')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module
|
||||
if (dockerImage.includes('//') || dockerImage.startsWith('/')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module (https://github.com/apocas/dockerode/issues/548)
|
||||
|
||||
const removeOptions = {
|
||||
force: false, // might be shared with another instance of this app
|
||||
@@ -544,30 +565,51 @@ async function getContainerIp(containerId) {
|
||||
return ip;
|
||||
}
|
||||
|
||||
async function execContainer(containerId, options) {
|
||||
async function createExec(containerId, options) {
|
||||
assert.strictEqual(typeof containerId, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const container = gConnection.getContainer(containerId);
|
||||
|
||||
const [error, exec] = await safe(container.exec(options.execOptions));
|
||||
const [error, exec] = await safe(container.exec(options));
|
||||
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
|
||||
if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
|
||||
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
|
||||
|
||||
const [startError, stream] = await safe(exec.start(options.startOptions)); /* in hijacked mode, stream is a net.socket */
|
||||
if (startError) throw new BoxError(BoxError.DOCKER_ERROR, startError);
|
||||
return exec.id;
|
||||
}
|
||||
|
||||
if (options.rows && options.columns) {
|
||||
// there is a race where resizing too early results in a 404 "no such exec"
|
||||
// https://git.cloudron.io/cloudron/box/issues/549
|
||||
setTimeout(function () {
|
||||
exec.resize({ h: options.rows, w: options.columns }, function (error) { if (error) debug('Error resizing console', error); });
|
||||
}, 2000);
|
||||
}
|
||||
async function startExec(execId, options) {
|
||||
assert.strictEqual(typeof execId, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const exec = gConnection.getExec(execId);
|
||||
const [error, stream] = await safe(exec.start(options)); /* in hijacked mode, stream is a net.socket */
|
||||
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
|
||||
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
|
||||
return stream;
|
||||
}
|
||||
|
||||
async function getExec(execId) {
|
||||
assert.strictEqual(typeof execId, 'string');
|
||||
|
||||
const exec = gConnection.getExec(execId);
|
||||
const [error, result] = await safe(exec.inspect());
|
||||
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`);
|
||||
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
|
||||
|
||||
return { exitCode: result.ExitCode, running: result.Running };
|
||||
}
|
||||
|
||||
async function resizeExec(execId, options) {
|
||||
assert.strictEqual(typeof execId, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
const exec = gConnection.getExec(execId);
|
||||
const [error] = await safe(exec.resize(options)); // { h, w }
|
||||
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
|
||||
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
|
||||
}
|
||||
|
||||
async function getEvents(options) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ async function authorizeApp(req, res, next) {
|
||||
}
|
||||
|
||||
function attachDockerRequest(req, res, next) {
|
||||
var options = {
|
||||
const options = {
|
||||
socketPath: '/var/run/docker.sock',
|
||||
method: req.method,
|
||||
path: req.url,
|
||||
@@ -143,8 +143,8 @@ async function start() {
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
gHttpServer.on('upgrade', function (req, client, head) {
|
||||
// Create a new tcp connection to the TCP server
|
||||
var remote = net.connect('/var/run/docker.sock', function () {
|
||||
var upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
|
||||
const remote = net.connect('/var/run/docker.sock', function () {
|
||||
let upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
|
||||
`Host: ${req.headers.host}\r\n` +
|
||||
'Connection: Upgrade\r\n' +
|
||||
'Upgrade: tcp\r\n';
|
||||
|
||||
@@ -4,7 +4,8 @@ module.exports = exports = {
|
||||
add,
|
||||
get,
|
||||
list,
|
||||
update,
|
||||
setConfig,
|
||||
setWellKnown,
|
||||
del,
|
||||
clear,
|
||||
|
||||
@@ -22,7 +23,6 @@ const assert = require('assert'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
const DOMAINS_FIELDS = [ 'domain', 'zoneName', 'provider', 'configJson', 'tlsConfigJson', 'wellKnownJson', 'fallbackCertificateJson' ].join(',');
|
||||
@@ -54,6 +54,7 @@ function api(provider) {
|
||||
case 'digitalocean': return require('./dns/digitalocean.js');
|
||||
case 'gandi': return require('./dns/gandi.js');
|
||||
case 'godaddy': return require('./dns/godaddy.js');
|
||||
case 'hetzner': return require('./dns/hetzner.js');
|
||||
case 'linode': return require('./dns/linode.js');
|
||||
case 'vultr': return require('./dns/vultr.js');
|
||||
case 'namecom': return require('./dns/namecom.js');
|
||||
@@ -66,22 +67,17 @@ function api(provider) {
|
||||
}
|
||||
}
|
||||
|
||||
function maybePromisify(func) {
|
||||
if (util.types.isAsyncFunction(func)) return func;
|
||||
return util.promisify(func);
|
||||
}
|
||||
|
||||
async function verifyDnsConfig(dnsConfig, domain, zoneName, provider) {
|
||||
assert(dnsConfig && typeof dnsConfig === 'object'); // the dns config to test with
|
||||
async function verifyDomainConfig(domainConfig, domain, zoneName, provider) {
|
||||
assert(domainConfig && typeof domainConfig === 'object'); // the dns config to test with
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof provider, 'string');
|
||||
|
||||
const backend = api(provider);
|
||||
if (!backend) throw new BoxError(BoxError.BAD_FIELD, 'Invalid provider', { field: 'provider' });
|
||||
if (!backend) throw new BoxError(BoxError.BAD_FIELD, 'Invalid provider');
|
||||
|
||||
const domainObject = { config: dnsConfig, domain: domain, zoneName: zoneName };
|
||||
const [error, result] = await safe(maybePromisify(api(provider).verifyDnsConfig)(domainObject));
|
||||
const domainObject = { config: domainConfig, domain: domain, zoneName: zoneName };
|
||||
const [error, result] = await safe(api(provider).verifyDomainConfig(domainObject));
|
||||
if (error && error.reason === BoxError.ACCESS_DENIED) return { error: new BoxError(BoxError.BAD_FIELD, `Access denied: ${error.message}`) };
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return { error: new BoxError(BoxError.BAD_FIELD, `Zone not found: ${error.message}`) };
|
||||
if (error && error.reason === BoxError.EXTERNAL_ERROR) return { error: new BoxError(BoxError.BAD_FIELD, `Configuration error: ${error.message}`) };
|
||||
@@ -100,12 +96,12 @@ function validateTlsConfig(tlsConfig, dnsProvider) {
|
||||
case 'fallback':
|
||||
break;
|
||||
default:
|
||||
return new BoxError(BoxError.BAD_FIELD, 'tlsConfig.provider must be fallback, letsencrypt-prod/staging', { field: 'tlsProvider' });
|
||||
return new BoxError(BoxError.BAD_FIELD, 'tlsConfig.provider must be fallback, letsencrypt-prod/staging');
|
||||
}
|
||||
|
||||
if (tlsConfig.wildcard) {
|
||||
if (!tlsConfig.provider.startsWith('letsencrypt')) return new BoxError(BoxError.BAD_FIELD, 'wildcard can only be set with letsencrypt', { field: 'wildcard' });
|
||||
if (dnsProvider === 'manual' || dnsProvider === 'noop' || dnsProvider === 'wildcard') return new BoxError(BoxError.BAD_FIELD, 'wildcard cert requires a programmable DNS backend', { field: 'tlsProvider' });
|
||||
if (!tlsConfig.provider.startsWith('letsencrypt')) return new BoxError(BoxError.BAD_FIELD, 'wildcard can only be set with letsencrypt');
|
||||
if (dnsProvider === 'manual' || dnsProvider === 'noop' || dnsProvider === 'wildcard') return new BoxError(BoxError.BAD_FIELD, 'wildcard cert requires a programmable DNS backend');
|
||||
}
|
||||
|
||||
return null;
|
||||
@@ -127,12 +123,12 @@ async function add(domain, data, auditSource) {
|
||||
|
||||
let { zoneName, provider, config, fallbackCertificate, tlsConfig, dkimSelector } = data;
|
||||
|
||||
if (!tld.isValid(domain)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid domain', { field: 'domain' });
|
||||
if (domain.endsWith('.')) throw new BoxError(BoxError.BAD_FIELD, 'Invalid domain', { field: 'domain' });
|
||||
if (!tld.isValid(domain)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid domain');
|
||||
if (domain.endsWith('.')) throw new BoxError(BoxError.BAD_FIELD, 'Invalid domain');
|
||||
|
||||
if (zoneName) {
|
||||
if (!tld.isValid(zoneName)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName', { field: 'zoneName' });
|
||||
if (zoneName.endsWith('.')) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName', { field: 'zoneName' });
|
||||
if (!tld.isValid(zoneName)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName');
|
||||
if (zoneName.endsWith('.')) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName');
|
||||
} else {
|
||||
zoneName = tld.getDomain(domain) || domain;
|
||||
}
|
||||
@@ -155,7 +151,7 @@ async function add(domain, data, auditSource) {
|
||||
dkimSelector = `cloudron-${suffix}`;
|
||||
}
|
||||
|
||||
const result = await verifyDnsConfig(config, domain, zoneName, provider);
|
||||
const result = await verifyDomainConfig(config, domain, zoneName, provider);
|
||||
if (result.error) throw result.error;
|
||||
|
||||
let queries = [
|
||||
@@ -170,7 +166,7 @@ async function add(domain, data, auditSource) {
|
||||
|
||||
await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);
|
||||
|
||||
eventlog.add(eventlog.ACTION_DOMAIN_ADD, auditSource, { domain, zoneName, provider });
|
||||
await eventlog.add(eventlog.ACTION_DOMAIN_ADD, auditSource, { domain, zoneName, provider });
|
||||
|
||||
safe(mail.onDomainAdded(domain)); // background
|
||||
}
|
||||
@@ -189,7 +185,7 @@ async function list() {
|
||||
return results;
|
||||
}
|
||||
|
||||
async function update(domain, data, auditSource) {
|
||||
async function setConfig(domain, data, auditSource) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof data.zoneName, 'string');
|
||||
assert.strictEqual(typeof data.provider, 'string');
|
||||
@@ -198,14 +194,14 @@ async function update(domain, data, auditSource) {
|
||||
assert.strictEqual(typeof data.tlsConfig, 'object');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
let { zoneName, provider, config, fallbackCertificate, tlsConfig, wellKnown } = data;
|
||||
let { zoneName, provider, config, fallbackCertificate, tlsConfig } = data;
|
||||
let error;
|
||||
|
||||
if (settings.isDemo() && (domain === settings.dashboardDomain())) throw new BoxError(BoxError.CONFLICT, 'Not allowed in demo mode');
|
||||
|
||||
const domainObject = await get(domain);
|
||||
if (zoneName) {
|
||||
if (!tld.isValid(zoneName)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName', { field: 'zoneName' });
|
||||
if (!tld.isValid(zoneName)) throw new BoxError(BoxError.BAD_FIELD, 'Invalid zoneName');
|
||||
} else {
|
||||
zoneName = domainObject.zoneName;
|
||||
}
|
||||
@@ -218,12 +214,9 @@ async function update(domain, data, auditSource) {
|
||||
error = validateTlsConfig(tlsConfig, provider);
|
||||
if (error) throw error;
|
||||
|
||||
error = validateWellKnown(wellKnown, provider);
|
||||
if (error) throw error;
|
||||
|
||||
if (provider === domainObject.provider) api(provider).injectPrivateFields(config, domainObject.config);
|
||||
|
||||
const result = await verifyDnsConfig(config, domain, zoneName, provider);
|
||||
const result = await verifyDomainConfig(config, domain, zoneName, provider);
|
||||
if (result.error) throw result.error;
|
||||
|
||||
const newData = {
|
||||
@@ -231,14 +224,13 @@ async function update(domain, data, auditSource) {
|
||||
zoneName,
|
||||
provider,
|
||||
tlsConfig,
|
||||
wellKnown,
|
||||
};
|
||||
|
||||
if (fallbackCertificate) newData.fallbackCertificate = fallbackCertificate;
|
||||
|
||||
let args = [ ], fields = [ ];
|
||||
for (const k in newData) {
|
||||
if (k === 'config' || k === 'tlsConfig' || k === 'wellKnown' || k === 'fallbackCertificate') { // json fields
|
||||
if (k === 'config' || k === 'tlsConfig' || k === 'fallbackCertificate') { // json fields
|
||||
fields.push(`${k}Json = ?`);
|
||||
args.push(JSON.stringify(newData[k]));
|
||||
} else {
|
||||
@@ -256,7 +248,22 @@ async function update(domain, data, auditSource) {
|
||||
|
||||
await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);
|
||||
|
||||
eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, zoneName, provider });
|
||||
await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, zoneName, provider });
|
||||
}
|
||||
|
||||
async function setWellKnown(domain, wellKnown, auditSource) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof wellKnown, 'object');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
let error = validateWellKnown(wellKnown);
|
||||
if (error) throw error;
|
||||
|
||||
[error] = await safe(database.query('UPDATE domains SET wellKnownJson = ? WHERE domain=?', [ JSON.stringify(wellKnown), domain ]));
|
||||
if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
|
||||
if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
|
||||
|
||||
await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, wellKnown });
|
||||
}
|
||||
|
||||
async function del(domain, auditSource) {
|
||||
@@ -274,14 +281,14 @@ async function del(domain, auditSource) {
|
||||
const [error, results] = await safe(database.transaction(queries));
|
||||
if (error && error.code === 'ER_ROW_IS_REFERENCED_2') {
|
||||
if (error.message.indexOf('apps_mailDomain_constraint') !== -1) throw new BoxError(BoxError.CONFLICT, 'Domain is in use by an app or the mailbox of an app. Check the domains of apps and the Email section of each app.');
|
||||
if (error.message.indexOf('subdomains') !== -1) throw new BoxError(BoxError.CONFLICT, 'Domain is in use by one or more app(s).');
|
||||
if (error.message.indexOf('locations') !== -1) throw new BoxError(BoxError.CONFLICT, 'Domain is in use by one or more app(s).');
|
||||
if (error.message.indexOf('mail') !== -1) throw new BoxError(BoxError.CONFLICT, 'Domain is in use by one or more mailboxes. Delete them first in the Email view.');
|
||||
throw new BoxError(BoxError.CONFLICT, error.message);
|
||||
}
|
||||
if (error) throw error;
|
||||
if (results[1].affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
|
||||
|
||||
eventlog.add(eventlog.ACTION_DOMAIN_REMOVE, auditSource, { domain });
|
||||
await eventlog.add(eventlog.ACTION_DOMAIN_REMOVE, auditSource, { domain });
|
||||
|
||||
safe(mail.onDomainRemoved(domain));
|
||||
}
|
||||
@@ -292,13 +299,13 @@ async function clear() {
|
||||
|
||||
// removes all fields that are strictly private and should never be returned by API calls
|
||||
function removePrivateFields(domain) {
|
||||
var result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
|
||||
const result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
|
||||
return api(result.provider).removePrivateFields(result);
|
||||
}
|
||||
|
||||
// removes all fields that are not accessible by a normal user
|
||||
function removeRestrictedFields(domain) {
|
||||
var result = _.pick(domain, 'domain', 'zoneName', 'provider');
|
||||
const result = _.pick(domain, 'domain', 'zoneName', 'provider');
|
||||
|
||||
result.config = {}; // always ensure config object
|
||||
|
||||
|
||||
@@ -19,30 +19,39 @@ const apps = require('./apps.js'),
|
||||
async function sync(auditSource) {
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ipv4 = await sysinfo.getServerIPv4();
|
||||
const ipv6 = await sysinfo.getServerIPv6();
|
||||
|
||||
let info = safe.JSON.parse(safe.fs.readFileSync(paths.DYNDNS_INFO_FILE, 'utf8')) || { ip: null };
|
||||
if (info.ip === ip) {
|
||||
debug(`refreshDNS: no change in IP ${ip}`);
|
||||
const info = safe.JSON.parse(safe.fs.readFileSync(paths.DYNDNS_INFO_FILE, 'utf8')) || { ipv4: null, ipv6: null };
|
||||
if (info.ip) { // legacy cache file
|
||||
info.ipv4 = info.ip;
|
||||
delete info.ip;
|
||||
}
|
||||
const ipv4Changed = info.ipv4 !== ipv4;
|
||||
const ipv6Changed = ipv6 && info.ipv6 !== ipv6; // both should be RFC 5952 format
|
||||
|
||||
if (!ipv4Changed && !ipv6Changed) {
|
||||
debug(`refreshDNS: no change in IP ipv4: ${ipv4} ipv6: ${ipv6}`);
|
||||
return;
|
||||
}
|
||||
|
||||
debug(`refreshDNS: updating ip from ${info.ip} to ${ip}`);
|
||||
|
||||
await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'A', [ ip ]);
|
||||
debug('refreshDNS: updated admin location');
|
||||
debug(`refreshDNS: updating IP from ${info.ipv4} to ipv4: ${ipv4} (changed: ${ipv4Changed}) ipv6: ${ipv6} (changed: ${ipv6Changed})`);
|
||||
if (ipv4Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'A', [ ipv4 ]);
|
||||
if (ipv6Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'AAAA', [ ipv6 ]);
|
||||
|
||||
const result = await apps.list();
|
||||
for (const app of result) {
|
||||
// do not change state of installing apps since apptask will error if dns record already exists
|
||||
if (app.installationState !== apps.ISTATE_INSTALLED) continue;
|
||||
|
||||
await dns.upsertDnsRecords(app.location, app.domain, 'A', [ ip ]);
|
||||
if (ipv4Changed) await dns.upsertDnsRecords(app.subdomain, app.domain, 'A', [ ipv4 ]);
|
||||
if (ipv6Changed) await dns.upsertDnsRecords(app.subdomain, app.domain, 'AAAA', [ ipv6 ]);
|
||||
}
|
||||
|
||||
debug('refreshDNS: updated apps');
|
||||
|
||||
await eventlog.add(eventlog.ACTION_DYNDNS_UPDATE, auditSource, { fromIp: info.ip, toIp: ip });
|
||||
info.ip = ip;
|
||||
await eventlog.add(eventlog.ACTION_DYNDNS_UPDATE, auditSource, { fromIpv4: info.ipv4, fromIpv6: info.ipv6, toIpv4: ipv4, toIpv6: ipv6 });
|
||||
info.ipv4 = ipv4;
|
||||
info.ipv6 = ipv6;
|
||||
safe.fs.writeFileSync(paths.DYNDNS_INFO_FILE, JSON.stringify(info), 'utf8');
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ exports = module.exports = {
|
||||
|
||||
ACTION_CERTIFICATE_NEW: 'certificate.new',
|
||||
ACTION_CERTIFICATE_RENEWAL: 'certificate.renew',
|
||||
ACTION_CERTIFICATE_CLEANUP: 'certificate.cleanup',
|
||||
|
||||
ACTION_DASHBOARD_DOMAIN_UPDATE: 'dashboard.domain.update',
|
||||
|
||||
@@ -43,6 +44,8 @@ exports = module.exports = {
|
||||
ACTION_DOMAIN_UPDATE: 'domain.update',
|
||||
ACTION_DOMAIN_REMOVE: 'domain.remove',
|
||||
|
||||
ACTION_INSTALL_FINISH: 'cloudron.install.finish',
|
||||
|
||||
ACTION_MAIL_LOCATION: 'mail.location',
|
||||
ACTION_MAIL_ENABLED: 'mail.enabled',
|
||||
ACTION_MAIL_DISABLED: 'mail.disabled',
|
||||
@@ -91,12 +94,14 @@ const assert = require('assert'),
|
||||
safe = require('safetydance'),
|
||||
uuid = require('uuid');
|
||||
|
||||
const EVENTLOG_FIELDS = [ 'id', 'action', 'source', 'data', 'creationTime' ].join(',');
|
||||
const EVENTLOG_FIELDS = [ 'id', 'action', 'sourceJson', 'dataJson', 'creationTime' ].join(',');
|
||||
|
||||
function postProcess(record) {
|
||||
// usually we have sourceJson and dataJson, however since this used to be the JSON data type, we don't
|
||||
record.source = safe.JSON.parse(record.source);
|
||||
record.data = safe.JSON.parse(record.data);
|
||||
record.source = safe.JSON.parse(record.sourceJson);
|
||||
delete record.sourceJson;
|
||||
record.data = safe.JSON.parse(record.dataJson);
|
||||
delete record.dataJson;
|
||||
|
||||
return record;
|
||||
}
|
||||
@@ -108,7 +113,7 @@ async function add(action, source, data) {
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
|
||||
const id = uuid.v4();
|
||||
await database.query('INSERT INTO eventlog (id, action, source, data) VALUES (?, ?, ?, ?)', [ id, action, JSON.stringify(source), JSON.stringify(data) ]);
|
||||
await database.query('INSERT INTO eventlog (id, action, sourceJson, dataJson) VALUES (?, ?, ?, ?)', [ id, action, JSON.stringify(source), JSON.stringify(data) ]);
|
||||
await notifications.onEvent(id, action, source, data);
|
||||
return id;
|
||||
}
|
||||
@@ -121,10 +126,10 @@ async function upsertLoginEvent(action, source, data) {
|
||||
|
||||
// can't do a real sql upsert, for frequent eventlog entries we only have to do 2 queries once a day
|
||||
const queries = [{
|
||||
query: 'UPDATE eventlog SET creationTime=NOW(), data=? WHERE action = ? AND source LIKE ? AND DATE(creationTime)=CURDATE()',
|
||||
query: 'UPDATE eventlog SET creationTime=NOW(), dataJson=? WHERE action = ? AND sourceJson LIKE ? AND DATE(creationTime)=CURDATE()',
|
||||
args: [ JSON.stringify(data), action, JSON.stringify(source) ]
|
||||
}, {
|
||||
query: 'SELECT ' + EVENTLOG_FIELDS + ' FROM eventlog WHERE action = ? AND source LIKE ? AND DATE(creationTime)=CURDATE()',
|
||||
query: 'SELECT ' + EVENTLOG_FIELDS + ' FROM eventlog WHERE action = ? AND sourceJson LIKE ? AND DATE(creationTime)=CURDATE()',
|
||||
args: [ action, JSON.stringify(source) ]
|
||||
}];
|
||||
|
||||
@@ -154,7 +159,7 @@ async function listPaged(actions, search, page, perPage) {
|
||||
let query = `SELECT ${EVENTLOG_FIELDS} FROM eventlog`;
|
||||
|
||||
if (actions.length || search) query += ' WHERE';
|
||||
if (search) query += ' (source LIKE ' + mysql.escape('%' + search + '%') + ' OR data LIKE ' + mysql.escape('%' + search + '%') + ')';
|
||||
if (search) query += ' (sourceJson LIKE ' + mysql.escape('%' + search + '%') + ' OR dataJson LIKE ' + mysql.escape('%' + search + '%') + ')';
|
||||
|
||||
if (actions.length && search) query += ' AND ( ';
|
||||
actions.forEach(function (action, i) {
|
||||
|
||||
@@ -20,7 +20,7 @@ const assert = require('assert'),
|
||||
debug = require('debug')('box:externalldap'),
|
||||
groups = require('./groups.js'),
|
||||
ldap = require('ldapjs'),
|
||||
once = require('once'),
|
||||
once = require('./once.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
tasks = require('./tasks.js'),
|
||||
@@ -40,10 +40,11 @@ function removePrivateFields(ldapConfig) {
|
||||
function translateUser(ldapConfig, ldapUser) {
|
||||
assert.strictEqual(typeof ldapConfig, 'object');
|
||||
|
||||
// RFC: https://datatracker.ietf.org/doc/html/rfc2798
|
||||
return {
|
||||
username: ldapUser[ldapConfig.usernameField],
|
||||
username: ldapUser[ldapConfig.usernameField].toLowerCase(),
|
||||
email: ldapUser.mail || ldapUser.mailPrimaryAddress,
|
||||
displayName: ldapUser.cn // user.giveName + ' ' + user.sn
|
||||
displayName: ldapUser.displayName || ldapUser.cn // user.giveName + ' ' + user.sn
|
||||
};
|
||||
}
|
||||
|
||||
@@ -105,8 +106,6 @@ async function clientSearch(client, dn, searchOptions) {
|
||||
assert.strictEqual(typeof dn, 'string');
|
||||
assert.strictEqual(typeof searchOptions, 'object');
|
||||
|
||||
debug(`clientSearch: Get objects at ${dn} with options ${JSON.stringify(searchOptions)}`);
|
||||
|
||||
// basic validation to not crash
|
||||
try { ldap.parseDN(dn); } catch (e) { throw new BoxError(BoxError.BAD_FIELD, 'invalid DN'); }
|
||||
|
||||
@@ -163,8 +162,6 @@ async function ldapUserSearch(externalLdapConfig, options) {
|
||||
searchOptions.filter = new ldap.AndFilter({ filters: [ extraFilter, searchOptions.filter ] });
|
||||
}
|
||||
|
||||
debug(`Listing users at ${externalLdapConfig.baseDn} with filter ${searchOptions.filter.toString()}`);
|
||||
|
||||
const client = await getClient(externalLdapConfig, { bind: true });
|
||||
const result = await clientSearch(client, externalLdapConfig.baseDn, searchOptions);
|
||||
client.unbind();
|
||||
@@ -187,8 +184,6 @@ async function ldapGroupSearch(externalLdapConfig, options) {
|
||||
searchOptions.filter = new ldap.AndFilter({ filters: [ extraFilter, searchOptions.filter ] });
|
||||
}
|
||||
|
||||
debug(`Listing groups at ${externalLdapConfig.groupBaseDn} with filter ${searchOptions.filter.toString()}`);
|
||||
|
||||
const client = await getClient(externalLdapConfig, { bind: true });
|
||||
const result = await clientSearch(client, externalLdapConfig.groupBaseDn, searchOptions);
|
||||
client.unbind();
|
||||
@@ -239,21 +234,6 @@ async function testConfig(config) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
async function search(identifier) {
|
||||
assert.strictEqual(typeof identifier, 'string');
|
||||
|
||||
const externalLdapConfig = await settings.getExternalLdapConfig();
|
||||
if (externalLdapConfig.provider === 'noop') throw new BoxError(BoxError.BAD_STATE, 'not enabled');
|
||||
|
||||
const ldapUsers = await ldapUserSearch(externalLdapConfig, { filter: `${externalLdapConfig.usernameField}=${identifier}` });
|
||||
|
||||
// translate ldap properties to ours
|
||||
let users = ldapUsers.map(function (u) { return translateUser(externalLdapConfig, u); });
|
||||
|
||||
return users;
|
||||
}
|
||||
|
||||
async function maybeCreateUser(identifier) {
|
||||
assert.strictEqual(typeof identifier, 'string');
|
||||
|
||||
@@ -452,7 +432,7 @@ async function syncGroupUsers(externalLdapConfig, progressCallback) {
|
||||
|
||||
debug(`syncGroupUsers: Found member object at ${memberDn} adding to group ${group.name}`);
|
||||
|
||||
const username = result[externalLdapConfig.usernameField];
|
||||
const username = result[externalLdapConfig.usernameField].toLowerCase();
|
||||
if (!username) continue;
|
||||
|
||||
const [getError, userObject] = await safe(users.getByUsername(username));
|
||||
|
||||
@@ -33,13 +33,13 @@ const GROUPS_FIELDS = [ 'id', 'name', 'source' ].join(',');
|
||||
function validateGroupname(name) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
|
||||
if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'name must be atleast 1 char', { field: 'name' });
|
||||
if (name.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'name too long', { field: 'name' });
|
||||
if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'name must be atleast 1 char');
|
||||
if (name.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'name too long');
|
||||
|
||||
if (constants.RESERVED_NAMES.indexOf(name) !== -1) return new BoxError(BoxError.BAD_FIELD, 'name is reserved', { field: name });
|
||||
if (constants.RESERVED_NAMES.indexOf(name) !== -1) return new BoxError(BoxError.BAD_FIELD, 'name is reserved');
|
||||
|
||||
// need to consider valid LDAP characters here (e.g '+' is reserved)
|
||||
if (/[^a-zA-Z0-9.-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'name can only contain alphanumerals, hyphen and dot', { field: 'name' });
|
||||
if (/[^a-zA-Z0-9.-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'name can only contain alphanumerals, hyphen and dot');
|
||||
|
||||
return null;
|
||||
}
|
||||
@@ -47,7 +47,7 @@ function validateGroupname(name) {
|
||||
function validateGroupSource(source) {
|
||||
assert.strictEqual(typeof source, 'string');
|
||||
|
||||
if (source !== '' && source !== 'ldap') return new BoxError(BoxError.BAD_FIELD, 'source must be "" or "ldap"', { field: source });
|
||||
if (source !== '' && source !== 'ldap') return new BoxError(BoxError.BAD_FIELD, 'source must be "" or "ldap"');
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
|
||||
exports = module.exports = hat;
|
||||
|
||||
var crypto = require('crypto');
|
||||
const crypto = require('crypto');
|
||||
|
||||
function hat (bits) {
|
||||
function hat(bits) {
|
||||
return crypto.randomBytes(bits / 8).toString('hex');
|
||||
}
|
||||
|
||||
225
src/hush.js
Normal file
225
src/hush.js
Normal file
@@ -0,0 +1,225 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
crypto = require('crypto'),
|
||||
debug = require('debug')('box:hush'),
|
||||
fs = require('fs'),
|
||||
progressStream = require('progress-stream'),
|
||||
TransformStream = require('stream').Transform;
|
||||
|
||||
class EncryptStream extends TransformStream {
|
||||
constructor(encryption) {
|
||||
super();
|
||||
this._headerPushed = false;
|
||||
this._iv = crypto.randomBytes(16);
|
||||
this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
|
||||
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
|
||||
}
|
||||
|
||||
pushHeaderIfNeeded() {
|
||||
if (!this._headerPushed) {
|
||||
const magic = Buffer.from('CBV2');
|
||||
this.push(magic);
|
||||
this._hmac.update(magic);
|
||||
this.push(this._iv);
|
||||
this._hmac.update(this._iv);
|
||||
this._headerPushed = true;
|
||||
}
|
||||
}
|
||||
|
||||
_transform(chunk, ignoredEncoding, callback) {
|
||||
this.pushHeaderIfNeeded();
|
||||
|
||||
try {
|
||||
const crypt = this._cipher.update(chunk);
|
||||
this._hmac.update(crypt);
|
||||
callback(null, crypt);
|
||||
} catch (error) {
|
||||
callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when updating: ${error.message}`));
|
||||
}
|
||||
}
|
||||
|
||||
_flush(callback) {
|
||||
try {
|
||||
this.pushHeaderIfNeeded(); // for 0-length files
|
||||
const crypt = this._cipher.final();
|
||||
this.push(crypt);
|
||||
this._hmac.update(crypt);
|
||||
callback(null, this._hmac.digest()); // +32 bytes
|
||||
} catch (error) {
|
||||
callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when flushing: ${error.message}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class DecryptStream extends TransformStream {
|
||||
constructor(encryption) {
|
||||
super();
|
||||
this._key = Buffer.from(encryption.dataKey, 'hex');
|
||||
this._header = Buffer.alloc(0);
|
||||
this._decipher = null;
|
||||
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
|
||||
this._buffer = Buffer.alloc(0);
|
||||
}
|
||||
|
||||
_transform(chunk, ignoredEncoding, callback) {
|
||||
const needed = 20 - this._header.length; // 4 for magic, 16 for iv
|
||||
|
||||
if (this._header.length !== 20) { // not gotten header yet
|
||||
this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
|
||||
if (this._header.length !== 20) return callback();
|
||||
|
||||
if (!this._header.slice(0, 4).equals(new Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));
|
||||
|
||||
const iv = this._header.slice(4);
|
||||
this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
|
||||
this._hmac.update(this._header);
|
||||
}
|
||||
|
||||
this._buffer = Buffer.concat([ this._buffer, chunk.slice(needed) ]);
|
||||
if (this._buffer.length < 32) return callback(); // hmac trailer length is 32
|
||||
|
||||
try {
|
||||
const cipherText = this._buffer.slice(0, -32);
|
||||
this._hmac.update(cipherText);
|
||||
const plainText = this._decipher.update(cipherText);
|
||||
this._buffer = this._buffer.slice(-32);
|
||||
callback(null, plainText);
|
||||
} catch (error) {
|
||||
callback(new BoxError(BoxError.CRYPTO_ERROR, `Decryption error: ${error.message}`));
|
||||
}
|
||||
}
|
||||
|
||||
_flush (callback) {
|
||||
if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));
|
||||
|
||||
try {
|
||||
if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));
|
||||
|
||||
const plainText = this._decipher.final();
|
||||
callback(null, plainText);
|
||||
} catch (error) {
|
||||
callback(new BoxError(BoxError.CRYPTO_ERROR, `Invalid password or tampered file: ${error.message}`));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function encryptFilePath(filePath, encryption) {
|
||||
assert.strictEqual(typeof filePath, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
const encryptedParts = filePath.split('/').map(function (part) {
|
||||
let hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
|
||||
const iv = hmac.update(part).digest().slice(0, 16); // iv has to be deterministic, for our sync (copy) logic to work
|
||||
const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
|
||||
let crypt = cipher.update(part);
|
||||
crypt = Buffer.concat([ iv, crypt, cipher.final() ]);
|
||||
|
||||
return crypt.toString('base64') // ensures path is valid
|
||||
.replace(/\//g, '-') // replace '/' of base64 since it conflicts with path separator
|
||||
.replace(/=/g,''); // strip trailing = padding. this is only needed if we concat base64 strings, which we don't
|
||||
});
|
||||
|
||||
return encryptedParts.join('/');
|
||||
}
|
||||
|
||||
function decryptFilePath(filePath, encryption) {
|
||||
assert.strictEqual(typeof filePath, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
const decryptedParts = [];
|
||||
for (let part of filePath.split('/')) {
|
||||
part = part + Array(part.length % 4).join('='); // add back = padding
|
||||
part = part.replace(/-/g, '/'); // replace with '/'
|
||||
|
||||
try {
|
||||
const buffer = Buffer.from(part, 'base64');
|
||||
const iv = buffer.slice(0, 16);
|
||||
let decrypt = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
|
||||
const plainText = decrypt.update(buffer.slice(16));
|
||||
const plainTextString = Buffer.concat([ plainText, decrypt.final() ]).toString('utf8');
|
||||
const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
|
||||
if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };
|
||||
|
||||
decryptedParts.push(plainTextString);
|
||||
} catch (error) {
|
||||
debug(`Error decrypting part ${part} of path ${filePath}:`, error);
|
||||
return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
|
||||
}
|
||||
}
|
||||
|
||||
return { result: decryptedParts.join('/') };
|
||||
}
|
||||
|
||||
function createReadStream(sourceFile, encryption) {
|
||||
assert.strictEqual(typeof sourceFile, 'string');
|
||||
assert.strictEqual(typeof encryption, 'object');
|
||||
|
||||
const stream = fs.createReadStream(sourceFile);
|
||||
const ps = progressStream({ time: 10000 }); // display a progress every 10 seconds
|
||||
|
||||
stream.on('error', function (error) {
|
||||
debug(`createReadStream: read stream error at ${sourceFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
|
||||
});
|
||||
|
||||
stream.on('open', () => ps.emit('open'));
|
||||
|
||||
if (encryption) {
|
||||
let encryptStream = new EncryptStream(encryption);
|
||||
|
||||
encryptStream.on('error', function (error) {
|
||||
debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
|
||||
ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
|
||||
});
|
||||
|
||||
return stream.pipe(encryptStream).pipe(ps);
|
||||
} else {
|
||||
return stream.pipe(ps);
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a progress-reporting writable stream that persists to destFile.
// When `encryption` is set, incoming data is piped through a DecryptStream
// before hitting disk. Write and decryption failures are re-emitted as
// BoxError 'error' events; a custom 'done' event signals that the file
// stream finished (the progress stream's own 'finish' only marks end of input).
function createWriteStream(destFile, encryption) {
    assert.strictEqual(typeof destFile, 'string');
    assert.strictEqual(typeof encryption, 'object'); // note: null also satisfies this check

    const fileStream = fs.createWriteStream(destFile);
    const progress = progressStream({ time: 10000 }); // display a progress every 10 seconds

    fileStream.on('error', (error) => {
        debug(`createWriteStream: write stream error ${destFile}`, error);
        progress.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
    });

    fileStream.on('finish', () => {
        debug('createWriteStream: done.');
        // we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not write
        progress.emit('done');
    });

    if (encryption) {
        const plain = new DecryptStream(encryption);

        plain.on('error', (error) => {
            debug(`createWriteStream: decrypt stream error ${destFile}`, error);
            progress.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
        });

        progress.pipe(plain).pipe(fileStream);
    } else {
        progress.pipe(fileStream);
    }

    return progress;
}
|
||||
|
||||
// Public interface of this module: raw crypto transform streams, the
// file-path encryption helpers and the progress-reporting stream factories.
exports = module.exports = {
    // transform streams
    EncryptStream,
    DecryptStream,

    // path helpers
    encryptFilePath,
    decryptFilePath,

    // stream factories
    createReadStream,
    createWriteStream
};
|
||||
@@ -6,22 +6,22 @@
|
||||
|
||||
exports = module.exports = {
|
||||
// a version change recreates all containers with latest docker config
|
||||
'version': '48.20.0',
|
||||
'version': '49.0.0',
|
||||
|
||||
'baseImages': [
|
||||
{ repo: 'cloudron/base', tag: 'cloudron/base:3.0.0@sha256:455c70428723e3a823198c57472785437eb6eab082e79b3ff04ea584faf46e92' }
|
||||
{ repo: 'cloudron/base', tag: 'cloudron/base:3.2.0@sha256:ba1d566164a67c266782545ea9809dc611c4152e27686fd14060332dd88263ea' }
|
||||
],
|
||||
|
||||
// a major version bump in the db containers will trigger the restore logic that uses the db dumps
|
||||
// docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
|
||||
'images': {
|
||||
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.3.1@sha256:759cafab7625ff538418a1f2ed5558b1d5bff08c576bba577d865d6d02b49091' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.0.7@sha256:6679c2fb96f8d6d62349b607748570640a90fc46b50aad80ca2c0161655d07f4' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.1.1@sha256:86e4e2f4fd43809efca7c9cb1def4d7608cf36cb9ea27052f9b64da4481db43a' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.2@sha256:9df297ccc3370f38c54f8d614e214e082b363777cd1c6c9522e29663cc8f5362' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.4@sha256:5c60de75d078ae609da5565f32dcd91030f45907e945756cc976ff207b8c6199' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.4.1@sha256:7e3fa6c409b20693acb2472f084516e48b2af6da1341c243148ffc10f74e7d82' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.1@sha256:bed9f6b5d06fe2c5289e895e806cfa5b74ad62993d705be55d4554a67d128029' },
|
||||
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.4.2@sha256:810306478c3dac7caa7497e5f6381cc7ce2f68aafda849a4945d39a67cc04bc1' }
|
||||
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.4.0@sha256:45817f1631992391d585f171498d257487d872480fd5646723a2b956cc4ef15d' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.2.1@sha256:75cef64ba4917ba9ec68bc0c9d9ba3a9eeae00a70173cd6d81cc6118038737d9' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.3.1@sha256:b0c564d097b765d4a639330843e2e813d2c87fc8ed34b7df7550bf2c6df0012c' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.2.1@sha256:f7f689beea07b1c6a9503a48f6fb38ef66e5b22f59fc585a92842a6578b33d46' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.3.0@sha256:89c4e8083631b6d16b5d630d9b27f8ecf301c62f81219d77bd5948a1f4a4375c' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.6.1@sha256:b8b93f007105080d4812a05648e6bc5e15c95c63f511c829cbc14a163d9ea029' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.1.0@sha256:30ec3a01964a1e01396acf265183997c3e17fb07eac1a82b979292cc7719ff4b' },
|
||||
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.6.1@sha256:ba4b9a1fe274c0ef0a900e5d0deeb8f3da08e118798d1d90fbf995cc0cf6e3a3' }
|
||||
}
|
||||
};
|
||||
|
||||
176
src/ldap.js
176
src/ldap.js
@@ -21,7 +21,7 @@ const addonConfigs = require('./addonconfigs.js'),
|
||||
users = require('./users.js'),
|
||||
util = require('util');
|
||||
|
||||
var gServer = null;
|
||||
let gServer = null;
|
||||
|
||||
const NOOP = function () {};
|
||||
|
||||
@@ -48,11 +48,39 @@ async function authenticateApp(req, res, next) {
|
||||
next();
|
||||
}
|
||||
|
||||
// Authenticates the bind DN against the user database and attaches
// req.user on success. Errors are mapped to the matching ldapjs error types.
async function userAuthInternal(appId, req, res, next) {
    // extract the common name which might have different attribute names
    const rdn = req.dn.rdns[0];
    const attributeName = Object.keys(rdn.attrs)[0];
    const commonName = rdn.attrs[attributeName].value;
    if (!commonName) return next(new ldap.NoSuchObjectError(req.dn.toString()));

    // pick the verifier based on how the user identified themselves
    let verifyFunc;
    if (attributeName === 'mail' || commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check
        verifyFunc = users.verifyWithEmail;
    } else if (commonName.indexOf('uid-') === 0) {
        verifyFunc = users.verify;
    } else {
        verifyFunc = users.verifyWithUsername;
    }

    const [error, user] = await safe(verifyFunc(commonName, req.credentials || '', appId || ''));
    if (error) {
        if (error.reason === BoxError.NOT_FOUND) return next(new ldap.NoSuchObjectError(req.dn.toString()));
        if (error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
        return next(new ldap.OperationsError(error.message));
    }

    req.user = user;

    next();
}
|
||||
|
||||
async function getUsersWithAccessToApp(req) {
|
||||
assert.strictEqual(typeof req.app, 'object');
|
||||
|
||||
const result = await users.list();
|
||||
const allowedUsers = result.filter((user) => apps.canAccess(req.app, user));
|
||||
const allowedUsers = result.filter((user) => user.active && apps.canAccess(req.app, user)); // do not list inactive users
|
||||
return allowedUsers;
|
||||
}
|
||||
|
||||
@@ -85,13 +113,13 @@ function finalSend(results, req, res, next) {
|
||||
|
||||
if (cookie && Buffer.isBuffer(cookie)) {
|
||||
// we have pagination
|
||||
var first = min;
|
||||
let first = min;
|
||||
if (cookie.length !== 0) {
|
||||
first = parseInt(cookie.toString(), 10);
|
||||
}
|
||||
var last = sendPagedResults(first, first + pageSize);
|
||||
const last = sendPagedResults(first, first + pageSize);
|
||||
|
||||
var resultCookie;
|
||||
let resultCookie;
|
||||
if (last < max) {
|
||||
resultCookie = Buffer.from(last.toString());
|
||||
} else {
|
||||
@@ -142,9 +170,9 @@ async function userSearch(req, res, next) {
|
||||
const obj = {
|
||||
dn: dn.toString(),
|
||||
attributes: {
|
||||
objectclass: ['user', 'inetorgperson', 'person' ],
|
||||
objectclass: ['user', 'inetorgperson', 'person', 'organizationalperson', 'top' ],
|
||||
objectcategory: 'person',
|
||||
cn: user.id,
|
||||
cn: displayName,
|
||||
uid: user.id,
|
||||
entryuuid: user.id, // to support OpenLDAP clients
|
||||
mail: user.email,
|
||||
@@ -176,12 +204,13 @@ async function userSearch(req, res, next) {
|
||||
async function groupSearch(req, res, next) {
|
||||
debug('group search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
|
||||
|
||||
const [error, result] = await safe(getUsersWithAccessToApp(req));
|
||||
const [error, usersWithAccess] = await safe(getUsersWithAccessToApp(req));
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
|
||||
const results = [];
|
||||
|
||||
const groups = [{
|
||||
// those are the old virtual groups for backwards compat
|
||||
const virtualGroups = [{
|
||||
name: 'users',
|
||||
admin: false
|
||||
}, {
|
||||
@@ -189,16 +218,45 @@ async function groupSearch(req, res, next) {
|
||||
admin: true
|
||||
}];
|
||||
|
||||
groups.forEach(function (group) {
|
||||
virtualGroups.forEach(function (group) {
|
||||
const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
|
||||
const members = group.admin ? result.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : result;
|
||||
const members = group.admin ? usersWithAccess.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : usersWithAccess;
|
||||
|
||||
const obj = {
|
||||
dn: dn.toString(),
|
||||
attributes: {
|
||||
objectclass: ['group'],
|
||||
cn: group.name,
|
||||
memberuid: members.map(function(entry) { return entry.id; })
|
||||
memberuid: members.map(function(entry) { return entry.id; }).sort()
|
||||
}
|
||||
};
|
||||
|
||||
// ensure all filter values are also lowercase
|
||||
const lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
|
||||
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
|
||||
|
||||
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
|
||||
results.push(obj);
|
||||
}
|
||||
});
|
||||
|
||||
let [errorGroups, resultGroups] = await safe(groups.listWithMembers());
|
||||
if (errorGroups) return next(new ldap.OperationsError(errorGroups.toString()));
|
||||
|
||||
if (req.app.accessRestriction && req.app.accessRestriction.groups) {
|
||||
resultGroups = resultGroups.filter(function (g) { return req.app.accessRestriction.groups.indexOf(g.id) !== -1; });
|
||||
}
|
||||
|
||||
resultGroups.forEach(function (group) {
|
||||
const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
|
||||
const members = group.userIds.filter(function (uid) { return usersWithAccess.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
|
||||
|
||||
const obj = {
|
||||
dn: dn.toString(),
|
||||
attributes: {
|
||||
objectclass: ['group'],
|
||||
cn: group.name,
|
||||
memberuid: members
|
||||
}
|
||||
};
|
||||
|
||||
@@ -237,7 +295,7 @@ async function groupAdminsCompare(req, res, next) {
|
||||
|
||||
// we only support memberuid here, if we add new group attributes later add them here
|
||||
if (req.attribute === 'memberuid') {
|
||||
var user = result.find(function (u) { return u.id === req.value; });
|
||||
const user = result.find(function (u) { return u.id === req.value; });
|
||||
if (user && users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) return res.end(true);
|
||||
}
|
||||
|
||||
@@ -279,7 +337,8 @@ async function mailboxSearch(req, res, next) {
|
||||
res.end();
|
||||
}
|
||||
} else { // new sogo
|
||||
let [error, mailboxes] = await safe(mail.listAllMailboxes(1, 1000));
|
||||
// TODO figure out how proper pagination here could work
|
||||
let [error, mailboxes] = await safe(mail.listAllMailboxes(1, 100000));
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
|
||||
mailboxes = mailboxes.filter(m => m.active);
|
||||
@@ -289,6 +348,7 @@ async function mailboxSearch(req, res, next) {
|
||||
for (const mailbox of mailboxes) {
|
||||
const dn = ldap.parseDN(`cn=${mailbox.name}@${mailbox.domain},ou=mailboxes,dc=cloudron`);
|
||||
|
||||
if (mailbox.ownerType === mail.OWNERTYPE_APP) continue; // cannot login with app mailbox anyway
|
||||
const [error, ownerObject] = await safe(mailbox.ownerType === mail.OWNERTYPE_USER ? users.get(mailbox.ownerId) : groups.get(mailbox.ownerId));
|
||||
if (error || !ownerObject) continue; // skip mailboxes with unknown user
|
||||
|
||||
@@ -407,30 +467,9 @@ async function mailingListSearch(req, res, next) {
|
||||
async function authenticateUser(req, res, next) {
|
||||
debug('user bind: %s (from %s)', req.dn.toString(), req.connection.ldap.id);
|
||||
|
||||
// extract the common name which might have different attribute names
|
||||
const attributeName = Object.keys(req.dn.rdns[0].attrs)[0];
|
||||
const commonName = req.dn.rdns[0].attrs[attributeName].value;
|
||||
if (!commonName) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
const appId = req.app.id;
|
||||
|
||||
let verifyFunc;
|
||||
if (attributeName === 'mail') {
|
||||
verifyFunc = users.verifyWithEmail;
|
||||
} else if (commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check
|
||||
verifyFunc = users.verifyWithEmail;
|
||||
} else if (commonName.indexOf('uid-') === 0) {
|
||||
verifyFunc = users.verify;
|
||||
} else {
|
||||
verifyFunc = users.verifyWithUsername;
|
||||
}
|
||||
|
||||
const [error, user] = await safe(verifyFunc(commonName, req.credentials || '', req.app.id));
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
|
||||
if (error) return next(new ldap.OperationsError(error.message));
|
||||
|
||||
req.user = user;
|
||||
|
||||
next();
|
||||
await userAuthInternal(appId, req, res, next);
|
||||
}
|
||||
|
||||
async function authorizeUserForApp(req, res, next) {
|
||||
@@ -450,20 +489,24 @@ async function verifyMailboxPassword(mailbox, password) {
|
||||
assert.strictEqual(typeof mailbox, 'object');
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
|
||||
if (mailbox.ownerType === mail.OWNERTYPE_USER) return await users.verify(mailbox.ownerId, password, users.AP_MAIL /* identifier */);
|
||||
if (mailbox.ownerType === mail.OWNERTYPE_USER) {
|
||||
return await users.verify(mailbox.ownerId, password, users.AP_MAIL /* identifier */);
|
||||
} else if (mailbox.ownerType === mail.OWNERTYPE_GROUP) {
|
||||
const userIds = await groups.getMembers(mailbox.ownerId);
|
||||
|
||||
const userIds = await groups.getMembers(mailbox.ownerId);
|
||||
let verifiedUser = null;
|
||||
for (const userId of userIds) {
|
||||
const [error, result] = await safe(users.verify(userId, password, users.AP_MAIL /* identifier */));
|
||||
if (error) continue; // try the next user
|
||||
verifiedUser = result;
|
||||
break; // found a matching validated user
|
||||
}
|
||||
|
||||
let verifiedUser = null;
|
||||
for (const userId of userIds) {
|
||||
const [error, result] = await safe(users.verify(userId, password, users.AP_MAIL /* identifier */));
|
||||
if (error) continue; // try the next user
|
||||
verifiedUser = result;
|
||||
break; // found a matching validated user
|
||||
if (!verifiedUser) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
return verifiedUser;
|
||||
} else {
|
||||
throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
}
|
||||
|
||||
if (!verifiedUser) throw new BoxError(BoxError.INVALID_CREDENTIALS);
|
||||
return verifiedUser;
|
||||
}
|
||||
|
||||
async function authenticateSftp(req, res, next) {
|
||||
@@ -515,7 +558,7 @@ async function userSearchSftp(req, res, next) {
|
||||
const obj = {
|
||||
dn: ldap.parseDN(`cn=${username}@${appFqdn},ou=sftp,dc=cloudron`).toString(),
|
||||
attributes: {
|
||||
homeDirectory: app.dataDir ? `/mnt/${app.id}` : `/mnt/appsdata/${app.id}/data`,
|
||||
homeDirectory: app.dataDir ? `/mnt/app-${app.id}` : `/mnt/appsdata/${app.id}/data`, // see also sftp.js
|
||||
objectclass: ['user'],
|
||||
objectcategory: 'person',
|
||||
cn: user.id,
|
||||
@@ -592,6 +635,26 @@ async function authenticateMail(req, res, next) {
|
||||
await authenticateService(req.dn.rdns[1].attrs.ou.value.toLowerCase(), req.dn, req, res, next);
|
||||
}
|
||||
|
||||
// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base
// ldapjs seems to call this handler for everything when search === ''
// Answers RootDSE queries (base scope, empty DN) with server capabilities;
// everything else gets NoSuchObjectError.
async function maybeRootDSE(req, res, next) {
    // guard the dereference: req.dn can be unset, which previously made this
    // debug line throw a TypeError before the check below could run
    debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn ? req.dn.toString() : '-'}`);

    if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, rootDSE search require base scope
    if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError());

    res.send({
        dn: '',
        attributes: {
            objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ],
            supportedLDAPVersion: '3',
            vendorName: 'Cloudron LDAP',
            vendorVersion: '1.0.0'
        }
    });
    res.end();
}
|
||||
|
||||
async function start() {
|
||||
const logger = {
|
||||
trace: NOOP,
|
||||
@@ -644,6 +707,16 @@ async function start() {
|
||||
res.end();
|
||||
});
|
||||
|
||||
// directus looks for the "DN" of the bind user
|
||||
gServer.search('ou=apps,dc=cloudron', function(req, res, next) {
|
||||
const obj = {
|
||||
dn: req.dn.toString(),
|
||||
};
|
||||
finalSend([obj], req, res, next);
|
||||
});
|
||||
|
||||
gServer.search('', maybeRootDSE); // when '', it seems the callback is called for everything else
|
||||
|
||||
// just log that an attempt was made to unknown route, this helps a lot during app packaging
|
||||
gServer.use(function(req, res, next) {
|
||||
debug('not handled: dn %s, scope %s, filter %s (from %s)', req.dn ? req.dn.toString() : '-', req.scope, req.filter ? req.filter.toString() : '-', req.connection.ldap.id);
|
||||
@@ -654,5 +727,8 @@ async function start() {
|
||||
}
|
||||
|
||||
async function stop() {
|
||||
if (gServer) gServer.close();
|
||||
if (!gServer) return;
|
||||
|
||||
gServer.close();
|
||||
gServer = null;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
var assert = require('assert'),
|
||||
const assert = require('assert'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
debug = require('debug')('box:locker'),
|
||||
EventEmitter = require('events').EventEmitter,
|
||||
@@ -32,8 +32,7 @@ Locker.prototype.lock = function (operation) {
|
||||
this._operation = operation;
|
||||
++this._lockDepth;
|
||||
this._timestamp = new Date();
|
||||
var that = this;
|
||||
this._watcherId = setInterval(function () { debug('Lock unreleased %s', that._operation); }, 1000 * 60 * 5);
|
||||
this._watcherId = setInterval(() => { debug('Lock unreleased %s', this._operation); }, 1000 * 60 * 5);
|
||||
|
||||
debug('Acquired : %s', this._operation);
|
||||
|
||||
|
||||
109
src/mail.js
109
src/mail.js
@@ -57,6 +57,7 @@ exports = module.exports = {
|
||||
|
||||
OWNERTYPE_USER: 'user',
|
||||
OWNERTYPE_GROUP: 'group',
|
||||
OWNERTYPE_APP: 'app',
|
||||
|
||||
DEFAULT_MEMORY_LIMIT: 512 * 1024 * 1024,
|
||||
|
||||
@@ -64,6 +65,7 @@ exports = module.exports = {
|
||||
TYPE_LIST: 'list',
|
||||
TYPE_ALIAS: 'alias',
|
||||
|
||||
_validateName: validateName,
|
||||
_delByDomain: delByDomain,
|
||||
_updateDomain: updateDomain
|
||||
};
|
||||
@@ -75,6 +77,7 @@ const assert = require('assert'),
|
||||
crypto = require('crypto'),
|
||||
database = require('./database.js'),
|
||||
debug = require('debug')('box:mail'),
|
||||
dig = require('./dig.js'),
|
||||
dns = require('./dns.js'),
|
||||
docker = require('./docker.js'),
|
||||
domains = require('./domains.js'),
|
||||
@@ -106,6 +109,8 @@ const assert = require('assert'),
|
||||
const DNS_OPTIONS = { timeout: 5000 };
|
||||
const REMOVE_MAILBOX_CMD = path.join(__dirname, 'scripts/rmmailbox.sh');
|
||||
|
||||
const OWNERTYPES = [ exports.OWNERTYPE_USER, exports.OWNERTYPE_GROUP, exports.OWNERTYPE_APP ];
|
||||
|
||||
// if you add a field here, listMailboxes has to be updated
|
||||
const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active', 'enablePop3' ].join(',');
|
||||
const MAILDB_FIELDS = [ 'domain', 'enabled', 'mailFromValidation', 'catchAllJson', 'relayJson', 'dkimKeyJson', 'dkimSelector', 'bannerJson' ].join(',');
|
||||
@@ -158,19 +163,13 @@ function validateName(name) {
|
||||
if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'mailbox name must be atleast 1 char');
|
||||
if (name.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'mailbox name too long');
|
||||
|
||||
// also need to consider valid LDAP characters here (e.g '+' is reserved)
|
||||
if (/[^a-zA-Z0-9.-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox name can only contain alphanumerals and dot');
|
||||
// also need to consider valid LDAP characters here (e.g '+' is reserved). keep hyphen at the end so it doesn't become a range.
|
||||
if (/[^a-zA-Z0-9._-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox name can only contain alphanumerals, dot, hyphen or underscore');
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
async function checkOutboundPort25() {
|
||||
const smtpServer = _.sample([
|
||||
'smtp.gmail.com',
|
||||
'smtp.live.com',
|
||||
'smtp.1und1.de',
|
||||
]);
|
||||
|
||||
const relay = {
|
||||
value: 'OK',
|
||||
status: false,
|
||||
@@ -180,7 +179,7 @@ async function checkOutboundPort25() {
|
||||
return await new Promise((resolve) => {
|
||||
const client = new net.Socket();
|
||||
client.setTimeout(5000);
|
||||
client.connect(25, smtpServer);
|
||||
client.connect(25, constants.PORT25_CHECK_SERVER);
|
||||
client.on('connect', function () {
|
||||
relay.status = true;
|
||||
relay.value = 'OK';
|
||||
@@ -189,16 +188,16 @@ async function checkOutboundPort25() {
|
||||
});
|
||||
client.on('timeout', function () {
|
||||
relay.status = false;
|
||||
relay.value = `Connect to ${smtpServer} timed out. Check if port 25 (outbound) is blocked`;
|
||||
relay.value = `Connect to ${constants.PORT25_CHECK_SERVER} timed out. Check if port 25 (outbound) is blocked`;
|
||||
client.destroy();
|
||||
relay.errorMessage = `Connect to ${smtpServer} timed out.`;
|
||||
relay.errorMessage = `Connect to ${constants.PORT25_CHECK_SERVER} timed out.`;
|
||||
resolve(relay);
|
||||
});
|
||||
client.on('error', function (error) {
|
||||
relay.status = false;
|
||||
relay.value = `Connect to ${smtpServer} failed: ${error.message}. Check if port 25 (outbound) is blocked`;
|
||||
relay.value = `Connect to ${constants.PORT25_CHECK_SERVER} failed: ${error.message}. Check if port 25 (outbound) is blocked`;
|
||||
client.destroy();
|
||||
relay.errorMessage = `Connect to ${smtpServer} failed.`;
|
||||
relay.errorMessage = `Connect to ${constants.PORT25_CHECK_SERVER} failed: ${error.message}`;
|
||||
resolve(relay);
|
||||
});
|
||||
});
|
||||
@@ -269,7 +268,7 @@ async function checkDkim(mailDomain) {
|
||||
|
||||
dkim.expected = `v=DKIM1; t=s; p=${publicKey}`;
|
||||
|
||||
const [error, txtRecords] = await safe(dns.promises.resolve(dkim.domain, dkim.type, DNS_OPTIONS));
|
||||
const [error, txtRecords] = await safe(dig.resolve(dkim.domain, dkim.type, DNS_OPTIONS));
|
||||
if (error) {
|
||||
dkim.errorMessage = error.message;
|
||||
return dkim;
|
||||
@@ -298,7 +297,7 @@ async function checkSpf(domain, mailFqdn) {
|
||||
errorMessage: ''
|
||||
};
|
||||
|
||||
const [error, txtRecords] = await safe(dns.promises.resolve(spf.domain, spf.type, DNS_OPTIONS));
|
||||
const [error, txtRecords] = await safe(dig.resolve(spf.domain, spf.type, DNS_OPTIONS));
|
||||
if (error) {
|
||||
spf.errorMessage = error.message;
|
||||
return spf;
|
||||
@@ -336,7 +335,7 @@ async function checkMx(domain, mailFqdn) {
|
||||
errorMessage: ''
|
||||
};
|
||||
|
||||
const [error, mxRecords] = await safe(dns.promises.resolve(mx.domain, mx.type, DNS_OPTIONS));
|
||||
const [error, mxRecords] = await safe(dig.resolve(mx.domain, mx.type, DNS_OPTIONS));
|
||||
if (error) {
|
||||
mx.errorMessage = error.message;
|
||||
return mx;
|
||||
@@ -349,10 +348,10 @@ async function checkMx(domain, mailFqdn) {
|
||||
if (mx.status) return mx; // MX record is "my."
|
||||
|
||||
// cloudflare might create a conflict subdomain (https://support.cloudflare.com/hc/en-us/articles/360020296512-DNS-Troubleshooting-FAQ)
|
||||
const [error2, mxIps] = await safe(dns.promises.resolve(mxRecords[0].exchange, 'A', DNS_OPTIONS));
|
||||
const [error2, mxIps] = await safe(dig.resolve(mxRecords[0].exchange, 'A', DNS_OPTIONS));
|
||||
if (error2 || mxIps.length !== 1) return mx;
|
||||
|
||||
const [error3, ip] = await safe(sysinfo.getServerIp());
|
||||
const [error3, ip] = await safe(sysinfo.getServerIPv4());
|
||||
if (error3) return mx;
|
||||
|
||||
mx.status = mxIps[0] === ip;
|
||||
@@ -361,9 +360,9 @@ async function checkMx(domain, mailFqdn) {
|
||||
}
|
||||
|
||||
function txtToDict(txt) {
|
||||
var dict = {};
|
||||
const dict = {};
|
||||
txt.split(';').forEach(function(v) {
|
||||
var p = v.trim().split('=');
|
||||
const p = v.trim().split('=');
|
||||
dict[p[0]]=p[1];
|
||||
});
|
||||
return dict;
|
||||
@@ -381,7 +380,7 @@ async function checkDmarc(domain) {
|
||||
errorMessage: ''
|
||||
};
|
||||
|
||||
const [error, txtRecords] = await safe(dns.promises.resolve(dmarc.domain, dmarc.type, DNS_OPTIONS));
|
||||
const [error, txtRecords] = await safe(dig.resolve(dmarc.domain, dmarc.type, DNS_OPTIONS));
|
||||
|
||||
if (error) {
|
||||
dmarc.errorMessage = error.message;
|
||||
@@ -410,7 +409,7 @@ async function checkPtr(mailFqdn) {
|
||||
errorMessage: ''
|
||||
};
|
||||
|
||||
const [error, ip] = await safe(sysinfo.getServerIp());
|
||||
const [error, ip] = await safe(sysinfo.getServerIPv4());
|
||||
if (error) {
|
||||
ptr.errorMessage = error.message;
|
||||
return ptr;
|
||||
@@ -419,7 +418,7 @@ async function checkPtr(mailFqdn) {
|
||||
ptr.domain = ip.split('.').reverse().join('.') + '.in-addr.arpa';
|
||||
ptr.name = ip;
|
||||
|
||||
const [error2, ptrRecords] = await safe(dns.promises.resolve(ptr.domain, 'PTR', DNS_OPTIONS));
|
||||
const [error2, ptrRecords] = await safe(dig.resolve(ptr.domain, 'PTR', DNS_OPTIONS));
|
||||
if (error2) {
|
||||
ptr.errorMessage = error2.message;
|
||||
return ptr;
|
||||
@@ -495,21 +494,21 @@ const RBL_LIST = [
|
||||
|
||||
// this function currently only looks for black lists based on IP. TODO: also look up by domain
|
||||
async function checkRblStatus(domain) {
|
||||
const ip = await sysinfo.getServerIp();
|
||||
const ip = await sysinfo.getServerIPv4();
|
||||
|
||||
const flippedIp = ip.split('.').reverse().join('.');
|
||||
|
||||
// https://tools.ietf.org/html/rfc5782
|
||||
const blacklistedServers = [];
|
||||
for (const rblServer of RBL_LIST) {
|
||||
const [error, records] = await safe(dns.promises.resolve(flippedIp + '.' + rblServer.dns, 'A', DNS_OPTIONS));
|
||||
const [error, records] = await safe(dig.resolve(flippedIp + '.' + rblServer.dns, 'A', DNS_OPTIONS));
|
||||
if (error || !records) continue; // not listed
|
||||
|
||||
debug(`checkRblStatus: ${domain} (ip: ${flippedIp}) is in the blacklist of ${JSON.stringify(rblServer)}`);
|
||||
|
||||
const result = _.extend({ }, rblServer);
|
||||
|
||||
const [error2, txtRecords] = await safe(dns.promises.resolve(flippedIp + '.' + rblServer.dns, 'TXT', DNS_OPTIONS));
|
||||
const [error2, txtRecords] = await safe(dig.resolve(flippedIp + '.' + rblServer.dns, 'TXT', DNS_OPTIONS));
|
||||
result.txtRecords = error2 || !txtRecords ? 'No txt record' : txtRecords.map(x => x.join(''));
|
||||
|
||||
debug(`checkRblStatus: ${domain} (error: ${error2.message}) (txtRecords: ${JSON.stringify(txtRecords)})`);
|
||||
@@ -517,7 +516,7 @@ async function checkRblStatus(domain) {
|
||||
blacklistedServers.push(result);
|
||||
}
|
||||
|
||||
debug(`checkRblStatus: ${domain} (ip: ${ip}) servers: ${JSON.stringify(blacklistedServers)})`);
|
||||
debug(`checkRblStatus: ${domain} (ip: ${ip}) blacklistedServers: ${JSON.stringify(blacklistedServers)})`);
|
||||
|
||||
return { status: blacklistedServers.length === 0, ip, servers: blacklistedServers };
|
||||
}
|
||||
@@ -610,11 +609,10 @@ async function checkConfiguration() {
|
||||
return markdownMessage; // empty message means all status checks succeeded
|
||||
}
|
||||
|
||||
async function createMailConfig(mailFqdn, mailDomain) {
|
||||
async function createMailConfig(mailFqdn) {
|
||||
assert.strictEqual(typeof mailFqdn, 'string');
|
||||
assert.strictEqual(typeof mailDomain, 'string');
|
||||
|
||||
debug('createMailConfig: generating mail config');
|
||||
debug(`createMailConfig: generating mail config with ${mailFqdn}`);
|
||||
|
||||
const mailDomains = await listDomains();
|
||||
|
||||
@@ -623,7 +621,7 @@ async function createMailConfig(mailFqdn, mailDomain) {
|
||||
|
||||
// mail_domain is used for SRS
|
||||
if (!safe.fs.writeFileSync(`${paths.MAIL_CONFIG_DIR}/mail.ini`,
|
||||
`mail_in_domains=${mailInDomains}\nmail_out_domains=${mailOutDomains}\nmail_server_name=${mailFqdn}\nmail_domain=${mailDomain}\n\n`, 'utf8')) {
|
||||
`mail_in_domains=${mailInDomains}\nmail_out_domains=${mailOutDomains}\nmail_server_name=${mailFqdn}\n\n`, 'utf8')) {
|
||||
throw new BoxError(BoxError.FS_ERROR, `Could not create mail var file: ${safe.error.message}`);
|
||||
}
|
||||
|
||||
@@ -706,7 +704,7 @@ async function configureMail(mailFqdn, mailDomain, serviceConfig) {
|
||||
await shell.promises.exec('stopMail', 'docker stop mail || true');
|
||||
await shell.promises.exec('removeMail', 'docker rm -f mail || true');
|
||||
|
||||
const allowInbound = await createMailConfig(mailFqdn, mailDomain);
|
||||
const allowInbound = await createMailConfig(mailFqdn);
|
||||
|
||||
const ports = allowInbound ? '-p 587:2587 -p 993:9993 -p 4190:4190 -p 25:2587 -p 465:2465 -p 995:9995' : '';
|
||||
const readOnly = !serviceConfig.recoveryMode ? '--read-only' : '';
|
||||
@@ -762,8 +760,8 @@ async function restartMail() {
|
||||
const servicesConfig = await settings.getServicesConfig();
|
||||
const mailConfig = servicesConfig['mail'] || {};
|
||||
|
||||
debug(`restartMail: restarting mail container with mailFqdn:${settings.mailFqdn()} dashboardDomain:${settings.dashboardDomain()}`);
|
||||
await configureMail(settings.mailFqdn(), settings.dashboardDomain(), mailConfig);
|
||||
debug(`restartMail: restarting mail container with mailFqdn:${settings.mailFqdn()} mailDomain:${settings.mailDomain()}`);
|
||||
await configureMail(settings.mailFqdn(), settings.mailDomain(), mailConfig);
|
||||
}
|
||||
|
||||
async function startMail(existingInfra) {
|
||||
@@ -936,7 +934,7 @@ async function changeLocation(auditSource, progressCallback) {
|
||||
let progress = 20;
|
||||
progressCallback({ percent: progress, message: `Setting up DNS of certs of mail server ${fqdn}` });
|
||||
|
||||
await cloudron.setupDnsAndCert(subdomain, domain, auditSource, progressCallback);
|
||||
await cloudron.setupDnsAndCert(subdomain, domain, auditSource, (progress) => progressCallback({ message: progress.message })); // remove the percent
|
||||
const allDomains = await domains.list();
|
||||
|
||||
for (let idx = 0; idx < allDomains.length; idx++) {
|
||||
@@ -1166,13 +1164,14 @@ async function addMailbox(name, domain, data, auditSource) {
|
||||
let error = validateName(name);
|
||||
if (error) throw error;
|
||||
|
||||
if (ownerType !== exports.OWNERTYPE_USER && ownerType !== exports.OWNERTYPE_GROUP) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
|
||||
if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
|
||||
|
||||
[error] = await safe(database.query('INSERT INTO mailboxes (name, type, domain, ownerId, ownerType, active) VALUES (?, ?, ?, ?, ?, ?)', [ name, exports.TYPE_MAILBOX, domain, ownerId, ownerType, active ]));
|
||||
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'mailbox already exists');
|
||||
if (error && error.code === 'ER_NO_REFERENCED_ROW_2' && error.sqlMessage.includes('mailboxes_domain_constraint')) throw new BoxError(BoxError.NOT_FOUND, `no such domain '${domain}'`);
|
||||
if (error) throw error;
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active });
|
||||
}
|
||||
|
||||
async function updateMailbox(name, domain, data, auditSource) {
|
||||
@@ -1189,7 +1188,7 @@ async function updateMailbox(name, domain, data, auditSource) {
|
||||
|
||||
name = name.toLowerCase();
|
||||
|
||||
if (ownerType !== exports.OWNERTYPE_USER && ownerType !== exports.OWNERTYPE_GROUP) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
|
||||
if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
|
||||
|
||||
const mailbox = await getMailbox(name, domain);
|
||||
if (!mailbox) throw new BoxError(BoxError.NOT_FOUND, 'No such mailbox');
|
||||
@@ -1197,7 +1196,7 @@ async function updateMailbox(name, domain, data, auditSource) {
|
||||
const result = await database.query('UPDATE mailboxes SET ownerId = ?, ownerType = ?, active = ?, enablePop3 = ? WHERE name = ? AND domain = ?', [ ownerId, ownerType, active, enablePop3, name, domain ]);
|
||||
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldUserId: mailbox.userId, ownerId, ownerType, active });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldUserId: mailbox.userId, ownerId, ownerType, active });
|
||||
}
|
||||
|
||||
async function removeSolrIndex(mailbox) {
|
||||
@@ -1205,9 +1204,8 @@ async function removeSolrIndex(mailbox) {
|
||||
|
||||
const addonDetails = await services.getContainerDetails('mail', 'CLOUDRON_MAIL_TOKEN');
|
||||
|
||||
const [error, response] = await safe(superagent.post(`https://${addonDetails.ip}:3000/solr_delete_index?access_token=${addonDetails.token}`)
|
||||
const [error, response] = await safe(superagent.post(`http://${addonDetails.ip}:3000/solr_delete_index?access_token=${addonDetails.token}`)
|
||||
.timeout(2000)
|
||||
.disableTLSCerts()
|
||||
.send({ mailbox })
|
||||
.ok(() => true));
|
||||
|
||||
@@ -1235,7 +1233,7 @@ async function delMailbox(name, domain, options, auditSource) {
|
||||
const [error] = await safe(removeSolrIndex(mailbox));
|
||||
if (error) debug(`delMailbox: failed to remove solr index: ${error.message}`);
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_REMOVE, auditSource, { name, domain });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_REMOVE, auditSource, { name, domain });
|
||||
}
|
||||
|
||||
async function getAlias(name, domain) {
|
||||
@@ -1259,32 +1257,35 @@ async function getAliases(name, domain) {
|
||||
return await database.query('SELECT name, domain FROM mailboxes WHERE type = ? AND aliasName = ? AND aliasDomain = ? ORDER BY name', [ exports.TYPE_ALIAS, name, domain ]);
|
||||
}
|
||||
|
||||
async function setAliases(name, domain, aliases) {
|
||||
async function setAliases(name, domain, aliases, auditSource) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert(Array.isArray(aliases));
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
for (let i = 0; i < aliases.length; i++) {
|
||||
let name = aliases[i].name.toLowerCase();
|
||||
let domain = aliases[i].domain.toLowerCase();
|
||||
const name = aliases[i].name.toLowerCase();
|
||||
const domain = aliases[i].domain.toLowerCase();
|
||||
|
||||
let error = validateName(name);
|
||||
const error = validateName(name);
|
||||
if (error) throw error;
|
||||
|
||||
if (!validator.isEmail(`${name}@${domain}`)) throw new BoxError(BoxError.BAD_FIELD, `Invalid email: ${name}@${domain}`);
|
||||
const mailDomain = await getDomain(domain);
|
||||
if (!mailDomain) throw new BoxError(BoxError.NOT_FOUND, `mail domain ${domain} not found`);
|
||||
|
||||
aliases[i] = { name, domain };
|
||||
}
|
||||
|
||||
const results = await database.query('SELECT ' + MAILBOX_FIELDS + ' FROM mailboxes WHERE name = ? AND domain = ?', [ name, domain ]);
|
||||
if (results.length === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
|
||||
|
||||
let queries = [];
|
||||
const queries = [];
|
||||
// clear existing aliases
|
||||
queries.push({ query: 'DELETE FROM mailboxes WHERE aliasName = ? AND aliasDomain = ? AND type = ?', args: [ name, domain, exports.TYPE_ALIAS ] });
|
||||
aliases.forEach(function (alias) {
|
||||
for (const alias of aliases) {
|
||||
queries.push({ query: 'INSERT INTO mailboxes (name, domain, type, aliasName, aliasDomain, ownerId, ownerType) VALUES (?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ alias.name, alias.domain, exports.TYPE_ALIAS, name, domain, results[0].ownerId, results[0].ownerType ] });
|
||||
});
|
||||
}
|
||||
|
||||
const [error] = await safe(database.transaction(queries));
|
||||
if (error && error.code === 'ER_DUP_ENTRY' && error.message.indexOf('mailboxes_name_domain_unique_index') !== -1) {
|
||||
@@ -1294,6 +1295,8 @@ async function setAliases(name, domain, aliases) {
|
||||
throw new BoxError(BoxError.ALREADY_EXISTS, `Mailbox, mailinglist or alias for ${aliasMatch[1]} already exists`);
|
||||
}
|
||||
if (error) throw error;
|
||||
|
||||
await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, aliases });
|
||||
}
|
||||
|
||||
async function getLists(domain, search, page, perPage) {
|
||||
@@ -1348,7 +1351,7 @@ async function addList(name, domain, data, auditSource) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'mailbox already exists');
|
||||
if (error) throw error;
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_LIST_ADD, auditSource, { name, domain, members, membersOnly, active });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_LIST_ADD, auditSource, { name, domain, members, membersOnly, active });
|
||||
}
|
||||
|
||||
async function updateList(name, domain, data, auditSource) {
|
||||
@@ -1375,7 +1378,7 @@ async function updateList(name, domain, data, auditSource) {
|
||||
[ JSON.stringify(members), membersOnly, active, name, domain ]);
|
||||
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldMembers: result.members, members, membersOnly, active });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_LIST_UPDATE, auditSource, { name, domain, oldMembers: result.members, members, membersOnly, active });
|
||||
}
|
||||
|
||||
async function delList(name, domain, auditSource) {
|
||||
@@ -1387,7 +1390,7 @@ async function delList(name, domain, auditSource) {
|
||||
const result = await database.query('DELETE FROM mailboxes WHERE ((name=? AND domain=?) OR (aliasName = ? AND aliasDomain=?))', [ name, domain, name, domain ]);
|
||||
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
|
||||
|
||||
eventlog.add(eventlog.ACTION_MAIL_LIST_REMOVE, auditSource, { name, domain });
|
||||
await eventlog.add(eventlog.ACTION_MAIL_LIST_REMOVE, auditSource, { name, domain });
|
||||
}
|
||||
|
||||
// resolves the members of a list. i.e the lists and aliases
|
||||
|
||||
@@ -144,7 +144,7 @@ async function sendNewLoginLocation(user, loginLocation) {
|
||||
|
||||
const mailOptions = {
|
||||
from: mailConfig.notificationFrom,
|
||||
to: user.fallbackEmail,
|
||||
to: user.email,
|
||||
subject: ejs.render(translation.translate('{{ newLoginEmail.subject }}', translationAssets.translations || {}, translationAssets.fallback || {}), { cloudron: mailConfig.cloudronName }),
|
||||
text: render('new_login_location-text.ejs', templateData, translationAssets),
|
||||
html: render('new_login_location-html.ejs', templateData, translationAssets)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
var url = require('url');
|
||||
const url = require('url');
|
||||
|
||||
/*
|
||||
* CORS middleware
|
||||
@@ -11,19 +11,19 @@ var url = require('url');
|
||||
*/
|
||||
module.exports = function cors(options) {
|
||||
options = options || { };
|
||||
var maxAge = options.maxAge || 60 * 60 * 25 * 5; // 5 days
|
||||
var origins = options.origins || [ '*' ];
|
||||
var allowCredentials = options.allowCredentials || false; // cookies
|
||||
const maxAge = options.maxAge || 60 * 60 * 25 * 5; // 5 days
|
||||
const origins = options.origins || [ '*' ];
|
||||
const allowCredentials = options.allowCredentials || false; // cookies
|
||||
|
||||
return function (req, res, next) {
|
||||
var requestOrigin = req.headers.origin;
|
||||
let requestOrigin = req.headers.origin;
|
||||
if (!requestOrigin) return next();
|
||||
|
||||
requestOrigin = url.parse(requestOrigin);
|
||||
if (!requestOrigin.host) return res.status(405).send('CORS not allowed from this domain');
|
||||
|
||||
var hostname = requestOrigin.host.split(':')[0]; // remove any port
|
||||
var originAllowed = origins.some(function (o) { return o === '*' || o === hostname; });
|
||||
const hostname = requestOrigin.host.split(':')[0]; // remove any port
|
||||
const originAllowed = origins.some(function (o) { return o === '*' || o === hostname; });
|
||||
if (!originAllowed) {
|
||||
return res.status(405).send('CORS not allowed from this domain');
|
||||
}
|
||||
|
||||
@@ -2,19 +2,19 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
var multiparty = require('multiparty'),
|
||||
const multiparty = require('multiparty'),
|
||||
timeout = require('connect-timeout');
|
||||
|
||||
function _mime(req) {
|
||||
var str = req.headers['content-type'] || '';
|
||||
return str.split(';')[0];
|
||||
const str = req.headers['content-type'] || '';
|
||||
return str.split(';')[0];
|
||||
}
|
||||
|
||||
module.exports = function multipart(options) {
|
||||
return function (req, res, next) {
|
||||
if (_mime(req) !== 'multipart/form-data') return res.status(400).send('Invalid content-type. Expecting multipart');
|
||||
|
||||
var form = new multiparty.Form({
|
||||
const form = new multiparty.Form({
|
||||
uploadDir: '/tmp',
|
||||
keepExtensions: true,
|
||||
maxFieldsSize: options.maxFieldsSize || (2 * 1024), // only field size, not files
|
||||
@@ -29,7 +29,7 @@ module.exports = function multipart(options) {
|
||||
req.fields = { };
|
||||
req.files = { };
|
||||
|
||||
form.parse(req, function (err, fields, files) {
|
||||
form.parse(req, function (err /*, fields, files */) {
|
||||
if (err) return res.status(400).send('Error parsing request');
|
||||
next(null);
|
||||
});
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
isMountProvider,
|
||||
isManagedProvider,
|
||||
mountObjectFromBackupConfig,
|
||||
tryAddMount,
|
||||
removeMount,
|
||||
@@ -40,6 +40,7 @@ function validateMountOptions(type, options) {
|
||||
if (typeof options.password !== 'string') return new BoxError(BoxError.BAD_FIELD, 'password is not a string');
|
||||
if (typeof options.host !== 'string') return new BoxError(BoxError.BAD_FIELD, 'host is not a string');
|
||||
if (typeof options.remoteDir !== 'string') return new BoxError(BoxError.BAD_FIELD, 'remoteDir is not a string');
|
||||
if ('seal' in options && typeof options.seal !== 'boolean') return new BoxError(BoxError.BAD_FIELD, 'seal is not a boolean');
|
||||
return null;
|
||||
case 'nfs':
|
||||
if (typeof options.host !== 'string') return new BoxError(BoxError.BAD_FIELD, 'host is not a string');
|
||||
@@ -60,14 +61,16 @@ function validateMountOptions(type, options) {
|
||||
}
|
||||
}
|
||||
|
||||
function isMountProvider(provider) {
|
||||
function isManagedProvider(provider) {
|
||||
return provider === 'sshfs' || provider === 'cifs' || provider === 'nfs' || provider === 'ext4';
|
||||
}
|
||||
|
||||
function mountObjectFromBackupConfig(backupConfig) {
|
||||
assert(isManagedProvider(backupConfig.provider));
|
||||
|
||||
return {
|
||||
name: 'backup',
|
||||
hostPath: backupConfig.mountPoint,
|
||||
hostPath: paths.MANAGED_BACKUP_MOUNT_DIR,
|
||||
mountType: backupConfig.provider,
|
||||
mountOptions: backupConfig.mountOptions
|
||||
};
|
||||
@@ -80,15 +83,21 @@ function mountObjectFromBackupConfig(backupConfig) {
|
||||
function renderMountFile(mount) {
|
||||
assert.strictEqual(typeof mount, 'object');
|
||||
|
||||
const {name, hostPath, mountType, mountOptions} = mount;
|
||||
const { name, hostPath, mountType, mountOptions } = mount;
|
||||
|
||||
let options, what, type;
|
||||
switch (mountType) {
|
||||
case 'cifs':
|
||||
case 'cifs': {
|
||||
const out = safe.child_process.execSync(`systemd-escape -p '${hostPath}'`, { encoding: 'utf8' }); // this ensures uniqueness of creds file
|
||||
if (!out) throw new BoxError(BoxError.FS_ERROR, `Could not determine credentials file name: ${safe.error.message}`);
|
||||
const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`);
|
||||
if (!safe.fs.writeFileSync(credentialsFilePath, `username=${mountOptions.username}\npassword=${mountOptions.password}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not write credentials file: ${safe.error.message}`);
|
||||
|
||||
type = 'cifs';
|
||||
what = `//${mountOptions.host}` + path.join('/', mountOptions.remoteDir);
|
||||
options = `username=${mountOptions.username},password=${mountOptions.password},rw,iocharset=utf8,file_mode=0666,dir_mode=0777,uid=yellowtent,gid=yellowtent`;
|
||||
options = `credentials=${credentialsFilePath},rw,${mountOptions.seal ? 'seal,' : ''}iocharset=utf8,file_mode=0666,dir_mode=0777,uid=yellowtent,gid=yellowtent`;
|
||||
break;
|
||||
}
|
||||
case 'nfs':
|
||||
type = 'nfs';
|
||||
what = `${mountOptions.host}:${mountOptions.remoteDir}`;
|
||||
@@ -101,6 +110,8 @@ function renderMountFile(mount) {
|
||||
break;
|
||||
case 'sshfs': {
|
||||
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`);
|
||||
if (!safe.fs.writeFileSync(keyFilePath, `${mount.mountOptions.privateKey}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, `Could not write private key: ${safe.error.message}`);
|
||||
|
||||
type = 'fuse.sshfs';
|
||||
what = `${mountOptions.user}@${mountOptions.host}:${mountOptions.remoteDir}`;
|
||||
options = `allow_other,port=${mountOptions.port},IdentityFile=${keyFilePath},StrictHostKeyChecking=no,reconnect`; // allow_other means non-root users can access it
|
||||
@@ -126,6 +137,11 @@ async function removeMount(mount) {
|
||||
if (mountType === 'sshfs') {
|
||||
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mountOptions.host}`);
|
||||
safe.fs.unlinkSync(keyFilePath);
|
||||
} else if (mountType === 'cifs') {
|
||||
const out = safe.child_process.execSync(`systemd-escape -p '${hostPath}'`, { encoding: 'utf8' });
|
||||
if (!out) return;
|
||||
const credentialsFilePath = path.join(paths.CIFS_CREDENTIALS_DIR, `${out.trim()}.cred`);
|
||||
safe.fs.unlinkSync(credentialsFilePath);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,13 +194,6 @@ async function tryAddMount(mount, options) {
|
||||
|
||||
if (constants.TEST) return;
|
||||
|
||||
if (mount.mountType === 'sshfs') {
|
||||
const keyFilePath = path.join(paths.SSHFS_KEYS_DIR, `id_rsa_${mount.mountOptions.host}`);
|
||||
|
||||
safe.fs.mkdirSync(paths.SSHFS_KEYS_DIR);
|
||||
if (!safe.fs.writeFileSync(keyFilePath, `${mount.mountOptions.privateKey}\n`, { mode: 0o600 })) throw new BoxError(BoxError.FS_ERROR, safe.error);
|
||||
}
|
||||
|
||||
const [error] = await safe(shell.promises.sudo('addMount', [ ADD_MOUNT_CMD, renderMountFile(mount), options.timeout ], {}));
|
||||
if (error && error.code === 2) throw new BoxError(BoxError.MOUNT_ERROR, 'Failed to unmount existing mount'); // at this point, the old mount config is still there
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ async function setBlocklist(blocklist, auditSource) {
|
||||
for (const line of blocklist.split('\n')) {
|
||||
if (!line || line.startsWith('#')) continue;
|
||||
const rangeOrIP = line.trim();
|
||||
// this checks for IPv4 and IPv6
|
||||
if (!validator.isIP(rangeOrIP) && !validator.isIPRange(rangeOrIP)) throw new BoxError(BoxError.BAD_FIELD, `${rangeOrIP} is not a valid IP or range`);
|
||||
|
||||
if (rangeOrIP.indexOf('/') === -1) {
|
||||
|
||||
@@ -12,6 +12,7 @@ map $upstream_http_referrer_policy $hrp {
|
||||
|
||||
# http server
|
||||
server {
|
||||
# note listen [::]:80 only listens on ipv6 since ipv6only=on since nginx 1.3.4. listen 80 listens on ipv4 only
|
||||
<% if (endpoint === 'ip' || endpoint === 'setup') { -%>
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
@@ -87,7 +88,13 @@ server {
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256;
|
||||
ssl_prefer_server_ciphers off;
|
||||
|
||||
# some apps have underscores in headers. this is apparently disabled by default because of some legacy CGI compat
|
||||
underscores_in_headers on;
|
||||
|
||||
<% if (endpoint !== 'ip' && endpoint !== 'setup') { -%>
|
||||
# dhparams is generated only after dns setup
|
||||
ssl_dhparam /home/yellowtent/platformdata/dhparams.pem;
|
||||
<% } -%>
|
||||
add_header Strict-Transport-Security "max-age=63072000";
|
||||
|
||||
<% if ( ocsp ) { -%>
|
||||
@@ -112,14 +119,6 @@ server {
|
||||
add_header Referrer-Policy $hrp;
|
||||
proxy_hide_header Referrer-Policy;
|
||||
|
||||
# workaround caching issue after /logout. if max-age is set, browser uses cache and user thinks they have not logged out
|
||||
# have to keep all the add_header here to avoid repeating all add_header in location block
|
||||
<% if (proxyAuth.enabled) { %>
|
||||
proxy_hide_header Cache-Control;
|
||||
add_header Cache-Control no-cache;
|
||||
add_header Set-Cookie $auth_cookie;
|
||||
<% } %>
|
||||
|
||||
# gzip responses that are > 50k and not images
|
||||
gzip on;
|
||||
gzip_min_length 18k;
|
||||
@@ -187,10 +186,6 @@ server {
|
||||
proxy_pass http://127.0.0.1:3000/well-known-handler/;
|
||||
}
|
||||
|
||||
<% if (proxyAuth.enabled) { %>
|
||||
proxy_set_header X-App-ID "<%= proxyAuth.id %>";
|
||||
<% } %>
|
||||
|
||||
# increase the proxy buffer sizes to not run into buffer issues (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers)
|
||||
proxy_buffer_size 128k;
|
||||
proxy_buffers 4 256k;
|
||||
@@ -211,17 +206,17 @@ server {
|
||||
<% if ( endpoint === 'dashboard' || endpoint === 'setup' ) { %>
|
||||
location /api/ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
client_max_body_size 2m;
|
||||
}
|
||||
|
||||
location ~ ^/api/v1/(developer|session)/login$ {
|
||||
location ~ ^/api/v1/cloudron/login$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
limit_req zone=admin_login burst=5;
|
||||
}
|
||||
|
||||
# the read timeout is between successive reads and not the whole connection
|
||||
location ~ ^/api/v1/apps/.*/exec$ {
|
||||
location ~ ^/api/v1/apps/.*/exec/.*/start$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
proxy_read_timeout 30m;
|
||||
}
|
||||
@@ -241,6 +236,11 @@ server {
|
||||
client_max_body_size 0;
|
||||
}
|
||||
|
||||
location ~ ^/api/v1/profile/backgroundImage {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 0;
|
||||
}
|
||||
|
||||
# graphite paths (uncomment block below and visit /graphite-web/dashboard)
|
||||
# remember to comment out the CSP policy as well to access the graphite dashboard
|
||||
# location ~ ^/graphite-web/ {
|
||||
@@ -284,6 +284,11 @@ server {
|
||||
location <%= proxyAuth.location %> {
|
||||
auth_request /proxy-auth;
|
||||
auth_request_set $auth_cookie $upstream_http_set_cookie;
|
||||
|
||||
auth_request_set $user $upstream_http_x_remote_user;
|
||||
auth_request_set $email $upstream_http_x_remote_email;
|
||||
auth_request_set $name $upstream_http_x_remote_name;
|
||||
|
||||
error_page 401 = @proxy-auth-login;
|
||||
|
||||
proxy_pass http://<%= ip %>:<%= port %>;
|
||||
@@ -301,12 +306,19 @@ server {
|
||||
}
|
||||
<% } %>
|
||||
|
||||
<% Object.keys(httpPaths).forEach(function (path) { -%>
|
||||
location "<%= path %>" {
|
||||
# the trailing / will replace part of the original URI matched by the location.
|
||||
proxy_pass http://<%= ip %>:<%= httpPaths[path] %>/;
|
||||
}
|
||||
<% }); %>
|
||||
<% if (proxyAuth.enabled) { %>
|
||||
# workaround caching issue after /logout. if max-age is set, browser uses cache and user thinks they have not logged out
|
||||
# IMPORTANT: have to keep all the add_headers at top level here to avoid repeating all the add_headers and proxy_set_headers in location block
|
||||
proxy_hide_header Cache-Control;
|
||||
add_header Cache-Control no-cache;
|
||||
add_header Set-Cookie $auth_cookie;
|
||||
|
||||
# To prevent header spoofing from a client, these variables must always be set (or removed with '') for all proxyAuth routes
|
||||
proxy_set_header X-App-ID "<%= proxyAuth.id %>";
|
||||
proxy_set_header X-Remote-User $user;
|
||||
proxy_set_header X-Remote-Email $email;
|
||||
proxy_set_header X-Remote-Name $name;
|
||||
<% } %>
|
||||
|
||||
<% } else if ( endpoint === 'redirect' ) { %>
|
||||
location / {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user