Compare commits

527 Commits

Author SHA1 Message Date
Girish Ramakrishnan 7df1399f17 typo 2023-02-01 21:52:27 +01:00
Girish Ramakrishnan ce8f6c4c6b ubuntu 18: systemd kill ends up killing the script itself
This is because KillMode=control-group by default

(cherry picked from commit c07c8b5bb8)
2023-02-01 18:51:16 +01:00
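Note: a minimal systemd unit fragment illustrating the default being worked around (unit content is illustrative, not from the source):

    [Service]
    # the default, KillMode=control-group, means stopping the unit kills every
    # process left in its cgroup - including a script that triggered the stop.
    # KillMode=process signals only the unit's main process.
    KillMode=process
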
Girish Ramakrishnan 0832ebf052 ubuntu 18: ExecReload does not work
(cherry picked from commit 7bbc7c2306)
2023-02-01 17:28:27 +01:00
Girish Ramakrishnan 7be176a3b5 reverseproxy: LE backdates certs by an hour
https://community.letsencrypt.org/t/valid-from-date-on-cert-off-by-1-hour/103239
(cherry picked from commit 54add73d2a)
2023-02-01 13:07:40 +01:00
Girish Ramakrishnan 4d9612889b print subject and fix notBefore parsing
(cherry picked from commit 3f70edf3ec)
2023-02-01 12:39:22 +01:00
Girish Ramakrishnan 29faa722ac typo
(cherry picked from commit c63e0036cb)
2023-02-01 12:39:16 +01:00
Girish Ramakrishnan 2442abf10b reverseproxy: force renewal only renews if not issued in last 5 mins
otherwise, this leads to repeated renewals in checkCerts

(cherry picked from commit 3b9486596d)
2023-02-01 12:05:11 +01:00
Girish Ramakrishnan dcbec79b77 reverseproxy: get dates
(cherry picked from commit eddfd20f24)
2023-02-01 12:05:05 +01:00
Girish Ramakrishnan f874acbeb9 reverseproxy: add option to force renewal for e2e
(cherry picked from commit 690df0e5c4)
2023-02-01 12:04:58 +01:00
Girish Ramakrishnan 3e01faeca3 7.3.6 changes 2023-01-31 18:06:25 +01:00
Girish Ramakrishnan f7d7e36f10 reverseproxy: rebuild in 7.3 only 2023-01-31 18:02:13 +01:00
Girish Ramakrishnan 972f453535 reverseproxy: fix issue where renewed certs are not written to disk
(cherry picked from commit ce9e78d23b)
2023-01-31 17:59:30 +01:00
Girish Ramakrishnan d441b9d926 backup cleaner: do not delete mail snapshot
(cherry picked from commit 02b6aa93cb)
2023-01-31 11:51:32 +01:00
Johannes Zellner e0c996840d Use correct error object to avoid crash
(cherry picked from commit 6f84fd3f71)
2023-01-31 11:33:30 +01:00
Girish Ramakrishnan 3c5987cdad more 7.3.5 changes 2023-01-25 10:22:37 +01:00
Girish Ramakrishnan e0673d78b9 dns: resolve cname records using unbound
cname record can be external and the original NS may not respond to
recursive queries

(cherry picked from commit e4d9dbb558)
2023-01-25 09:58:24 +01:00
Girish Ramakrishnan 08136a5347 More 7.3.4 changes 2023-01-17 11:18:31 +01:00
Girish Ramakrishnan 98b5c77177 s3: add listing check
This is needed for situations like Cloudflare, where the endpoint can
be mistakenly configured with the bucket name, like https://xx.r2.cloudflarestorage.com/cloudron-backups .
The upload and del calls work, but list and copy do not.

(cherry picked from commit 093fc98ae5)
2023-01-17 11:18:05 +01:00
Girish Ramakrishnan ea441d0b4b s3: throw any copy errors
(cherry picked from commit 40bcfdba76)
2023-01-17 11:18:00 +01:00
Girish Ramakrishnan d8b5b49ffd Update manifest format for runtimeDirs change 2023-01-16 12:27:58 +01:00
Girish Ramakrishnan 13fd595e8b contabo: network can be real slow
(cherry picked from commit 68f4f1ba85)
2022-12-24 16:34:18 +01:00
Girish Ramakrishnan 5f11e430bd unbound: disable controller interface explicitly
https://github.com/NLnetLabs/unbound/issues/806
(cherry picked from commit ae30fe25d7)
2022-12-24 16:34:09 +01:00
Girish Ramakrishnan cfa9f901c1 Fix crash in RBL check
(cherry picked from commit d5793bc7c0)
2022-12-09 00:00:46 +01:00
Girish Ramakrishnan ce2c9d9ac5 reverseproxy: fix typo in regexp matching
(cherry picked from commit d7d43c73fe)
2022-12-08 10:06:48 +01:00
Girish Ramakrishnan 8d5039da35 7.3.5 changes
(cherry picked from commit a198d1ea8d)
2022-12-08 10:06:43 +01:00
Girish Ramakrishnan c264ff32c2 du: fix crash when filesystem is cifs/nfs/sshfs
(cherry picked from commit 67cde5a62c)
2022-12-08 08:54:41 +01:00
Johannes Zellner 5e30bea155 Start with a default to not fail if no swap is present
(cherry picked from commit d126f056fc)
2022-12-08 08:54:33 +01:00
Johannes Zellner 2c63a89199 Disallow jupyter hub on demo
(cherry picked from commit db5e0b8fdf)
2022-12-08 08:54:27 +01:00
Girish Ramakrishnan d547bad17a 7.3.4 changes 2022-11-30 21:19:03 +01:00
Girish Ramakrishnan 36ddb8c7c2 prune: normalize the tag 2022-11-30 21:12:00 +01:00
Girish Ramakrishnan 6c9aa1a77f Revert "prune all images instead of parsing output"
This reverts commit d42c524a46.

This caused a bug where all app images were getting removed, since we
remove all containers on infra update!
2022-11-30 20:00:51 +01:00
Girish Ramakrishnan 27dec3f61e bump test version 2022-11-30 19:56:51 +01:00
Girish Ramakrishnan 79cb8ef251 add route to get platform status 2022-11-30 19:54:32 +01:00
Girish Ramakrishnan f27847950c reverseproxy: notify cert change only in cron job
notifying this in ensureCertificate does not work if the provider changed in the middle anyway.
might as well get them in sync in the cron job.

this change also resulted in the tls addon getting restarted non-stop if you change from a wildcard
to a non-wildcard cert, since ensureCertificate notifies the change.
2022-11-30 15:55:32 +01:00
Girish Ramakrishnan 69b46d82ab Fix typo 2022-11-30 14:56:40 +01:00
Girish Ramakrishnan 2a660fa59d change terminology to running and unresponsive 2022-11-30 14:41:48 +01:00
Girish Ramakrishnan e942b8fe7e better debugs 2022-11-30 13:08:05 +01:00
Girish Ramakrishnan 1c3ef36a47 typo in graphite version 2022-11-30 10:37:28 +01:00
Girish Ramakrishnan d42c524a46 prune all images instead of parsing output
nothing is really lost since these are just unused images
2022-11-30 10:01:50 +01:00
Girish Ramakrishnan 15cc624fa5 do string compare in certs 2022-11-30 09:59:19 +01:00
Girish Ramakrishnan 7e1c56161d reverseproxy: notify services immediately
there are 2 cases where certs change (in db):
* LE cert is new or renewed
* fallback cert changes with fallback provider

if something is off, i.e. we crashed midway through the above, the user can click the
rebuild button.
2022-11-29 18:27:08 +01:00
Girish Ramakrishnan 77a5f01585 reverseproxy: rebuild only when needed
re-creating nginx configs is only needed in 3 cases:
* the provider changes. we create a rebuild file for this
* the nginx config is somehow corrupted by external changes. the user can click the ui button
* on startup, the dashboard always creates the nginx configs anyway

so it's always safe to provide the button
2022-11-29 18:17:53 +01:00
Girish Ramakrishnan 3aa3cb6e39 tls: remove any old location certs 2022-11-29 17:58:51 +01:00
Girish Ramakrishnan 302f975d5c handle type mismatch 2022-11-29 17:13:58 +01:00
Girish Ramakrishnan d23c65a7e7 reverseproxy: cert/key/csr are all pem
just use strings instead of binary/string confusion
2022-11-29 14:33:52 +01:00
Girish Ramakrishnan 1cf613dca6 Fix name of wildcard alias domain cert and configs 2022-11-29 13:35:17 +01:00
Girish Ramakrishnan 89127e1df7 reverseproxy: rework cert logic
9c8f78a059 already fixed many of the cert issues.

However, some issues were caught in the CI:

* The TLS addon had to be rebuilt and not just restarted. For this reason, we now
  mount a directory instead of mounting individual files. This way the container only needs a restart.

* Cleanups must be driven by the database and not the filesystem. If files are deleted on disk,
  or after a restore, the certs are left dangling forever in the db.

* Separate the db cert logic and disk cert logic. This way we can sync as many times as we want and whenever we want.
2022-11-29 11:07:23 +01:00
Girish Ramakrishnan c844be5be1 make validateLocations return error 2022-11-28 22:16:22 +01:00
Girish Ramakrishnan e15c6324e4 getDuplicateErrorDetails does not need domain map 2022-11-28 22:14:10 +01:00
Girish Ramakrishnan b70572a6e9 dns: fqdn only needs domain string
This is from the caas days, when we had the hyphenated subdomains flag
2022-11-28 21:56:25 +01:00
Girish Ramakrishnan cab7409d85 mail: update haraka 2022-11-24 18:27:33 +01:00
Girish Ramakrishnan ce00165e41 Update containerd
this possibly fixes stuck containers - https://github.com/containerd/containerd/issues/6772
2022-11-24 14:49:12 +01:00
Girish Ramakrishnan 38312b810a add note 2022-11-24 01:21:32 +01:00
Girish Ramakrishnan 9477e0bbb5 Fix crash when accessing memory_stats 2022-11-24 00:40:40 +01:00
Girish Ramakrishnan 4c6f7de10a more debug messages 2022-11-23 22:03:18 +01:00
Girish Ramakrishnan 28f3b697a1 tokens: add test for readonly token 2022-11-23 18:16:03 +01:00
Girish Ramakrishnan f728971479 add test that only owner can open tickets 2022-11-23 17:56:24 +01:00
Girish Ramakrishnan 30fb1aa351 proxy: do not set Host header when proxying
The default when proxying is $proxy_host.

Proxied apps must use the X-Forwarded-Host header to determine the intended
target. I think we overwrote the Host header back in the day because apps
had varied support for this. Ideally, it can be removed across all our configurations.
2022-11-23 16:50:38 +01:00
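Note: a sketch of the nginx directives involved (upstream name illustrative; Cloudron's actual config is not shown here):

    location / {
        proxy_pass http://upstream_app;
        # nginx's built-in default is: proxy_set_header Host $proxy_host;
        # rather than overriding Host, pass the original request host along
        # so the app can reconstruct the intended target:
        proxy_set_header X-Forwarded-Host $host;
    }
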
Johannes Zellner a5d244b593 Add tests for proxy app upstreamUri 2022-11-23 14:36:57 +01:00
Girish Ramakrishnan 817e950d47 Fix upstreamUri verification 2022-11-23 12:58:17 +01:00
Girish Ramakrishnan 258eea4318 Fix appstore-test 2022-11-22 22:14:59 +01:00
Girish Ramakrishnan 1b0c33fc73 Fix system-test 2022-11-22 21:38:22 +01:00
Girish Ramakrishnan 1d56bcb2e0 Update node to 16.18.1 2022-11-22 19:29:54 +01:00
Johannes Zellner 35ea3b1575 Also include potential swap files in the disk usage stats 2022-11-22 12:15:17 +01:00
Girish Ramakrishnan c639559a6d Update docker 20.10.21
many users are reporting hangs in docker, maybe this solves it
2022-11-21 13:20:49 +01:00
Girish Ramakrishnan b437466f8c mail: send quota value as raw bytes 2022-11-21 09:45:17 +01:00
Girish Ramakrishnan 3b8221190d Better error message 2022-11-20 18:16:16 +01:00
Girish Ramakrishnan 250d54f157 postgresql: fix issue with pg_ctl timing out 2022-11-20 18:05:37 +01:00
Girish Ramakrishnan 5d0309f1ca reverseproxy: check renewal against cert instead of the files 2022-11-17 16:40:14 +01:00
Girish Ramakrishnan 00771d8197 reverseproxy: move dashboard config to subdir as well 2022-11-17 15:50:34 +01:00
Girish Ramakrishnan 641752a222 reverseproxy: remove getAcmeApiOptions 2022-11-17 12:39:23 +01:00
Girish Ramakrishnan e3b0d3960a reverseproxy: create configs in subdirectories for easy management 2022-11-17 12:16:11 +01:00
Girish Ramakrishnan cd90864bc3 typos 2022-11-17 11:46:29 +01:00
Girish Ramakrishnan 23cc0d6f0e acme2: do not pass around paths 2022-11-17 11:44:36 +01:00
Girish Ramakrishnan 51f43597bc Make location have subdomain just like in the database 2022-11-17 10:22:46 +01:00
Girish Ramakrishnan 28b5457e9c Fix validateLocations return value 2022-11-17 10:22:46 +01:00
Girish Ramakrishnan 35076b0e93 use vhost naming for nginx config terminology 2022-11-17 10:22:46 +01:00
Girish Ramakrishnan 293b8a0d34 remove location type from nginx filename
this will keep it consistent with upcoming cert filenames
2022-11-17 10:22:46 +01:00
Girish Ramakrishnan 0c8b8346f4 Move getLocationsSync into apps.js 2022-11-17 10:22:43 +01:00
Girish Ramakrishnan 8c2a1906ba Add to changes 2022-11-17 08:00:44 +01:00
Girish Ramakrishnan 720bafaf02 logrotate: only keep 14 days of logs
https://unix.stackexchange.com/questions/261696/logrotation-rotate-and-maxage-command
https://blog.gsterling.de/2017/10/03/logrotate-misconceptions-about-maxsize-and-size/
2022-11-17 00:47:39 +01:00
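Note: a minimal logrotate stanza showing the rotate/maxage interaction discussed in the links (path illustrative):

    /home/yellowtent/platformdata/logs/*.log {
        daily
        rotate 14      # keep at most 14 rotated files
        maxage 14      # additionally delete rotated files older than 14 days
        compress
        missingok
    }
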
Johannes Zellner 0b6bbf4cc2 Set exec LANG via rest API only 2022-11-16 16:14:54 +01:00
Girish Ramakrishnan 013e15e361 reverseproxy: do deep compare in tlsConfig
wildcard field might change
2022-11-16 16:04:26 +01:00
Johannes Zellner 9da4f55754 Set default LANG in exec container to make umlauts and other special characters work 2022-11-16 15:49:06 +01:00
Girish Ramakrishnan e3642f4278 reverse proxy: rebuild configs on provider change 2022-11-16 12:42:06 +01:00
Girish Ramakrishnan 19b0d47988 remove obsolete fixme 2022-11-16 11:46:31 +01:00
Girish Ramakrishnan f82f533f36 Add SIGHUP handler to reload certs
we have to reload directory server certs out of process
2022-11-16 08:24:42 +01:00
Girish Ramakrishnan 15d5dfd406 reverseproxy: move the reload out of the write functions 2022-11-16 07:55:26 +01:00
Girish Ramakrishnan af870d0eac mail: fix dnsbl count
the empty string was parsed as [''], leading the UI to think there is one zone
2022-11-14 22:06:33 +01:00
Girish Ramakrishnan 7b7e5d24de domains: update event not generated 2022-11-14 10:58:47 +01:00
Girish Ramakrishnan 0843baad8b reverseproxy: remove options from renewCerts 2022-11-14 08:13:47 +01:00
Girish Ramakrishnan 5e2a55ecad add debug 2022-11-13 22:10:01 +01:00
Girish Ramakrishnan c597d9fbaa add fixme 2022-11-13 21:55:13 +01:00
Girish Ramakrishnan 8b43d43e35 reverseproxy: compare the cert path on cert renewal
fqdn will not match for wildcard certs
2022-11-13 18:06:34 +01:00
Girish Ramakrishnan 5447181e41 cert: add some asserts 2022-11-13 17:27:05 +01:00
Girish Ramakrishnan 3caf77cee6 cert: add message for fallback cert 2022-11-13 16:59:22 +01:00
Girish Ramakrishnan 2515a0f18f cert: do not autoclean default cert 2022-11-13 16:56:51 +01:00
Girish Ramakrishnan 9c8f78a059 reverseproxy: simplify certificate renewal
An issue was that the mail container was not getting refreshed with up-to-date
certs. The root cause is that it is refreshed only in the renewCerts()
cron job. If the cert renewal was caused by an app task, then the cron job will
skip the restart (since the cert is fresh).

The other issue is that we keep hitting 0-length certs when we run out of disk
space. The root cause is that, when out of disk space, a cert renewal will
cause the cert to be written, but since there is no space it ends up 0-length. Then, when
the user tries to restart the server, the box code does not write the cert again.

This change fixes the above two issues:
* To simplify, we use the fallback cert only if we failed to get an LE cert. Expired LE certs
  will continue to be used. nginx is fine with this.

* restart the directory server as well on renewal
2022-11-13 11:55:12 +01:00
Girish Ramakrishnan f917eb8f13 rename variable 2022-11-11 16:21:28 +01:00
Johannes Zellner d19c7ac3e3 Return repository info in app rest api 2022-11-10 20:00:55 +01:00
Johannes Zellner f61131babf Amend app.repository depending on presence and value of dockerImage 2022-11-10 18:12:13 +01:00
Girish Ramakrishnan e9eeab074a Clarify error message further 2022-11-10 13:50:28 +01:00
Girish Ramakrishnan 3477cf474f security: do not password reset mail to cloudron owned mail domain
https://forum.cloudron.io/topic/7951/privilege-escalation-through-mail-manager-role
2022-11-10 12:59:03 +01:00
Girish Ramakrishnan d49c171c79 mail: fix 100% cpu use with unreachable servers 2022-11-09 23:04:05 +01:00
Johannes Zellner 0035247618 add app repository support 2022-11-09 15:46:00 +01:00
Girish Ramakrishnan 3d6cdf8ff3 run disk usage task once a day 2022-11-09 15:21:53 +01:00
Girish Ramakrishnan 925b08c7a1 Fix log test 2022-11-06 16:17:55 +01:00
Girish Ramakrishnan 440504a6e9 add tests for both the stream 2022-11-06 15:44:04 +01:00
Girish Ramakrishnan ca44f47af3 replace split with our own LogStream
split module is archived
2022-11-06 13:44:47 +01:00
Girish Ramakrishnan 9dac5e3406 typo 2022-11-06 11:57:45 +01:00
Girish Ramakrishnan d0b7097706 rimraf is gone 2022-11-06 11:48:56 +01:00
Girish Ramakrishnan fac0a9ca5d classes are not hoisted 2022-11-06 11:44:43 +01:00
Girish Ramakrishnan b6f707955c Update packages 2022-11-06 10:27:10 +01:00
Girish Ramakrishnan 962d7030bb replace progress-stream with our implementation
upstream is mostly unmaintained
2022-11-06 10:17:14 +01:00
Girish Ramakrishnan 5af1bbfb3c once: add debug 2022-11-05 15:36:07 +01:00
Girish Ramakrishnan f2d25ff2fd remove many of the scripts 2022-11-05 15:26:56 +01:00
Girish Ramakrishnan 94327e397a remove unused ejs-cli 2022-11-05 15:22:27 +01:00
Girish Ramakrishnan 9f54ec47b6 remove nyc and node-sass modules 2022-11-05 15:20:39 +01:00
Girish Ramakrishnan cb85336595 remove js-yaml (unused) 2022-11-05 15:19:00 +01:00
Girish Ramakrishnan b28d559d1a remove unused tar-stream 2022-11-05 15:17:26 +01:00
Girish Ramakrishnan 4918d2099f remove json module (not used) 2022-11-05 15:15:53 +01:00
Girish Ramakrishnan 8a5d4e2fb0 better debugs 2022-11-05 08:43:02 +01:00
Girish Ramakrishnan aae52ec795 backups: remove periodic dumping of heap info
this has not been as useful as I expected
2022-11-05 08:32:38 +01:00
Girish Ramakrishnan 549cb92ce7 return swap listing in the disk route 2022-11-04 15:25:12 +01:00
Johannes Zellner c4c90cfaf9 Add route to download app backups 2022-11-04 10:24:12 +01:00
Girish Ramakrishnan ad3e593f01 mail: disallow more characters in display name 2022-11-04 08:50:47 +01:00
Girish Ramakrishnan 1c4205b714 mount: ignore filesystem type 2022-11-03 23:28:02 +01:00
Girish Ramakrishnan 7a8559ca9e 7.3.3 changes 2022-11-02 22:41:24 +01:00
Girish Ramakrishnan 8bc3b832e7 detect oom in tasks correctly 2022-11-02 22:39:25 +01:00
Girish Ramakrishnan 80a3ca0f46 remove 16.04 related task logic 2022-11-02 21:22:42 +01:00
Girish Ramakrishnan 0f0a98f7ac Add TimeoutStopSec=10s for systemctl kill to work faster 2022-11-02 18:46:20 +01:00
Girish Ramakrishnan 59783eb11b ldap: memberof is a DN and not just the group name
https://ldapwiki.com/wiki/MemberOf
https://access.redhat.com/documentation/en-us/red_hat_jboss_operations_network/3.1/html/admin_initial_setup_inventory_groups_and_users/ex-ldap-authz
2022-10-30 15:07:26 +01:00
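Note: for illustration, memberOf holds the group's full DN rather than the bare group name (example entry, not from the source):

    dn: cn=jane,ou=users,dc=example,dc=com
    memberOf: cn=admins,ou=groups,dc=example,dc=com
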
Girish Ramakrishnan a2bf9180af relay: office365 wants login AUTH
https://support.microsoft.com/en-us/office/outlook-com-no-longer-supports-auth-plain-authentication-07f7d5e9-1697-465f-84d2-4513d4ff0145
2022-10-27 23:18:43 +02:00
Johannes Zellner e662cd7c80 If we can't fetch applink upstreamUri, just stop icon and title detection
This may happen for Cloudflare protected domains
2022-10-27 15:41:51 +02:00
Girish Ramakrishnan 2f946de775 make cache folders always writable 2022-10-24 23:58:20 +02:00
Girish Ramakrishnan d8eb8d23bb manifest: add runtimeDirs 2022-10-24 22:34:06 +02:00
Girish Ramakrishnan 17c7cc5ec7 Remove external df module
It has some locale-related parsing issues
2022-10-18 19:56:18 +02:00
Johannes Zellner 8b295fbfdb total stats are reported directly as single value 2022-10-14 12:00:24 +02:00
Johannes Zellner 4e47a1ad3b Clean stats api response to have specific response types 2022-10-14 11:25:43 +02:00
Johannes Zellner 8f91991e1e Also collect total I/O stats for the containers 2022-10-14 11:15:52 +02:00
Girish Ramakrishnan ae66692eda Ensure collectd directory 2022-10-14 10:43:30 +02:00
Girish Ramakrishnan 7cb326cfff no camel case in filenames 2022-10-14 08:22:04 +02:00
Girish Ramakrishnan eb5c90a2e7 du: do not crash when app dir is missing
this can happen when the app is installing/uninstalling
2022-10-13 23:35:01 +02:00
Girish Ramakrishnan 91d1d0b74b add to changes 2022-10-13 23:12:20 +02:00
Girish Ramakrishnan 351292ce1a graph: return sum cpu value 2022-10-13 23:03:31 +02:00
Girish Ramakrishnan ca4e1e207c return cpuCount from app/service graphs as well 2022-10-13 22:38:44 +02:00
Girish Ramakrishnan 1872cea763 graphs: do not average cpu use
Show like htop/top: cpu core count * 100
2022-10-13 22:36:20 +02:00
Girish Ramakrishnan 4015afc69c graphs: send service graphs 2022-10-13 20:52:22 +02:00
Johannes Zellner 6d8c3febac Also add rootDSE to the directory server 2022-10-12 22:13:54 +02:00
Girish Ramakrishnan b5da4143c9 graphs: add app response in system graphs 2022-10-12 22:08:10 +02:00
Girish Ramakrishnan 4fe0402735 box data is separate from mail data already 2022-10-12 11:59:28 +02:00
Girish Ramakrishnan 4a3d85a269 add docker disk usage tests 2022-10-12 10:57:22 +02:00
Girish Ramakrishnan fa7c0a6e1b add disks tests 2022-10-12 10:45:29 +02:00
Girish Ramakrishnan 62d68e2733 graphs: remove the disk info 2022-10-12 10:30:02 +02:00
Girish Ramakrishnan edb6ed91fe add disk usage task 2022-10-12 10:26:21 +02:00
Girish Ramakrishnan a3f7ce15ab system: rework disks api to return by filesystem 2022-10-12 09:42:14 +02:00
Girish Ramakrishnan 4348556dc7 Fix applinks test 2022-10-12 09:37:25 +02:00
Girish Ramakrishnan deb6d78e4d bump addon timeouts 2022-10-11 23:33:35 +02:00
Girish Ramakrishnan 3c963329e9 du: exclude option 2022-10-11 23:14:50 +02:00
Girish Ramakrishnan 656f3fcc13 add system.du 2022-10-11 23:06:54 +02:00
Girish Ramakrishnan 760301ce02 Add docker.df 2022-10-11 23:06:51 +02:00
Girish Ramakrishnan 6f61145b01 configurecollectd.sh is no more 2022-10-11 21:04:25 +02:00
Johannes Zellner cbaf86b8c7 Use counter values for docker stats in collectd and grafana queries 2022-10-11 19:06:40 +02:00
Girish Ramakrishnan 9d35756db5 graphs: just query graphite IP instead of localhost mapping 2022-10-11 12:44:37 +02:00
Girish Ramakrishnan 22790fd9b7 system: include only ram info for graphs
app graphs only contain ram info since that is what docker stats provides
2022-10-11 11:54:06 +02:00
Johannes Zellner ad29f51833 Fixup typo guage -> gauge in docker-stats.py 2022-10-11 10:54:53 +02:00
Girish Ramakrishnan 3caffdb4e1 Rework app stats
Previously, the du plugin was collecting data every 20 seconds but
carbon was configured to only keep data every 12 hours causing much
confusion.

In the process of reworking this, it was determined:

* No need to collect disk usage info over time. Not sure how that is useful
* Instead, collect CPU/Network/Block info over time. We get this now from docker stats
* We also collect info about the services (addon containers)
* No need to reconfigure collectd on each app change anymore, since there is no
per-app collectd configuration.
2022-10-10 21:13:26 +02:00
Girish Ramakrishnan 2133eab341 postgresql: fix issue when restoring large dumps 2022-10-10 12:30:26 +02:00
Johannes Zellner 25379f1d21 Prevent code from crashing when DO access token contains non-ascii characters 2022-10-07 11:25:17 +02:00
Johannes Zellner cb8d90699b Better feedback if no applink schema is provided 2022-10-06 19:49:33 +02:00
Johannes Zellner 6e4e8bf74d Add applink upstreamUri validation 2022-10-06 19:35:07 +02:00
Johannes Zellner 87a00b9209 Fixup app link icon fetching and do not overwrite upstreamUri with redirect 2022-10-06 19:23:15 +02:00
Girish Ramakrishnan d51b022721 applinks: make get return null
this style matches rest of the code base
2022-10-06 11:32:42 +02:00
Girish Ramakrishnan cb9b9272cd lint 2022-10-06 11:27:12 +02:00
Girish Ramakrishnan 7dbb677af4 postgresql: move config to runtime for debuggability 2022-10-06 10:13:49 +02:00
Girish Ramakrishnan 071202fb00 mail: log error 2022-10-04 10:57:42 +02:00
Girish Ramakrishnan fc7414cce6 support: require superadmin 2022-10-04 10:25:11 +02:00
Girish Ramakrishnan acb92c8865 mail queue: fix search + pagination 2022-10-03 10:51:35 +02:00
Girish Ramakrishnan c3793da5bb split checkPrecondition so it can be used in cleaner as well 2022-10-02 17:41:21 +02:00
Girish Ramakrishnan 4f4a0ec289 use mount code to check mount status 2022-10-02 16:51:03 +02:00
Girish Ramakrishnan a4a9b52966 Clarify error message 2022-10-02 16:38:12 +02:00
Girish Ramakrishnan 56b981a52b backups: when checking mount status, ignore the prefix 2022-10-02 16:33:04 +02:00
Girish Ramakrishnan 074e9cfd93 rename getRootPath to getBackupRootPath 2022-10-02 16:26:27 +02:00
Girish Ramakrishnan 9d17c6606b rename to checkBackupPreconditions
since this is called only by the backup logic
2022-10-02 16:20:14 +02:00
Girish Ramakrishnan b32288050e backups: check mount status before checking available size 2022-10-02 16:16:30 +02:00
Girish Ramakrishnan 4aab03bb07 import: cleanup app import logic 2022-10-02 10:08:50 +02:00
Girish Ramakrishnan 9f788c2c57 backup: reduce memory logs 2022-10-01 20:16:08 +02:00
Girish Ramakrishnan 84ba333aa1 app proxy: disable TLS check in app health monitor 2022-10-01 11:47:52 +02:00
Girish Ramakrishnan c07fe4195f eventlog: preserve last 2 months 2022-10-01 11:01:41 +02:00
Girish Ramakrishnan 92112986a7 7.3.1 changes 2022-10-01 08:46:13 +02:00
Girish Ramakrishnan 54af286fcd app proxy: workaround for nginx not starting if upstream is down
https://sandro-keil.de/blog/let-nginx-start-if-upstream-host-is-unavailable-or-down/

without a resolver, dns names do not resolve
2022-09-30 10:36:44 +02:00
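Note: the linked workaround boils down to deferring DNS resolution to request time (hostnames illustrative):

    location / {
        resolver 127.0.0.1;                    # runtime lookups require a resolver
        set $upstream https://app.example.com;
        proxy_pass $upstream;                  # a variable makes nginx resolve per request,
    }                                          # so it starts even if the upstream is down
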
Girish Ramakrishnan 7b5df02a0e app proxy: validate uri 2022-09-29 18:56:10 +02:00
Girish Ramakrishnan 4f0e0706b2 backups: fix id
avoid box_box_ and mail_mail_ in backup ids
2022-09-29 18:01:19 +02:00
Girish Ramakrishnan 1f74febdb0 mail: do not clear eventlog on restart 2022-09-28 22:16:32 +02:00
Girish Ramakrishnan 49bf333355 merge these changelog entries 2022-09-28 18:22:00 +02:00
Girish Ramakrishnan c4af06dd66 remove duplicate changelog entry 2022-09-28 18:21:12 +02:00
Johannes Zellner f5f9a8e520 Send 404 if applink icon does not exist 2022-09-28 15:18:05 +02:00
Johannes Zellner ae376774e4 Ensure we don't put empty applink icon buffers in db 2022-09-28 15:10:17 +02:00
Johannes Zellner ff8c2184f6 Convert applink ts to timestamp 2022-09-28 14:59:30 +02:00
Johannes Zellner a7b056a84c Some tweaks for better app link detection logic 2022-09-28 14:23:45 +02:00
Girish Ramakrishnan 131d456329 Add cloudflare R2 2022-09-27 19:44:20 +02:00
Girish Ramakrishnan d4bba93dbf cloudron-setup: Fix display on newline 2022-09-27 11:25:11 +02:00
Johannes Zellner e332ad96e4 Remove duplicate changes for 7.3.0 2022-09-26 17:42:39 +02:00
Girish Ramakrishnan c455325875 More changes 2022-09-26 09:37:49 +02:00
Girish Ramakrishnan 88e9f751ea mail: update for logging changes 2022-09-26 09:37:36 +02:00
Johannes Zellner 8677e86ace Add authorization to all routes 2022-09-24 21:27:43 +02:00
Johannes Zellner cde22cd0a3 Add token scope tests in routes 2022-09-24 20:56:43 +02:00
Johannes Zellner 6d7f7fbc9a Add some more token scope tests 2022-09-24 18:52:41 +02:00
Johannes Zellner 858c85ee85 Fixup more tests 2022-09-24 18:26:31 +02:00
Johannes Zellner 15d473d506 Fixup some token tests and error handling 2022-09-24 17:29:42 +02:00
Johannes Zellner 70d3040135 Validate token scopes 2022-09-23 13:09:07 +02:00
Johannes Zellner 56c567ac86 Add token scopes 2022-09-22 22:28:59 +02:00
Girish Ramakrishnan 1f5831b79e rename queue route 2022-09-22 19:48:20 +02:00
Girish Ramakrishnan 6382216dc5 mail: proxy queue routes correctly 2022-09-20 20:02:54 +02:00
Johannes Zellner 81b59eae36 improve applink businesslogic tests and fixup api 2022-09-19 21:00:44 +02:00
Girish Ramakrishnan bc3cb6acb5 more changes 2022-09-19 20:56:28 +02:00
Johannes Zellner fa768ad305 Support secureserver.net nameservers from GoDaddy 2022-09-19 19:58:52 +02:00
Johannes Zellner 5184e017c9 Error if the task waited for fails in tests 2022-09-19 18:20:27 +02:00
Johannes Zellner d2ea6b2002 Fixup appstore tests 2022-09-19 17:21:55 +02:00
Johannes Zellner 3fcc3ea1aa Fixup reverseproxy tests 2022-09-19 17:04:44 +02:00
Girish Ramakrishnan 15877f45b8 more changes 2022-09-19 10:42:19 +02:00
Girish Ramakrishnan 0a514323a9 Update 7.3 changes 2022-09-19 10:41:48 +02:00
Johannes Zellner 1c07ec219c Do not query disk usage for apps without localstorage 2022-09-16 17:10:07 +02:00
Girish Ramakrishnan 82142f3f31 mail: fix issue where signature was appended to text attachments 2022-09-16 12:40:33 +02:00
Johannes Zellner 554dec640a Rework system graphs api 2022-09-15 16:07:08 +02:00
Girish Ramakrishnan d176ff2582 graphs: move system graph queries to the backend 2022-09-15 12:40:52 +02:00
Girish Ramakrishnan bd7ee437a8 collectd: fix memory stat collection configuration
https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/memory.html#usage-in-bytes says
this is the most efficient approach for v1. It says RSS+CACHE(+SWAP) is the more accurate value.
Elsewhere, in the note in https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/memory.html#stat-file,
it says "rss + mapped_file" will give you the resident set size of the cgroup. Overall, it's not clear how
to compute the values, so we just use the file.

https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html is better. https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#memory
says the values are separated out.
2022-09-14 18:15:26 +02:00
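Note: the files in question (cgroup v1 paths; <group> is a placeholder):

    /sys/fs/cgroup/memory/<group>/memory.usage_in_bytes   # efficient fuzz value; what we read
    /sys/fs/cgroup/memory/<group>/memory.stat             # rss, cache, swap, mapped_file, ...
    # cgroup v2 keeps the broken-out values in a single file:
    /sys/fs/cgroup/<group>/memory.stat
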
Girish Ramakrishnan 0250661402 Revert spurious change 2022-09-14 17:59:44 +02:00
Girish Ramakrishnan 9cef08aa6a mail relay: do not accept TLS servers
haraka can only relay via STARTTLS
2022-09-14 17:42:21 +02:00
Johannes Zellner bead9589a1 Move app graphs graphite query to backend 2022-09-14 14:39:28 +02:00
Girish Ramakrishnan c5b631c0e5 mail: catch all is already fully qualified 2022-09-11 13:49:20 +02:00
Girish Ramakrishnan 4e75694ac6 mail: require catch all to be absolute 2022-09-11 12:56:58 +02:00
Girish Ramakrishnan 2a93c703ef mailserver: add queue routes 2022-08-31 08:45:18 +02:00
Johannes Zellner 3c92971665 If backup storage precondition is not met we want to throw 2022-08-29 22:54:23 +02:00
Johannes Zellner 563391c2f1 remove PermitRootLogin check as we now use cloudron-support user 2022-08-25 18:53:09 +02:00
Girish Ramakrishnan d4555886f4 add note on the reason for the flag 2022-08-25 16:36:57 +02:00
Girish Ramakrishnan a584fad278 proxyAuth: add supportsBearerAuth flag
required for firefly-iii
2022-08-25 16:12:42 +02:00
Girish Ramakrishnan e21f39bc0b Update mail container for quota support 2022-08-23 18:48:06 +02:00
Johannes Zellner 84ca85b315 Ensure app services like redis to be also started on restart if previously stopped 2022-08-23 11:41:08 +02:00
Girish Ramakrishnan d1bdb80c72 Update mail container for quota support 2022-08-22 19:03:47 +02:00
Johannes Zellner d20f8d5e75 Fix acme refactoring 2022-08-22 12:55:43 +02:00
Johannes Zellner b2de6624fd Make email actions buttons 2022-08-21 12:22:53 +02:00
Girish Ramakrishnan 1591541c7f mail: allow aliases to have wildcard
this came out of https://forum.cloudron.io/topic/6350/disposable-email-prefixes-for-existing-mailboxes/
2022-08-18 15:22:00 +02:00
Girish Ramakrishnan 6124323d52 improve mailbox.update 2022-08-18 12:38:46 +02:00
Girish Ramakrishnan b23189b45c mail: quota support 2022-08-18 11:31:40 +02:00
Girish Ramakrishnan 1c18c16e38 typo 2022-08-15 21:09:25 +02:00
Girish Ramakrishnan d07b1c7280 directoryServer: move out start/stop from cron 2022-08-15 21:08:22 +02:00
Girish Ramakrishnan 20d722f076 Fix test 2022-08-15 20:45:55 +02:00
Girish Ramakrishnan bb3be9f380 style 2022-08-15 20:45:55 +02:00
Girish Ramakrishnan edd284fe0b rename user directory to directory server 2022-08-15 20:45:51 +02:00
Girish Ramakrishnan b5cc7d90a9 Fix crash when cron seed file is missing 2022-08-10 22:07:05 +02:00
Girish Ramakrishnan 251c1f9757 add readOnly attribute check for port bindings 2022-08-10 14:22:31 +02:00
Girish Ramakrishnan 03cd9bcc7c Update readOnly flag to tcpPorts and udpPorts 2022-08-10 13:57:00 +02:00
Johannes Zellner fc8572c2af Raise alert for when an app cannot be autoupdated 2022-08-10 12:19:54 +02:00
Johannes Zellner a913660aeb Ensure we have a BoxError here 2022-08-10 12:19:54 +02:00
Girish Ramakrishnan 9c82765512 parseInt returns NaN on failure 2022-08-08 20:33:41 +02:00
Johannes Zellner ace96bd228 Fix stringification for debug of taskError object if set 2022-08-08 13:12:53 +02:00
Johannes Zellner 02d95810a6 Do not include proxy apps in graphs 2022-08-05 14:38:57 +02:00
Johannes Zellner 0fcb202364 Expose groups as memberof in ldap and userdirectory 2022-08-04 11:22:16 +02:00
Johannes Zellner 88eb809c6e For ldap users created on first login, make sure we also check 2fa if enabled 2022-08-03 18:20:43 +02:00
Johannes Zellner 1534eaf6f7 Fixup applink tests 2022-08-03 14:57:58 +02:00
Johannes Zellner a2a60ff426 Add support for LDAP cn=...+totptoken=.. support 2022-08-02 15:27:34 +02:00
Johannes Zellner afc70ac332 Expose twoFactorAuthenticationEnabled state of users via user directory 2022-08-02 15:27:34 +02:00
Girish Ramakrishnan d5e5b64df2 cloudron-setup/motd: show ipv4 or ipv6 setup link 2022-08-01 18:32:07 +02:00
Girish Ramakrishnan 4a18ecc0ef unbound: enable ip6 2022-08-01 14:15:09 +02:00
Girish Ramakrishnan f355403412 npm: make it work with ipv6 only servers 2022-08-01 14:15:09 +02:00
Girish Ramakrishnan 985320d355 switch registry url based on ipv6 availability 2022-08-01 14:15:09 +02:00
Girish Ramakrishnan 26c9d8bc88 notification: Fix crash when backupId is null 2022-08-01 14:15:09 +02:00
Girish Ramakrishnan 2b81163179 add to changes 2022-07-30 13:16:19 +02:00
Johannes Zellner 6715efca50 Distinguish ghost/impersonate logins from others 2022-07-29 20:39:18 +02:00
Johannes Zellner 612b1d6030 Also remove the virtual user and admin groups for userdirectory 2022-07-29 11:17:31 +02:00
Johannes Zellner b71254a0c3 Remove virtual user and admin groups to ldap user records 2022-07-29 11:11:53 +02:00
Johannes Zellner c0e5f60592 Also stash random minute cron tick in seed file 2022-07-29 09:15:42 +02:00
Girish Ramakrishnan 64243425ce installer: suppress VERSION not found error 2022-07-27 06:16:27 +02:00
Girish Ramakrishnan 9ad7fda3cd ubuntu: do not explicitly disable ipv6
IIRC, we had this because unbound will not start up on servers with IPv6 disabled (in the kernel).
Maybe this is a thing of the past by now.
2022-07-27 06:16:03 +02:00
Girish Ramakrishnan c0eedc97ac collectd: always disable FQDNLookup 2022-07-25 17:01:49 +02:00
Johannes Zellner 5b4a1e0ec1 Make certificate cron job more predictable with persistent hourly seed 2022-07-25 15:40:49 +02:00
Johannes Zellner 5b31486dc9 Randomize certificate renewal check over a whole day 2022-07-22 19:32:43 +02:00
Girish Ramakrishnan 116cde19f9 constants: location -> subdomain 2022-07-14 15:18:17 +05:30
Girish Ramakrishnan 14fc089f05 Fixup user and acme cert syncing 2022-07-14 15:04:45 +05:30
Girish Ramakrishnan 885d60f7cc reverseproxy: add setUserCertificate 2022-07-14 13:25:41 +05:30
Girish Ramakrishnan d33fd7b886 do not use bundle terminology
apparently, a bundle is also like a cert chain
2022-07-14 12:39:41 +05:30
Girish Ramakrishnan ba067a959c reverseproxy: per location user certificates 2022-07-14 12:21:30 +05:30
Girish Ramakrishnan a246cb7e73 return location certificates 2022-07-14 11:57:04 +05:30
Girish Ramakrishnan f0abd7edc8 certificateJson can be null 2022-07-14 10:52:31 +05:30
Girish Ramakrishnan 127470ae59 domains: fix error handling 2022-07-14 10:35:59 +05:30
Girish Ramakrishnan efac46e40e verifyDomainConfig: just throw the error 2022-07-14 10:32:30 +05:30
Girish Ramakrishnan 6ab237034d remove superfluous validation 2022-07-13 12:06:48 +05:30
Girish Ramakrishnan 2af29fd844 cleanupCerts: add progress 2022-07-13 11:22:47 +05:30
Girish Ramakrishnan 1549f6a4d0 fix various terminology in code
subdomain, domain - strings
location - { subdomain, domain }
bundle - { cert, key }
bundlePath - { certFilePath, keyFilePath }

vhost is really just for virtual hosting
fqdn for others
2022-07-13 10:15:09 +05:30
Girish Ramakrishnan 5d16aca8f4 add script to recreate containers 2022-07-12 20:51:51 +05:30
Johannes Zellner 2facc6774b applinks icon improvements 2022-07-08 18:07:52 +02:00
Johannes Zellner e800c7d282 Only list applinks a user has access to 2022-07-08 15:14:48 +02:00
Johannes Zellner a58228952a Support accessRestriction for visibility of applinks 2022-07-07 19:44:59 +02:00
Johannes Zellner 3511856a7c support applink tags 2022-07-07 19:11:47 +02:00
Johannes Zellner 006a53dc7a Do not spam the logs on get queries 2022-07-07 18:56:21 +02:00
Johannes Zellner 45c73798b9 Fixup typo 2022-07-07 18:53:52 +02:00
Johannes Zellner c704884b10 Ensure applink label is a string 2022-07-07 18:53:27 +02:00
Johannes Zellner b54113ade3 Improve applink meta info detection 2022-07-07 18:19:53 +02:00
Johannes Zellner ac00225a75 Support applink update 2022-07-07 16:53:06 +02:00
Johannes Zellner f43fd21929 Better applink icon support 2022-07-07 16:06:04 +02:00
Johannes Zellner 741c21b368 Fixup applink routes 2022-07-07 13:01:23 +02:00
Johannes Zellner 5a26fe7361 Add applinks.js to routes/index 2022-07-07 12:44:12 +02:00
Johannes Zellner 1185dc7f79 Attempt to fetch applink icon and label from page 2022-07-07 12:36:53 +02:00
Johannes Zellner e1ac2b7b00 Add initial applink support 2022-07-06 20:37:52 +02:00
Girish Ramakrishnan e2c6672a5c better wording 2022-07-02 17:16:47 +05:30
Johannes Zellner 5c50534e21 Improve backup cleanup progress message 2022-07-01 14:18:50 +02:00
Girish Ramakrishnan 55e2139c69 restore: encrypted filenames 2022-06-27 09:49:58 -07:00
Johannes Zellner 34ff3462e9 Fixup backup_config migration script 2022-06-27 17:16:04 +02:00
Girish Ramakrishnan 104bdaf76b mail: cgroup v2 detection fix
there is a crash in the mail container when fts/solr is enabled
2022-06-26 14:28:22 -07:00
Girish Ramakrishnan c9f7b9a8a6 backups: make filename encryption optional 2022-06-26 09:37:22 -07:00
Girish Ramakrishnan 2e5d89be6b allow space in backup label 2022-06-24 09:18:51 -07:00
Girish Ramakrishnan bcf474aab6 redis: rebuild 2022-06-23 15:52:59 -07:00
Girish Ramakrishnan dea74f05ab remove bogus logic
db-migrate always runs a migration in a transaction, so no volume
was created in case of a failure
2022-06-23 10:31:13 -07:00
Girish Ramakrishnan 69e0f2f727 7.2.5 changes
(cherry picked from commit 131f823e57)
2022-06-23 10:27:57 -07:00
Girish Ramakrishnan 080f701f33 hetzner: debug typo 2022-06-22 22:12:19 -07:00
Girish Ramakrishnan 94a196bfa0 Fix issue where only 25 group members were returned
This is because GROUP_CONCAT's result length defaults to 1024 bytes. A uuid is 40 chars.
1024/40 = ~25
2022-06-22 17:54:52 -07:00
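Note: a sketch of the MySQL fix implied here (table and column names hypothetical):

    -- group_concat_max_len defaults to 1024 bytes; with ~40-char ids,
    -- only ~25 fit before GROUP_CONCAT silently truncates the result
    SET SESSION group_concat_max_len = 1024 * 1024;
    SELECT GROUP_CONCAT(userId) FROM groupMembers WHERE groupId = ?;
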
Girish Ramakrishnan 3a63158763 rename function to setMembers 2022-06-22 17:36:19 -07:00
Girish Ramakrishnan d9c47efe1f Fix storage volume migration
Patch the migration so it runs again properly in 7.2.5

https://forum.cloudron.io/topic/7256/app-data-regression-in-v7-2-4
(cherry picked from commit c2fdb9ae3f)
2022-06-22 17:16:47 -07:00
Johannes Zellner e818e5f7d5 Reload volumes in case one was created in the for loop 2022-06-22 15:32:50 +02:00
Girish Ramakrishnan cac0933334 typo 2022-06-13 13:55:04 -07:00
Girish Ramakrishnan b74f01bb9e cloudron-setup: memory keeps going lower 2022-06-13 10:58:55 -07:00
Girish Ramakrishnan 1f2d596a4a 7.2.4 changes
(cherry picked from commit 61a1ac6983)
2022-06-10 13:31:46 -07:00
Girish Ramakrishnan ce06b2e150 Fix upstreamUri validation 2022-06-10 11:23:58 -07:00
Girish Ramakrishnan 9bd9b72e5d apphealthmonitor: Fix crash 2022-06-10 11:09:41 -07:00
Girish Ramakrishnan a32166bc9d data dir: allow sameness of old and new dir
this makes it easy to migrate to a new volume setup
2022-06-09 17:49:33 -07:00
Johannes Zellner f382b8f1f5 Set real upstreamUri for healthcheck 2022-06-09 15:04:09 +02:00
Johannes Zellner fbc7fcf04b Put healthcheck errors in app logs 2022-06-09 14:56:40 +02:00
Johannes Zellner 11d7dfa071 Accept upstreamUri as string for proxy app install 2022-06-09 14:35:05 +02:00
Johannes Zellner 923a9f6560 Rename RELAY_APPSTORE_ID to PROXY_APP_APPSTORE_ID 2022-06-09 13:57:57 +02:00
Johannes Zellner 25f44f58e3 Start task also needs to skip container starting for proxy app 2022-06-09 10:48:54 +02:00
Johannes Zellner d55a6a5eec Update reverse proxy app config on upstreamUri change 2022-06-09 10:48:54 +02:00
Johannes Zellner f854d86986 Use upstreamUri in reverseproxy config 2022-06-09 10:48:54 +02:00
Johannes Zellner 6a7379e64c Add apps.upstreamUri support 2022-06-09 10:48:54 +02:00
Johannes Zellner a955457ee7 Support proxy app 2022-06-09 10:48:54 +02:00
Girish Ramakrishnan 67801020ed mailboxDisplayName is optional 2022-06-08 14:25:16 -07:00
Girish Ramakrishnan 037f4195da guard against two level subdir moves
this has never worked, since the -wholename check only works
one level deep
2022-06-08 12:24:11 -07:00
Girish Ramakrishnan 8cf0922401 Fix container creation when migrating data dir 2022-06-08 11:52:22 -07:00
Girish Ramakrishnan 6311c78bcd Fix quoting 2022-06-08 11:25:20 -07:00
Girish Ramakrishnan 544ca6e1f4 initial xfs support 2022-06-08 10:58:00 -07:00
Girish Ramakrishnan 6de198eaad sendmail: check for supportsDisplayName
it seems quite a few apps don't support this, so we need a way for the
UI to hide the field so that users are not confused.
2022-06-08 09:43:58 -07:00
Girish Ramakrishnan 6c67f13d90 Use bind mount instead of volume
see also c76b211ce0
2022-06-06 15:59:59 -07:00
Girish Ramakrishnan 7598cf2baf consolidate storage validation logic 2022-06-06 12:50:21 -07:00
Girish Ramakrishnan 7dba294961 storage: check volume status 2022-06-03 10:43:59 -07:00
Girish Ramakrishnan 4bee30dd83 fix more typos 2022-06-03 09:10:37 -07:00
Girish Ramakrishnan 7952a67ed2 guess the volume type better 2022-06-03 07:54:16 -07:00
Johannes Zellner 50b2eabfde Also fixup userdirectory tests 2022-06-03 13:59:21 +02:00
Johannes Zellner 591067ee22 Fixup ldap group search tests 2022-06-03 13:54:31 +02:00
Johannes Zellner 88f78c01ba Remove virtual groups users and admin exposed via ldap 2022-06-03 13:32:35 +02:00
Girish Ramakrishnan dddc5a1994 migrate app dataDir to volumes 2022-06-02 16:29:01 -07:00
Girish Ramakrishnan 8fc8128957 Make apps.getDataDir async 2022-06-02 11:19:33 -07:00
Girish Ramakrishnan c76b211ce0 localstorage: remove usage of docker volumes
just move bind mounts. the initial idea was to use docker volume backends,
but we have no plans for this. in addition, usage of volumes means that
files get copied from the image into the volume on first run, which is
not desired. people are putting /app/data stuff into images, a practice which ideally
should break.
2022-06-02 11:09:27 -07:00
Girish Ramakrishnan 0c13504928 Bump version 2022-06-02 11:02:06 -07:00
Girish Ramakrishnan 26ab7f2767 add mailbox display name to schema 2022-06-01 22:06:34 -07:00
Girish Ramakrishnan f78dabbf7e mail: add display name validation tests 2022-06-01 22:04:36 -07:00
Girish Ramakrishnan 39c5c44ac3 cloudron-firewall: fix spurious line 2022-06-01 09:28:50 -07:00
Girish Ramakrishnan 2dea7f8fe9 sendmail: restrict few characters in the display name 2022-06-01 08:13:19 -07:00
Girish Ramakrishnan 85af0d96d2 sendmail: allow display name to be set 2022-06-01 01:38:16 -07:00
Girish Ramakrishnan 176e917f51 update 7.2.3 changes 2022-05-31 13:27:00 -07:00
Girish Ramakrishnan 534c8f9c3f collectd: on one system, localhost was missing in /etc/hosts 2022-05-27 16:10:38 -07:00
Girish Ramakrishnan 5ee9feb0d2 If disk name has '.', replace with '_'
graphite uses . as the separator between different metric parts

see #348
2022-05-27 16:00:08 -07:00
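Note: a one-line sketch of the rename (variable names hypothetical):

    // graphite treats '.' as the metric path separator, so a disk name like
    // 'sda1.0' would be read as two path components; replace dots defensively
    const metricName = diskName.replace(/\./g, '_');
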
Girish Ramakrishnan 723453dd1c 7.2.3 changes 2022-05-27 12:04:01 -07:00
Girish Ramakrishnan 45c9ddeacf appstore: allow re-registration on server side delete 2022-05-26 22:27:58 -07:00
Girish Ramakrishnan 5b075e3918 transfer ownership is not used anymore 2022-05-26 14:30:32 -07:00
Girish Ramakrishnan c9916c4107 Really disable FQDNLookup 2022-05-25 15:48:25 -07:00
Girish Ramakrishnan c7956872cb Add to changes 2022-05-25 15:14:01 -07:00
Girish Ramakrishnan 3adf8b5176 collectd: FQDNLookup causes collectd install to fail
this is on ubuntu 20

https://forum.cloudron.io/topic/7091/aws-ubuntu-20-04-installation-issue
2022-05-25 15:10:55 -07:00
Girish Ramakrishnan 40eae601da Update cloudron-manifestformat for new scheduler patterns 2022-05-23 11:02:04 -07:00
Girish Ramakrishnan 3eead2fdbe Fix possible duplicate key issue
console_server_origin is injected by the new setup script even for
7.1.x
2022-05-22 20:48:29 -07:00
Girish Ramakrishnan 9fcd6f9c0a cron: add @service which is probably clearer than @reboot in app context 2022-05-20 10:57:44 -07:00
Girish Ramakrishnan 17910584ca cron: add extensions
https://www.man7.org/linux/man-pages/man5/crontab.5.html#EXTENSIONS
2022-05-20 10:53:30 -07:00
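Note: the crontab extensions in question (commands illustrative):

    @reboot  /usr/local/bin/on-start.sh    # run once after the daemon starts
    @daily   /usr/local/bin/cleanup.sh     # shorthand for 0 0 * * *
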
Girish Ramakrishnan d9a02faf7a make the globals const 2022-05-20 09:38:22 -07:00
Girish Ramakrishnan d366f3107d net_admin: enable IPv6 forwarding in the container 2022-05-19 17:10:05 -07:00
Girish Ramakrishnan 2596afa7b3 appstore: set utmSource during user registration 2022-05-19 00:00:48 -07:00
Johannes Zellner aa1e8dc930 Give the dashboard a way to check backgroundImage availability 2022-05-17 15:25:44 +02:00
Johannes Zellner f3c66056b5 Allow to unset background image 2022-05-17 13:17:05 +02:00
Girish Ramakrishnan 93bacd00da Fix exec web socket/upload/download 2022-05-16 11:46:28 -07:00
Girish Ramakrishnan b5c2a0ff44 exec: rework API to get exit code 2022-05-16 11:23:58 -07:00
Johannes Zellner 6bd478b8b0 Add profile backgroundImage api 2022-05-15 12:08:11 +02:00
Girish Ramakrishnan c5c62ff294 Add to changes 2022-05-14 09:36:56 -07:00
Girish Ramakrishnan 7ed8678d50 mongodb: fix import timeout 2022-05-09 17:20:16 -07:00
Girish Ramakrishnan e19e5423f0 cloudron-support: Remove unused var 2022-05-07 19:25:06 -07:00
Girish Ramakrishnan 622ba01c7a ubuntu 22: collectd disappeared
https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1971093

also, remove the ubuntu 16 hack
2022-05-06 20:02:02 -07:00
Girish Ramakrishnan 935da3ed15 vultr: set ttl to 120
https://www.vultr.com/docs/introduction-to-vultr-dns/#Limitations
2022-05-06 12:29:12 -07:00
Girish Ramakrishnan ce054820a6 add migration to add consoleServerOrigin 2022-05-05 09:59:22 -07:00
Johannes Zellner a7668624b4 Ensure we also set the new console server origin during installation 2022-05-05 16:52:11 +02:00
Girish Ramakrishnan 01b36bb37e proxyAuth: make the POST to /logout redirect
for firefly-III
2022-05-03 18:19:22 -07:00
Girish Ramakrishnan 5d1aaf6bc6 cloudron-setup: silent 2022-05-03 10:20:19 -07:00
Girish Ramakrishnan 7ceb307110 Add 7.2.1 changes 2022-05-03 09:15:21 -07:00
Girish Ramakrishnan 6371b7c20d dns: add hetzner 2022-05-02 22:33:30 -07:00
Girish Ramakrishnan 7ec648164e Remove usage of util 2022-05-02 21:32:10 -07:00
Girish Ramakrishnan 6e98f5f36c backuptask: make upload/download async 2022-04-30 16:42:14 -07:00
Girish Ramakrishnan a098c6da34 noop: removeDir is async 2022-04-30 16:35:39 -07:00
Girish Ramakrishnan 94e70aca33 storage: downloadDir is not part of interface 2022-04-30 16:24:49 -07:00
Girish Ramakrishnan ea01586b52 storage: make copy async 2022-04-30 16:24:45 -07:00
Girish Ramakrishnan 8ceb80dc44 hush: return BoxError everywhere 2022-04-29 19:02:59 -07:00
Girish Ramakrishnan 2280b7eaf5 Add S3MultipartDownloadStream
This extends the modern Readable class
2022-04-29 18:23:56 -07:00
Girish Ramakrishnan 1c1d247a24 cloudron-support: update key 2022-04-29 12:39:42 -07:00
Girish Ramakrishnan 90a6ad8cf5 support: new keys (ed25519)
rsa keys are slowly going away
2022-04-29 12:37:27 -07:00
Girish Ramakrishnan 80d91e5540 Add missing changelog 2022-04-29 09:58:17 -07:00
Girish Ramakrishnan 26cf084e1c tarPack/tarExtract do not need a callback 2022-04-28 21:58:00 -07:00
Girish Ramakrishnan 8ef730ad9c backuptask: make upload/download async 2022-04-28 21:37:08 -07:00
Girish Ramakrishnan 7123ec433c split up backupformat logic into separate files 2022-04-28 19:10:57 -07:00
Girish Ramakrishnan c67d9fd082 move crypto code to hush.js 2022-04-28 18:12:17 -07:00
Girish Ramakrishnan dd8f710605 Fix failing test 2022-04-28 18:03:36 -07:00
Girish Ramakrishnan e097b79f65 godaddy: do not remove all the records of type 2022-04-28 17:46:03 -07:00
Girish Ramakrishnan 765f6d1b12 Revert "proxyAuth: use default fallback icon when no appstore icon or custom icon"
This reverts commit 045c3917c9.

This was committed by mistake, not sure how. 3d28833c35 is the commit
that fixes this issue.
2022-04-28 17:05:46 -07:00
Girish Ramakrishnan 7cf80ebf69 postgresql: add connection logs
This was an attempt to fix connection leak in postgresql. It turns
out that there was a long running cron task which was holding a db
connection. When that happens, the apptask might fail because postgres
says db is in use. The code in scheduler.js currently does not really
'suspend' task running because of re-entrancy issues.
2022-04-28 16:11:09 -07:00
Johannes Zellner cc328f3a6e cloudron-support --enable-ssh should only enable ssh not attempt to collect stats, this might fail 2022-04-28 11:31:18 +02:00
Girish Ramakrishnan 045c3917c9 proxyAuth: use default fallback icon when no appstore icon or custom icon 2022-04-28 10:48:25 +02:00
Girish Ramakrishnan ac2186ccf6 redis: fix cgroup check 2022-04-27 18:46:00 -07:00
Girish Ramakrishnan a57fe36643 collectd: add cgroup v2 config
Ubuntu 22 uses cgroup v2 by default

https://rootlesscontaine.rs/getting-started/common/cgroup2/#checking-whether-cgroup-v2-is-already-enabled
https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
https://man7.org/training/download/splc_cgroups_v1_slides.pdf
2022-04-27 18:41:20 -07:00
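Note: a quick way to check which cgroup version a host runs (per the linked guide):

    stat -fc %T /sys/fs/cgroup    # prints "cgroup2fs" on a v2 host, "tmpfs" on v1
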
Girish Ramakrishnan 1e711f7928 Ubuntu 22 has private home directories by default (https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/) 2022-04-27 17:49:29 -07:00
Girish Ramakrishnan eafccde6cb Reset mysql password by detecting version (instead of ubuntu version) 2022-04-27 15:45:53 -07:00
Girish Ramakrishnan 6b85e11a22 update: collectd configuration can be removed 2022-04-27 15:41:28 -07:00
Girish Ramakrishnan a74de3811b mysqldump: better detection of --column-statistics support
since it's also needed for ubuntu 22
2022-04-27 15:39:53 -07:00
Girish Ramakrishnan 070a425c85 typo 2022-04-27 13:11:20 -07:00
Girish Ramakrishnan 32153ed47d nginx: switch to ubuntu's repo package
ubuntu 18.04 has nginx 1.14
ubuntu 20.04 and 22.04 have nginx 1.18

We used a custom nginx for TLSv1.3 support (ssl_protocols TLSv1.3).

OpenSSL itself has TLS 1.3 only from Ubuntu 18.10. This is why we
installed custom packages on Ubuntu 18.04
2022-04-27 10:59:27 -07:00
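Note: the directive the custom package existed to support (values illustrative):

    ssl_protocols TLSv1.2 TLSv1.3;    # TLSv1.3 needs nginx >= 1.13 built against OpenSSL >= 1.1.1
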
Girish Ramakrishnan 454f9c4a79 syncer: task processor cannot be async because of asyncjs quirk 2022-04-27 09:14:51 -07:00
Girish Ramakrishnan 3d28833c35 proxyAuth: use default fallback icon when no appstore icon or custom icon 2022-04-26 19:43:22 -07:00
Girish Ramakrishnan be458020dd use string interpolation 2022-04-26 18:55:02 -07:00
Girish Ramakrishnan 9b6733fd88 godaddy: there is now a delete API 2022-04-26 18:44:50 -07:00
Girish Ramakrishnan 1b34a3e599 proxyAuth: add header spoofing note 2022-04-26 14:59:38 -07:00
Girish Ramakrishnan 67d29dbad8 systemd-detect-virt returns false when none detected 2022-04-26 14:59:26 -07:00
Girish Ramakrishnan 28b0043541 cloudron-setup: add container virtualization check 2022-04-26 08:24:36 -07:00
Girish Ramakrishnan 78824b059e turn off sso flag if an update removes sso options
ff-iii used to have LDAP but we removed it. in the database, 'sso'
is still true. the migration here will reset it back to false.
for future situations like these, we sync the sso flag on app update itself.

this ensures correct behavior when yet another update adds back sso support.
in ff-iii's case, a future update is bringing in proxyAuth based sso!

we don't store the 'sso' bit in backupdb, so user choice of sso is
lost if restore changes sso addons.
2022-04-25 23:36:58 -07:00
Girish Ramakrishnan c63709312d proxyAuth: set X-Remote-User, X-Remote-Email headers
Apps like firefly-iii support https://datatracker.ietf.org/doc/html/rfc3875#section-4.1.10
2022-04-25 22:24:41 -07:00
Girish Ramakrishnan 11cf24075b Keep proxyAuth.enabled configs together 2022-04-25 22:09:34 -07:00
Girish Ramakrishnan 5d440d55c3 Add to changes 2022-04-25 09:07:27 -07:00
Johannes Zellner 4c3b81d29c Add make user local tests and fixup route 2022-04-24 22:49:12 +02:00
Johannes Zellner 032218c0fd Add route to make user local 2022-04-24 22:22:25 +02:00
Johannes Zellner 0cd48bd239 Ensure LDAP usernames are always treated lowercase 2022-04-23 11:21:14 +02:00
Girish Ramakrishnan f5a2e8545b Initial support for ubuntu 22.04 Jammy Jellyfish 2022-04-21 12:30:37 -07:00
Girish Ramakrishnan 4306e20a8e Update docker to 20.10.14 2022-04-21 12:30:14 -07:00
Girish Ramakrishnan 635dd5f10d Update nodejs 2022-04-21 12:28:55 -07:00
Girish Ramakrishnan 7f89dfd261 add once.js 2022-04-15 19:01:35 -05:00
Girish Ramakrishnan e878e71b20 promisify once 2022-04-15 17:59:41 -05:00
Girish Ramakrishnan 64a2493ca2 Fixup prettyBytes 2022-04-15 17:56:24 -05:00
Girish Ramakrishnan 26f9635a38 taskworker: only support async workers 2022-04-15 17:40:46 -05:00
Girish Ramakrishnan 5f2492558d callback is not needed here 2022-04-15 17:29:15 -05:00
Girish Ramakrishnan c83c151e10 remove recursive-readdir 2022-04-15 11:34:16 -05:00
Girish Ramakrishnan 801dddc269 inline S3ReadStream 2022-04-15 11:25:15 -05:00
Girish Ramakrishnan 9a886111ad inline chunk.js 2022-04-15 09:25:54 -05:00
Girish Ramakrishnan bdc9a0cbe3 inline prettyBytes 2022-04-15 09:18:07 -05:00
Girish Ramakrishnan 555f914537 remove lodash.chunk 2022-04-15 08:07:46 -05:00
Girish Ramakrishnan 43f86674b4 Remove delay module 2022-04-15 07:52:35 -05:00
Girish Ramakrishnan f7ed044a40 fix storage test 2022-04-15 07:49:38 -05:00
Girish Ramakrishnan 72408f2542 Remove proxy-middleware, we have our own copy by now
See aad50fb5b2
2022-04-15 07:43:19 -05:00
Girish Ramakrishnan 0abc6c8844 replace pretty-bytes module 2022-04-15 07:34:16 -05:00
Girish Ramakrishnan d46de32ffb Update packages 2022-04-15 07:24:50 -05:00
Girish Ramakrishnan 185d5d66ad even more constness 2022-04-14 20:30:00 -05:00
Girish Ramakrishnan 01ce251596 constness 2022-04-14 18:03:43 -05:00
Girish Ramakrishnan 05d7a7f496 constness 2022-04-14 17:50:41 -05:00
Girish Ramakrishnan 685bda35b9 storage: make remove and removeDir async 2022-04-14 16:16:20 -05:00
Girish Ramakrishnan 8d8cdd38a9 Add missing await 2022-04-14 15:40:51 -05:00
Girish Ramakrishnan d54c03f0a0 storage: make exists async 2022-04-14 12:24:34 -05:00
Girish Ramakrishnan 11f7be2065 storage: verifyConfig is now async 2022-04-14 12:24:30 -05:00
Girish Ramakrishnan a39e0ab934 storage: make remount async 2022-04-14 09:57:31 -05:00
Girish Ramakrishnan b51082f7e4 storage: checkPreconditions is now async 2022-04-14 07:40:19 -05:00
Girish Ramakrishnan 9ec76c69ec s3: make callback of getS3Config 2022-04-14 07:35:41 -05:00
Girish Ramakrishnan b0a09a8a00 restore: fix usage of backupId 2022-04-13 21:23:12 -05:00
Girish Ramakrishnan 5870f949a3 Update changes 2022-04-13 11:39:23 -05:00
Girish Ramakrishnan 87cb90c9b6 Fix crash
ReferenceError: Cannot access 'backups' before initialization
2022-04-08 16:27:11 -07:00
Girish Ramakrishnan 21b900258a backup: fix format of id
the id is used in dependsOn by the UI to find the linked apps. if we
had it as a uuid, we would have to query the db a lot
2022-04-08 16:23:27 -07:00
Johannes Zellner de9f3c10f4 Use new sftp addon 3.6.1
This will copy the uploaded file on conflict with a unique .number
extension
2022-04-07 18:02:11 +02:00
Johannes Zellner 47e45808a3 Give the addons a lot more time to initiate a connection 2022-04-06 13:05:09 +02:00
Girish Ramakrishnan 0280c2baba keep the backup if preserveSecs is -1 2022-04-05 11:08:38 -07:00
Girish Ramakrishnan 2f8f5fcb7d Typo 2022-04-05 10:26:05 -07:00
Girish Ramakrishnan 709d4041b2 backups: fix restore code path after backup id changes 2022-04-05 09:55:57 -07:00
Johannes Zellner b4b999bd74 Fix await safe usage 2022-04-05 13:17:49 +02:00
Girish Ramakrishnan ea3fd27123 backups: recursively update the dep preserveSecs
One idea was to compute this at cleanup time, but this has two problems:

* the UI won't reflect this value. can be good or bad
* the cleaner has no easy way to find out the "parent". I guess we should
  change our data structure, if we want to go down this route...
2022-04-04 21:29:35 -07:00
Girish Ramakrishnan 452a4d9a75 backups: add remotePath
the main motivation is that id can be used in REST API routes. previously,
the id was a path and this had a "/" in it. This made /api/v1/backups/:backupId
not work.
2022-04-04 20:40:40 -07:00
Girish Ramakrishnan 54934c41a7 storage: rename getBackupPath to getBasePath 2022-04-04 14:08:24 -07:00
Girish Ramakrishnan a05e564ae6 Fix expectation in test 2022-04-04 14:03:07 -07:00
Girish Ramakrishnan 57ac94bab6 Fix appstore test 2022-04-04 13:55:23 -07:00
Girish Ramakrishnan 6839ff4cf6 reverseproxy: fix typo
this typo was causing the nginx configs of the primary domain to be re-written
every time we try to renew certs
2022-04-04 10:30:32 -07:00
Girish Ramakrishnan 993dda9121 rename function 2022-04-03 08:29:59 -07:00
Girish Ramakrishnan 70695b1b0f backups: set label of backup and control it's retention 2022-04-02 19:30:54 -07:00
Girish Ramakrishnan d47b39d90b eventlog: distinguish install vs update finish 2022-04-01 14:19:53 -07:00
Girish Ramakrishnan 574d3b120f Use hyphens instead of camel case for scripts 2022-04-01 09:51:15 -07:00
Girish Ramakrishnan 3d1f2bf716 move init script into scripts
the baseimage directory was from a time when we used to build a
base image and snapshot it. this is not done anymore.

init-ubuntu.sh - static packages installed one time and managed by ubuntu
installer.sh - packages installed and maintained by cloudron. run before an update.
    this can "fail" and the updater can thus abort
start.sh - configuring packages
2022-04-01 09:48:40 -07:00
Girish Ramakrishnan bac5edc188 cloudron-setup: remove arguments to init script
this ends support for cloudron 5 installs with this version of the script
2022-04-01 09:37:06 -07:00
Girish Ramakrishnan 7700c56d3e cloudron-setup: remove --skip-baseimage-init, it is unused 2022-04-01 09:22:23 -07:00
Girish Ramakrishnan 9f395f64da accessToken -> cloudronToken 2022-03-31 23:59:42 -07:00
Girish Ramakrishnan 73d029ba4b cloudron-setup: add setup-token to arg list 2022-03-31 23:49:34 -07:00
Girish Ramakrishnan a292393a43 7.2 changes 2022-03-31 23:45:14 -07:00
Girish Ramakrishnan 37a4e8d5c5 cloudron-setup: add --setup-token 2022-03-31 23:38:54 -07:00
Girish Ramakrishnan 81728f4202 appstore: make the args of updateCloudron clear 2022-03-31 23:27:00 -07:00
Girish Ramakrishnan 2d2ddd1c49 add note on the existing setupToken 2022-03-31 23:02:26 -07:00
Girish Ramakrishnan bc49f64a0c appstore: it never returns 422
I think I meant 402, which is subscription expired/billing error
2022-03-31 22:51:40 -07:00
Girish Ramakrishnan 52fc031516 Log error message if updateCloudron failed 2022-03-31 22:46:14 -07:00
Girish Ramakrishnan cae528158c appstore: check login response 2022-03-31 22:43:34 -07:00
Girish Ramakrishnan 566a03cd59 remove unnecessary temp variables 2022-03-31 22:41:48 -07:00
Girish Ramakrishnan ad2221350f Add appstore web token
* For existing installs, migrate using the soon-to-be-obsolete user_token route
* For new installs, the token obtained post-login is stashed at registration time
2022-03-31 22:35:45 -07:00
Girish Ramakrishnan 656dca7c66 rename cloudron_token to appstore_api_token 2022-03-31 22:18:08 -07:00
Girish Ramakrishnan 638fe2e6c8 ldap: add rootDSE test 2022-03-31 21:18:56 -07:00
Girish Ramakrishnan 3295d2b727 settings: remove licenseKey
this is unused
2022-03-31 12:47:45 -07:00
Johannes Zellner c4689a8385 Add registerWithSetupToken() 2022-03-31 17:29:44 +02:00
Girish Ramakrishnan d09d6c21fa sshfs: fix bug where sshfs mounts were generated without unbound dependency 2022-03-30 21:39:15 -07:00
Girish Ramakrishnan 7ec1594428 create a separate support user
This creates a separate user named 'cloudron-support' using which we
can provide remote support. The hyphenated username follows the
systemd system username convention.

With a separate user, we don't need to ask users to keep changing PermitRootLogin
(and remind them to change it back).

Using a sudo user has various advantages:

* https://askubuntu.com/questions/687249/why-does-ubuntu-have-a-disabled-root-account
* https://wiki.debian.org/sudo
* https://askubuntu.com/questions/16178/why-is-it-bad-to-log-in-as-root

The yellowtent user is also locked down further - no password and no shell login.
2022-03-30 15:08:20 -07:00
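Roughly what this amounts to as shell; the useradd line mirrors init-ubuntu.sh further down, while the sudoers grant is an assumption and not part of this diff:

# system user, home dir for authorized_keys, bash shell for interactive support
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
# assumed sudo grant; the actual policy may differ
echo "cloudron-support ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/cloudron-support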
Girish Ramakrishnan 529f6fb2cd sftp: fix private key file permissions on restore 2022-03-30 11:58:21 -07:00
Girish Ramakrishnan 724f5643bc suppress grep message 2022-03-30 11:10:00 -07:00
Girish Ramakrishnan 74e849e2a1 backup cleaner: do not clean when provider is not mounted 2022-03-30 10:17:20 -07:00
Girish Ramakrishnan bfb233eca1 installer.sh: move installation of docker/node/nginx etc
no need to duplicate the code in two places. i think this will also
fix the unbound/resolvconf DNS resolution issue. this way unbound is configured
and is what gets used when docker is installed.

https://forum.cloudron.io/topic/6660/help-please-failing-setup-of-cloudron
https://forum.cloudron.io/topic/6632/help-me-please-got-error-while-installing-the-cloudron-on-a-fresh-ubuntu-20-04-x64-server
https://forum.cloudron.io/topic/6561/that-install-script-fails-74-times-out-of-75
2022-03-29 22:34:03 -07:00
Girish Ramakrishnan 5b27eb9c54 initializeBaseUbuntuImage: create yellowtent user 2022-03-29 21:41:46 -07:00
Girish Ramakrishnan faf91d4d00 sshfs and mount.nfs are in base image now 2022-03-29 21:32:48 -07:00
Girish Ramakrishnan dbb803ff5e cifs: use credentials file
this supports special characters in passwords better

https://forum.cloudron.io/topic/6577/failed-to-mount-inactive-mount-error-13-when-mounting-cifs-from-synology
2022-03-29 21:26:58 -07:00
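For reference, mount.cifs reads the secrets from a root-only file via the standard credentials= option, so the password never appears on the options line. Paths and values below are made up:

# /etc/cloudron/cifs.creds (mode 0600), read by mount.cifs:
#   username=backupuser
#   password=p@ss,word!with$pecials
#   domain=WORKGROUP
mount -t cifs //synology/backups /mnt/backups -o credentials=/etc/cloudron/cifs.creds,iocharset=utf8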
Girish Ramakrishnan 0dea2d283b move sshfs key write logic to renderMountFile 2022-03-29 20:15:55 -07:00
Girish Ramakrishnan cbc44da102 create sshfs dir in start.sh 2022-03-29 20:13:41 -07:00
Girish Ramakrishnan 3f633c9779 dns: check for CNAME record
Check if CNAME record exists and remove it if overwrite is set
2022-03-29 13:53:34 -07:00
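A sketch of the check using Node's resolver; the real code resolves through unbound and removal goes through the DNS provider API, so the function shape here is illustrative:

const dns = require('dns').promises;

async function checkCname(fqdn, overwrite) {
    try {
        const cnames = await dns.resolveCname(fqdn); // throws ENODATA/ENOTFOUND when absent
        if (cnames.length && !overwrite) throw new Error(`CNAME exists for ${fqdn}`);
        return cnames.length ? cnames[0] : null;     // caller removes it when overwrite is set
    } catch (error) {
        if (error.code === 'ENODATA' || error.code === 'ENOTFOUND') return null;
        throw error;
    }
}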
Girish Ramakrishnan 6933ccefe2 Update nginx to 1.20.0-1 2022-03-28 13:25:05 -07:00
Girish Ramakrishnan 54aeff1419 ldap: send rootDSE response
some apps like osTicket require this
2022-03-25 14:15:18 -07:00
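A minimal rootDSE responder with ldapjs (which is in the dependencies below); the attribute values are illustrative, not the exact set shipped:

const ldap = require('ldapjs');
const server = ldap.createServer();

// a search against the empty base DN is the rootDSE query clients like osTicket send
server.search('', (req, res, next) => {
    res.send({ dn: '', attributes: { namingContexts: [ 'ou=users,dc=cloudron' ], supportedLDAPVersion: [ '3' ] } });
    res.end();
});

server.listen(3002);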
Girish Ramakrishnan 14f9d7fe25 cloudron-setup: add a redo flag to workaround dns failures
temporary hotfix for dns issues some VMs are having:

https://forum.cloudron.io/topic/6660/help-please-failing-setup-of-cloudron
https://forum.cloudron.io/topic/6632/help-me-please-got-error-while-installing-the-cloudron-on-a-fresh-ubuntu-20-04-x64-server
https://forum.cloudron.io/topic/6561/that-install-script-fails-74-times-out-of-75
2022-03-25 10:33:49 -07:00
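Intended usage (invocation path assumed): re-run the script on the same VM and the "packages already installed" check is skipped, per the cloudron-setup diff below:

./cloudron-setup --provider generic --redo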
Girish Ramakrishnan 144e98abab image name cannot start with '/'
https://forum.cloudron.io/topic/6689/cannot-uninstall-custom-app
https://stackoverflow.com/questions/43091075/docker-restrictions-regarding-naming-image
2022-03-24 10:03:40 -07:00
Girish Ramakrishnan e0e0c049c8 add link to upstream issue 2022-03-23 09:52:17 -07:00
Johannes Zellner ef0f9c5298 Fixup cn attribute for ldap to be according to spec
Bring back b54c4bb399
2022-03-22 10:19:21 -07:00
Girish Ramakrishnan d13905377c firewall: do not add duplicate ldap redirect rules 2022-03-21 12:25:30 -07:00
Girish Ramakrishnan 6f1023e0cd Add to changes 2022-03-18 10:27:04 -07:00
Girish Ramakrishnan eeddc233dd more changes 2022-03-16 09:05:41 -07:00
Girish Ramakrishnan f48690ee11 dyndns: fix typo 2022-03-15 09:53:54 -07:00
Girish Ramakrishnan 3b0bdd9807 support: send the server IPv4 when remote support is enabled 2022-03-14 21:30:54 -07:00
Girish Ramakrishnan 6dc5c4f13b ldap: add dummy apps search route for directus 2022-03-14 09:17:49 -07:00
Girish Ramakrishnan 9bb5096f1c nginx: enable underscores in headers
chatwoot requires this

https://www.chatwoot.com/docs/self-hosted/deployment/caprover#api-requests-failing-with-you-need-to-sign-in-or-sign-up-before-continuing

They are apparently disabled by default since they conflict with some CGI headers:

https://stackoverflow.com/questions/22856136/why-do-http-servers-forbid-underscores-in-http-header-names
https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/?highlight=disappearing%20http%20headers#missing-disappearing-http-headers
2022-03-13 23:04:34 -07:00
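The directive in question (default is off; placement in the http block is assumed):

http {
    underscores_in_headers on;  # without this, nginx treats headers containing '_' as invalid and drops them
    # ...
}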
Girish Ramakrishnan af42008fd3 Enable IPv6 on new interfaces with net_admin cap 2022-03-12 09:14:37 -08:00
Johannes Zellner d6875d4949 Add test coverage support 2022-03-11 00:52:41 +01:00
Girish Ramakrishnan 4396bd3ea7 wildcard: handle ENODATA 2022-03-08 17:14:42 -08:00
Girish Ramakrishnan db03053e05 cloudflare: remove async 2022-03-08 14:30:27 -08:00
209 changed files with 9490 additions and 9501 deletions
+1
@@ -1,5 +1,6 @@
node_modules/
coverage/
.nyc_output/
webadmin/dist/
installer/src/certs/server.key
+149
@@ -2442,3 +2442,152 @@
* setup: ufw may not be installed
* mysql: fix default collation of databases
[7.1.4]
* wildcard dns: fix handling of ENODATA
* cloudflare: fix error handling
* openvpn: ipv6 support
* dyndns: fix issue where eventlog was getting filled with empty entries
* mandatory 2fa: Fix typo in 2FA check
[7.2.0]
* mail: hide log button for non-superadmins
* firewall: do not add duplicate ldap redirect rules
* ldap: respond to RootDSE
* Check if CNAME record exists and remove it if overwrite is set
* cifs: use credentials file for better password support
* installer: rework script to fix DNS resolution issues
* backup cleaner: do not clean if not mounted
* restore: fix sftp private key perms
* support: add a separate system user named cloudron-support
* sshfs: fix bug where sshfs mounts were generated without unbound dependency
* cloudron-setup: add --setup-token
* notifications: add installation event
* backups: set label of backup and control its retention
* wasabi: add new regions (London, Frankfurt, Paris, Toronto)
* docker: update to 20.10.14
* Ensure LDAP usernames are always treated lowercase
* Add a way to make LDAP users local
* proxyAuth: set X-Remote-User (rfc3875)
* GoDaddy: there is now a delete API
* nginx: use ubuntu packages for ubuntu 20.04 and 22.04
* Ubuntu 22.04 LTS support
* Add Hetzner DNS
* cron: add support for extensions (@reboot, @weekly etc)
* Add profile backgroundImage api
* exec: rework API to get exit code
* Add update available filter
[7.2.1]
* Refactor backup code to use async/await
* mongodb: fix bug where a small timeout prevented import of large backups
* Add update available filter
* exec: rework API to get exit code
* Add profile backgroundImage api
* cron: add support for extensions (@reboot, @weekly etc)
[7.2.2]
* Update cloudron-manifestformat for new scheduler patterns
* collectd: FQDNLookup causes collectd install to fail
[7.2.3]
* appstore: allow re-registration on server side delete
* transfer ownership route is not used anymore
* graphite: fix issue where disk names with '.' do not render
* dark mode fixes
* sendmail: mail from display name
* Use volumes for app data instead of raw path
* initial xfs support
[7.2.4]
* volumes: Ensure long volume names do not overflow the table
* Move all appstore filter to the left
* app data: allow old and new dir to be the same
[7.2.5]
* Fix storage volume migration
* Fix issue where only 25 group members were returned
* Fix eventlog display
[7.3.0]
* Proxied apps
* Applinks - app bookmarks in dashboard
* backups: optional encryption of backup file names
* eventlog: add event for impersonated user login
* ldap & user directory: Remove virtual user and admin groups
* Randomize certificate generation cronjob to lighten load on Let's Encrypt servers
* mail: catch-all address can be any domain
* mail: accept only STARTTLS servers for relay
* graphs: cgroup v2 support
* mail: fix issue where signature was appended to text attachments
* redis: restart button will now rebuild if the container is missing
* backups: allow space in label name
* mail: fix crash when solr is enabled on Ubuntu 22 (cgroup v2 detection fix)
* mail: fix issue where certificate renewal did not restart the mail container properly
* notification: Fix crash when backupId is null
* IPv6: initial support for ipv6 only server
* User directory: Cloudron connector uses 2FA auth
* port bindings: add read only flag
* mail: add storage quota support
* mail: allow aliases to have wildcard
* proxyAuth: add supportsBearerAuth flag
* backups: Fix precondition check which was not erroring if mount is missing
* mail: add queue management API and UI
* graphs: show app disk usage graphs
* UI: fix issue where mailbox display name was not initialized correctly
* wasabi: add singapore and sydney regions
* filemanager: add split view
* nginx: fix zero length certs when out of disk space
* read only API tokens
[7.3.1]
* Add Cloudflare R2
* app proxy: fixes to https proxying
* app links: fix icons
[7.3.2]
* support: require owner permissions
* postgresql: fix issue when restoring large dumps
* graphs: add cpu/disk/network usage
* graphs: new disk usage UI
* relay: add office 365
[7.3.3]
* Fix oom detection in tasks
* ldap: memberof is a DN and not just group name
* mail relay: office365 provider
* If we can't fetch applink upstreamUri, just stop icon and title detection
* manifest: add runtimeDirs
* remove external df module
* Show remaining disk space in usage graph
* Make users and groups available for the new app link dialog
* Show swaps in disk graphs
* disk usage: run once a day
* mail: fix 100% cpu use with unreachable servers
* security: do not send password reset mail to cloudron-owned mail domains
* logrotate: only keep 14 days of logs
* mail: fix dnsbl count when all servers are removed
* applink: make users and groups available for the new app link dialog
* Show app disk usage in storage tab
* Make volume read-only checkbox a dropdown
[7.3.4]
* Display platform update status in the UI
* Fix image pruning
* cloudflare: fix issue where incorrect URL configuration is accepted
[7.3.5]
* du: fix crash when filesystem is cifs/nfs/sshfs
* Start with a default to not fail if no swap is present
* Fix bug in cert cleanup logic causing it to repeatedly cleanup
* Fix crash in RBL check
* unbound: disable controller interface explicitly
* Fix issue where cert renewal logs were not displayed
* Fix loading of mailboxes
[7.3.6]
* aws: add melbourne region
* Fix display of box backups
* mail usage: fix issue caused by deleted mailboxes
* reverseproxy: fix issue where renewed certs are not written to disk
* support: fix crash when opening tickets with 0 length files
+11 -5
@@ -9,7 +9,7 @@ const fs = require('fs'),
safe = require('safetydance'),
server = require('./src/server.js'),
settings = require('./src/settings.js'),
userdirectory = require('./src/userdirectory.js');
directoryServer = require('./src/directoryserver.js');
let logFd;
@@ -38,8 +38,8 @@ async function startServers() {
await proxyAuth.start();
await ldap.start();
const conf = await settings.getUserDirectoryConfig();
if (conf.enabled) await userdirectory.start();
const conf = await settings.getDirectoryServerConfig();
if (conf.enabled) await directoryServer.start();
}
async function main() {
@@ -49,12 +49,18 @@ async function main() {
// require this here so that logging handler is already setup
const debug = require('debug')('box:box');
process.on('SIGHUP', async function () {
debug('Received SIGHUP. Re-reading configs.');
const conf = await settings.getDirectoryServerConfig();
if (conf.enabled) await directoryServer.checkCertificate();
});
process.on('SIGINT', async function () {
debug('Received SIGINT. Shutting down.');
await proxyAuth.stop();
await server.stop();
await userdirectory.stop();
await directoryServer.stop();
await ldap.stop();
setTimeout(process.exit.bind(process), 3000);
});
@@ -64,7 +70,7 @@ async function main() {
await proxyAuth.stop();
await server.stop();
await userdirectory.stop();
await directoryServer.stop();
await ldap.stop();
setTimeout(process.exit.bind(process), 3000);
});
@@ -0,0 +1,9 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('DELETE FROM settings WHERE name=?', [ 'license_key' ], callback);
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,9 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'appstore_api_token', 'cloudron_token' ], callback);
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,37 @@
'use strict';
const superagent = require('superagent');
exports.up = function(db, callback) {
db.all('SELECT value FROM settings WHERE name="api_server_origin"', function (error, results) {
if (error || results.length === 0) return callback(error);
const apiServerOrigin = results[0].value;
db.all('SELECT value FROM settings WHERE name="appstore_api_token"', function (error, results) {
if (error || results.length === 0) return callback(error);
const apiToken = results[0].value;
console.log(`Getting appstore web token from ${apiServerOrigin}`);
superagent.post(`${apiServerOrigin}/api/v1/user_token`)
.send({})
.query({ accessToken: apiToken })
.timeout(30 * 1000).end(function (error, response) {
if (error && !error.response) {
console.log('Network error getting web token', error);
return callback();
}
if (response.statusCode !== 201 || !response.body.accessToken) {
console.log(`Bad status getting web token: ${response.status} ${response.text}`);
return callback();
}
db.runSql('INSERT settings (name, value) VALUES(?, ?)', [ 'appstore_web_token', response.body.accessToken ], callback);
});
});
});
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,16 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups ADD COLUMN label VARCHAR(128) DEFAULT ""', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN label', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,51 @@
'use strict';
const async = require('async'),
hat = require('../src/hat.js');
exports.up = function(db, callback) {
db.all('SELECT * from backups', function (error, allBackups) {
if (error) return callback(error);
console.log(`Fixing up ${allBackups.length} backup entries`);
const idMap = {};
allBackups.forEach(b => {
b.remotePath = b.id;
b.id = `${b.type}_${b.identifier}_v${b.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
idMap[b.remotePath] = b.id;
});
db.runSql('ALTER TABLE backups ADD COLUMN remotePath VARCHAR(256)', function (error) {
if (error) return callback(error);
db.runSql('ALTER TABLE backups CHANGE COLUMN dependsOn dependsOnJson TEXT', function (error) {
if (error) return callback(error);
async.eachSeries(allBackups, function (backup, iteratorDone) {
const dependsOnPaths = backup.dependsOn ? backup.dependsOn.split(',') : []; // previously, it was paths
let dependsOnIds = [];
dependsOnPaths.forEach(p => { if (idMap[p]) dependsOnIds.push(idMap[p]); });
db.runSql('UPDATE backups SET id = ?, remotePath = ?, dependsOnJson = ? WHERE id = ?', [ backup.id, backup.remotePath, JSON.stringify(dependsOnIds), backup.remotePath ], iteratorDone);
}, function (error) {
if (error) return callback(error);
db.runSql('ALTER TABLE backups MODIFY COLUMN remotePath VARCHAR(256) NOT NULL UNIQUE', callback);
});
});
});
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN remotePath', function (error) {
if (error) console.error(error);
db.runSql('ALTER TABLE backups RENAME COLUMN dependsOnJson to dependsOn', function (error) {
if (error) return callback(error);
callback(error);
});
});
};
@@ -0,0 +1,22 @@
'use strict';
const async = require('async');
exports.up = function(db, callback) {
db.all('SELECT * FROM apps', function (error, apps) {
if (error) return callback(error);
async.eachSeries(apps, function (app, iteratorDone) {
const manifest = JSON.parse(app.manifestJson);
const hasSso = !!manifest.addons['proxyAuth'] || !!manifest.addons['ldap'];
if (hasSso || !app.sso) return iteratorDone();
console.log(`Unsetting sso flag of ${app.id}`);
db.runSql('UPDATE apps SET sso=? WHERE id=?', [ 0, app.id ], iteratorDone);
}, callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,20 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT * FROM settings WHERE name = ?', [ 'api_server_origin' ], function (error, result) {
if (error || result.length === 0) return callback(error);
let consoleOrigin;
switch (result[0].value) {
case 'https://api.dev.cloudron.io': consoleOrigin = 'https://console.dev.cloudron.io'; break;
case 'https://api.staging.cloudron.io': consoleOrigin = 'https://console.staging.cloudron.io'; break;
default: consoleOrigin = 'https://console.cloudron.io'; break;
}
db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'console_server_origin', consoleOrigin ], callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,9 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN backgroundImage MEDIUMBLOB', callback);
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users DROP COLUMN backgroundImage', callback);
};
@@ -0,0 +1,12 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN mailboxDisplayName VARCHAR(128) DEFAULT "" NOT NULL', [], callback);
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN mailboxDisplayName', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,53 @@
'use strict';
const path = require('path'),
safe = require('safetydance'),
uuid = require('uuid');
function getMountPoint(dataDir) {
const output = safe.child_process.execSync(`df --output=target "${dataDir}" | tail -1`, { encoding: 'utf8' });
if (!output) return dataDir;
const mountPoint = output.trim();
if (mountPoint === '/') return dataDir;
return mountPoint;
}
exports.up = async function(db) {
// use safe() here because this migration failed midway in 7.2.4
await safe(db.runSql('ALTER TABLE apps ADD storageVolumeId VARCHAR(128), ADD FOREIGN KEY(storageVolumeId) REFERENCES volumes(id)'));
await safe(db.runSql('ALTER TABLE apps ADD storageVolumePrefix VARCHAR(128)'));
await safe(db.runSql('ALTER TABLE apps ADD CONSTRAINT apps_storageVolume UNIQUE (storageVolumeId, storageVolumePrefix)'));
const apps = await db.runSql('SELECT * FROM apps WHERE dataDir IS NOT NULL');
for (const app of apps) {
const allVolumes = await db.runSql('SELECT * FROM volumes');
console.log(`data-dir (${app.id}): migrating data dir ${app.dataDir}`);
const mountPoint = getMountPoint(app.dataDir);
const prefix = path.relative(mountPoint, app.dataDir);
console.log(`data-dir (${app.id}): migrating to mountpoint ${mountPoint} and prefix ${prefix}`);
const volume = allVolumes.find(v => v.hostPath === mountPoint);
if (volume) {
console.log(`data-dir (${app.id}): using existing volume ${volume.id}`);
await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ volume.id, prefix, app.id ]);
continue;
}
const id = uuid.v4().replace(/-/g, ''); // to make systemd mount file names more readable
const name = `appdata-${id}`;
const type = app.dataDir === mountPoint ? 'filesystem' : 'mountpoint';
console.log(`data-dir (${app.id}): creating new volume ${id}`);
await db.runSql('INSERT INTO volumes (id, name, hostPath, mountType, mountOptionsJson) VALUES (?, ?, ?, ?, ?)', [ id, name, mountPoint, type, JSON.stringify({}) ]);
await db.runSql('UPDATE apps SET storageVolumeId=?, storageVolumePrefix=? WHERE id=?', [ id, prefix, app.id ]);
}
await db.runSql('ALTER TABLE apps DROP COLUMN dataDir');
};
exports.down = async function(/*db*/) {
};
@@ -0,0 +1,9 @@
'use strict';
exports.up = async function (db) {
await db.runSql('ALTER TABLE apps ADD COLUMN upstreamUri VARCHAR(256) DEFAULT ""');
};
exports.down = async function (db) {
await db.runSql('ALTER TABLE apps DROP COLUMN upstreamUri');
};
@@ -0,0 +1,15 @@
'use strict';
exports.up = async function(db) {
const result = await db.runSql('SELECT * FROM settings WHERE name=?', [ 'backup_config' ]);
if (!result.length) return;
const backupConfig = JSON.parse(result[0].value);
if (backupConfig.encryption && backupConfig.format === 'rsync') backupConfig.encryptedFilenames = true;
await db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(backupConfig), 'backup_config', ]);
};
exports.down = async function(/* db */) {
};
@@ -0,0 +1,22 @@
'use strict';
exports.up = async function (db) {
var cmd = 'CREATE TABLE applinks(' +
'id VARCHAR(128) NOT NULL UNIQUE,' +
'accessRestrictionJson TEXT,' +
'creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
'updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
'ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,' +
'label VARCHAR(128),' +
'tagsJson VARCHAR(2048),' +
'icon MEDIUMBLOB,' +
'upstreamUri VARCHAR(256) DEFAULT "",' +
'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';
await db.runSql(cmd);
};
exports.down = async function (db) {
await db.runSql('DROP TABLE applinks');
};
@@ -0,0 +1,17 @@
'use strict';
const async = require('async');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN storageQuota BIGINT DEFAULT 0'),
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN messagesQuota BIGINT DEFAULT 0'),
], callback);
};
exports.down = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE mailboxes DROP COLUMN storageQuota'),
db.runSql.bind(db, 'ALTER TABLE mailboxes DROP COLUMN messagesQuota')
], callback);
};
@@ -0,0 +1,18 @@
'use strict';
const safe = require('safetydance');
exports.up = async function (db) {
const mailDomains = await db.runSql('SELECT * FROM mail', []);
for (const mailDomain of mailDomains) {
let catchAll = safe.JSON.parse(mailDomain.catchAllJson) || [];
if (catchAll.length === 0) continue;
catchAll = catchAll.map(a => `${a}@${mailDomain.domain}`);
await db.runSql('UPDATE mail SET catchAllJson = ? WHERE domain = ?', [ JSON.stringify(catchAll), mailDomain.domain ]);
}
};
exports.down = async function( /* db */) {
};
@@ -0,0 +1,13 @@
'use strict';
exports.up = async function (db) {
await db.runSql('ALTER TABLE tokens DROP COLUMN scope');
await db.runSql('ALTER TABLE tokens ADD COLUMN scopeJson TEXT');
await db.runSql('UPDATE tokens SET scopeJson = ?', [ JSON.stringify({'*':'rw'})]);
};
exports.down = async function (db) {
await db.runSql('ALTER TABLE tokens ADD COLUMN scope VARCHAR(512) NOT NULL DEFAULT ""');
await db.runSql('ALTER TABLE tokens DROP COLUMN scopeJson');
};
+28 -5
@@ -33,6 +33,7 @@ CREATE TABLE IF NOT EXISTS users(
resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
active BOOLEAN DEFAULT 1,
avatar MEDIUMBLOB NOT NULL,
backgroundImage MEDIUMBLOB,
loginLocationsJson MEDIUMTEXT, // { locations: [{ ip, userAgent, city, country, ts }] }
INDEX creationTime_index (creationTime),
@@ -57,7 +58,7 @@ CREATE TABLE IF NOT EXISTS tokens(
accessToken VARCHAR(128) NOT NULL UNIQUE,
identifier VARCHAR(128) NOT NULL, // resourceId: app id or user id
clientId VARCHAR(128),
scope VARCHAR(512) NOT NULL,
scopeJson TEXT,
expires BIGINT NOT NULL, // FIXME: make this a timestamp
lastUsedTime TIMESTAMP NULL,
PRIMARY KEY(accessToken));
@@ -85,13 +86,15 @@ CREATE TABLE IF NOT EXISTS apps(
enableAutomaticUpdate BOOLEAN DEFAULT 1,
enableMailbox BOOLEAN DEFAULT 1, // whether sendmail addon is enabled
mailboxName VARCHAR(128), // mailbox of this app
mailboxDomain VARCHAR(128), // mailbox domain of this apps
mailboxDomain VARCHAR(128), // mailbox domain of this app
mailboxDisplayName VARCHAR(128), // mailbox display name
enableInbox BOOLEAN DEFAULT 0, // whether recvmail addon is enabled
inboxName VARCHAR(128), // mailbox of this app
inboxDomain VARCHAR(128), // mailbox domain of this apps
inboxDomain VARCHAR(128), // mailbox domain of this app
label VARCHAR(128), // display name
tagsJson VARCHAR(2048), // array of tags
dataDir VARCHAR(256) UNIQUE,
storageVolumeId VARCHAR(128),
storageVolumePrefix VARCHAR(128),
taskId INTEGER, // current task
errorJson TEXT,
servicesConfigJson TEXT, // app services configuration
@@ -99,9 +102,12 @@ CREATE TABLE IF NOT EXISTS apps(
appStoreIcon MEDIUMBLOB,
icon MEDIUMBLOB,
crontab TEXT,
upstreamUri VARCHAR(256) DEFAULT "",
FOREIGN KEY(mailboxDomain) REFERENCES domains(domain),
FOREIGN KEY(taskId) REFERENCES tasks(id),
FOREIGN KEY(storageVolumeId) REFERENCES volumes(id),
UNIQUE (storageVolumeId, storageVolumePrefix),
PRIMARY KEY(id));
CREATE TABLE IF NOT EXISTS appPortBindings(
@@ -133,12 +139,14 @@ CREATE TABLE IF NOT EXISTS appEnvVars(
CREATE TABLE IF NOT EXISTS backups(
id VARCHAR(128) NOT NULL,
remotePath VARCHAR(256) NOT NULL UNIQUE,
label VARCHAR(128) DEFAULT "",
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
packageVersion VARCHAR(128) NOT NULL, /* app version or box version */
encryptionVersion INTEGER, /* when null, unencrypted backup */
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
identifier VARCHAR(128) NOT NULL, /* 'box' or the app id */
dependsOn TEXT, /* comma separated list of objects this backup depends on */
dependsOnJson TEXT, /* JSON array of backup ids this backup depends on */
state VARCHAR(16) NOT NULL,
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
format VARCHAR(16) DEFAULT "tgz",
@@ -209,6 +217,8 @@ CREATE TABLE IF NOT EXISTS mailboxes(
domain VARCHAR(128),
active BOOLEAN DEFAULT 1,
enablePop3 BOOLEAN DEFAULT 0,
storageQuota BIGINT DEFAULT 0,
messagesQuota BIGINT DEFAULT 0,
FOREIGN KEY(domain) REFERENCES mail(domain),
FOREIGN KEY(aliasDomain) REFERENCES mail(domain),
@@ -290,4 +300,17 @@ CREATE TABLE IF NOT EXISTS blobs(
value MEDIUMBLOB,
PRIMARY KEY(id));
CREATE TABLE IF NOT EXISTS appLinks(
id VARCHAR(128) NOT NULL UNIQUE,
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app was installed
updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the last app update was done
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, // when this db record was updated (useful for UI caching)
label VARCHAR(128), // display name
tagsJson VARCHAR(2048), // array of tags
icon MEDIUMBLOB,
upstreamUri VARCHAR(256) DEFAULT "",
PRIMARY KEY(id));
CHARACTER SET utf8 COLLATE utf8_bin;
+1924 -4733
File diff suppressed because it is too large
+24 -43
@@ -12,13 +12,12 @@
},
"dependencies": {
"@google-cloud/dns": "^2.2.4",
"@google-cloud/storage": "^5.16.1",
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
"async": "^3.2.2",
"aws-sdk": "^2.1053.0",
"@google-cloud/storage": "^5.20.5",
"async": "^3.2.4",
"aws-sdk": "^2.1248.0",
"basic-auth": "^2.0.1",
"body-parser": "^1.19.1",
"cloudron-manifestformat": "^5.15.0",
"body-parser": "^1.20.1",
"cloudron-manifestformat": "^5.19.1",
"connect": "^3.7.0",
"connect-lastmile": "^2.1.1",
"connect-timeout": "^1.9.0",
@@ -27,63 +26,45 @@
"cron": "^1.8.2",
"db-migrate": "^0.11.13",
"db-migrate-mysql": "^2.2.0",
"debug": "^4.3.3",
"delay": "^5.0.0",
"dockerode": "^3.3.1",
"ejs": "^3.1.6",
"ejs-cli": "^2.2.3",
"express": "^4.17.2",
"debug": "^4.3.4",
"dockerode": "^3.3.4",
"ejs": "^3.1.8",
"express": "^4.18.2",
"ipaddr.js": "^2.0.1",
"js-yaml": "^4.1.0",
"json": "^11.0.0",
"jsdom": "^20.0.2",
"jsonwebtoken": "^8.5.1",
"ldapjs": "^2.3.2",
"ldapjs": "^2.3.3",
"lodash": "^4.17.21",
"lodash.chunk": "^4.2.0",
"moment": "^2.29.1",
"moment-timezone": "^0.5.34",
"moment": "^2.29.4",
"moment-timezone": "^0.5.38",
"morgan": "^1.10.0",
"multiparty": "^4.2.2",
"multiparty": "^4.2.3",
"mysql": "^2.18.1",
"nodemailer": "^6.7.2",
"nodemailer-smtp-transport": "^2.7.4",
"once": "^1.4.0",
"pretty-bytes": "^5.6.0",
"progress-stream": "^2.0.0",
"proxy-middleware": "^0.15.0",
"qrcode": "^1.5.0",
"nodemailer": "^6.8.0",
"qrcode": "^1.5.1",
"readdirp": "^3.6.0",
"s3-block-read-stream": "^0.5.0",
"safetydance": "^2.2.0",
"semver": "^7.3.5",
"semver": "^7.3.8",
"speakeasy": "^2.0.0",
"split": "^1.0.1",
"superagent": "^7.0.1",
"superagent": "^7.1.5",
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
"tar-stream": "^2.2.0",
"tldjs": "^2.3.1",
"ua-parser-js": "^1.0.2",
"underscore": "^1.13.2",
"ua-parser-js": "^1.0.32",
"underscore": "^1.13.6",
"uuid": "^8.3.2",
"validator": "^13.7.0",
"ws": "^8.4.0",
"ws": "^8.10.0",
"xml2js": "^0.4.23"
},
"devDependencies": {
"expect.js": "*",
"hock": "^1.4.1",
"js2xmlparser": "^4.0.2",
"mocha": "^9.1.3",
"mocha": "^9.2.2",
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
"nock": "^13.2.1",
"node-sass": "^7.0.1",
"recursive-readdir": "^2.2.2"
"nock": "^13.2.9"
},
"scripts": {
"test": "./runTests",
"postmerge": "/bin/true",
"precommit": "/bin/true",
"prepush": "npm test",
"dashboard": "node_modules/.bin/gulp"
"test": "./run-tests"
}
}
+5 -5
@@ -6,7 +6,7 @@ readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly DATA_DIR="${HOME}/.cloudron_test"
readonly DEFAULT_TESTS="./src/test/*-test.js ./src/routes/test/*-test.js"
! "${source_dir}/src/test/checkInstall" && exit 1
! "${source_dir}/src/test/check-install" && exit 1
# cleanup old data dirs; some of the docker container data requires sudo to be removed
echo "=> Provide root password to purge any leftover data in ${DATA_DIR} and load apparmor profile:"
@@ -23,7 +23,7 @@ mkdir -p ${DATA_DIR}
cd ${DATA_DIR}
mkdir -p appsdata
mkdir -p boxdata/box boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications/dashboard platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
sudo mkdir -p /mnt/cloudron-test-music /media/cloudron-test-music # volume test
# translations
@@ -39,7 +39,7 @@ if [[ -z ${FAST+x} ]]; then
echo "=> Delete all docker containers first"
docker ps -qa --filter "label=isCloudronManaged" | xargs --no-run-if-empty docker rm -f
docker rm -f mysql-server
echo "==> To skip this run with: FAST=1 ./runTests"
echo "==> To skip this run with: FAST=1 ./run-tests"
else
echo "==> WARNING!! Skipping docker container cleanup, the database might not be pristine!"
fi
@@ -79,10 +79,10 @@ echo "=> Run database migrations"
cd "${source_dir}"
BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up
echo "=> Run tests with mocha"
TESTS=${DEFAULT_TESTS}
if [[ $# -gt 0 ]]; then
TESTS="$*"
fi
BOX_ENV=test ./node_modules/mocha/bin/_mocha --bail --no-timeouts --exit -R spec ${TESTS}
echo "=> Run tests with mocha"
BOX_ENV=test ./node_modules/.bin/mocha --bail --no-timeouts --exit -R spec ${TESTS}
+69 -36
@@ -11,7 +11,7 @@ trap exitHandler EXIT
# change this to a hash when we make an upgrade release
readonly LOG_FILE="/var/log/cloudron-setup.log"
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
readonly MINIMUM_MEMORY="960" # this is mostly reported for 1GB main memory (DO 992, EC2 967, Linode 989, Serverdiscounter.com 974)
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
@@ -26,8 +26,8 @@ readonly GREEN='\033[32m'
readonly DONE='\033[m'
# verify the system has minimum requirements met
if [[ "${rootfs_type}" != "ext4" ]]; then
echo "Error: Cloudron requires '/' to be ext4" # see #364
if [[ "${rootfs_type}" != "ext4" && "${rootfs_type}" != "xfs" ]]; then
echo "Error: Cloudron requires '/' to be ext4 or xfs" # see #364
exit 1
fi
@@ -46,23 +46,30 @@ if [[ "$(uname -m)" != "x86_64" ]]; then
exit 1
fi
if cvirt=$(systemd-detect-virt --container); then
echo "Error: Cloudron does not support ${cvirt}, only runs on bare metal or with full hardware virtualization"
exit 1
fi
# do not use is-active in case box service is down and user attempts to re-install
if systemctl cat box.service >/dev/null 2>&1; then
echo "Error: Cloudron is already installed. To reinstall, start afresh"
exit 1
fi
initBaseImage="true"
provider="generic"
requestedVersion=""
installServerOrigin="https://api.cloudron.io"
apiServerOrigin="https://api.cloudron.io"
webServerOrigin="https://cloudron.io"
consoleServerOrigin="https://console.cloudron.io"
sourceTarballUrl=""
rebootServer="true"
setupToken=""
setupToken="" # this is a OTP for securing an installation (https://forum.cloudron.io/topic/6389/add-password-for-initial-configuration)
appstoreSetupToken=""
redo="false"
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,generate-setup-token" -n "$0" -- "$@")
args=$(getopt -o "" -l "help,provider:,version:,env:,skip-reboot,generate-setup-token,setup-token:,redo" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
@@ -74,17 +81,20 @@ while true; do
if [[ "$2" == "dev" ]]; then
apiServerOrigin="https://api.dev.cloudron.io"
webServerOrigin="https://dev.cloudron.io"
consoleServerOrigin="https://console.dev.cloudron.io"
installServerOrigin="https://api.dev.cloudron.io"
elif [[ "$2" == "staging" ]]; then
apiServerOrigin="https://api.staging.cloudron.io"
webServerOrigin="https://staging.cloudron.io"
consoleServerOrigin="https://console.staging.cloudron.io"
installServerOrigin="https://api.staging.cloudron.io"
elif [[ "$2" == "unstable" ]]; then
installServerOrigin="https://api.dev.cloudron.io"
fi
shift 2;;
--skip-baseimage-init) initBaseImage="false"; shift;;
--skip-reboot) rebootServer="false"; shift;;
--redo) redo="true"; shift;;
--setup-token) appstoreSetupToken="$2"; shift 2;;
--generate-setup-token) setupToken="$(openssl rand -hex 10)"; shift;;
--) break;;
*) echo "Unknown option $1"; exit 1;;
@@ -99,14 +109,16 @@ fi
# Only --help works with mismatched ubuntu
ubuntu_version=$(lsb_release -rs)
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" ]]; then
echo "Cloudron requires Ubuntu 16.04, 18.04 or 20.04" > /dev/stderr
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" && "${ubuntu_version}" != "22.04" ]]; then
echo "Cloudron requires Ubuntu 18.04, 20.04, 22.04" > /dev/stderr
exit 1
fi
if which nginx >/dev/null || which docker >/dev/null || which node > /dev/null; then
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
exit 1
if [[ "${redo}" == "false" ]]; then
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
exit 1
fi
fi
# Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
@@ -143,17 +155,15 @@ echo ""
echo " Join us at https://forum.cloudron.io for any questions."
echo ""
if [[ "${initBaseImage}" == "true" ]]; then
echo "=> Updating apt and installing script dependencies"
if ! apt-get update &>> "${LOG_FILE}"; then
echo "Could not update package repositories. See ${LOG_FILE}"
exit 1
fi
echo "=> Updating apt and installing script dependencies"
if ! apt-get update &>> "${LOG_FILE}"; then
echo "Could not update package repositories. See ${LOG_FILE}"
exit 1
fi
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
exit 1
fi
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
exit 1
fi
echo "=> Checking version"
@@ -181,15 +191,13 @@ if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
exit 1
fi
if [[ "${initBaseImage}" == "true" ]]; then
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
# initializeBaseUbuntuImage.sh args (provider, infraversion path) are only to support installation of pre 5.3 Cloudrons
if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "generic" "../src" &>> "${LOG_FILE}"; then
echo "Init script failed. See ${LOG_FILE} for details"
exit 1
fi
echo ""
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
init_ubuntu_script=$(test -f "${box_src_tmp_dir}/scripts/init-ubuntu.sh" && echo "${box_src_tmp_dir}/scripts/init-ubuntu.sh" || echo "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh")
if ! /bin/bash "${init_ubuntu_script}" &>> "${LOG_FILE}"; then
echo "Init script failed. See ${LOG_FILE} for details"
exit 1
fi
echo ""
# The provider flag is still used for marketplace images
echo "=> Installing Cloudron version ${version} (this takes some time) ..."
@@ -204,6 +212,20 @@ fi
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('api_server_origin', '${apiServerOrigin}');" 2>/dev/null
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('web_server_origin', '${webServerOrigin}');" 2>/dev/null
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('console_server_origin', '${consoleServerOrigin}');" 2>/dev/null
if [[ -n "${appstoreSetupToken}" ]]; then
if ! setupResponse=$(curl -sX POST -H "Content-type: application/json" --data "{\"setupToken\": \"${appstoreSetupToken}\"}" "${apiServerOrigin}/api/v1/cloudron_setup_done"); then
echo "Could not complete setup. See ${LOG_FILE} for details"
exit 1
fi
cloudronId=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronId"])')
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('cloudron_id', '${cloudronId}');" 2>/dev/null
appstoreApiToken=$(echo "${setupResponse}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["cloudronToken"])')
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('appstore_api_token', '${appstoreApiToken}');" 2>/dev/null
fi
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
while true; do
@@ -214,20 +236,31 @@ while true; do
sleep 10
done
if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
ip='<IP>'
fi
ip4=$(curl -s --fail --connect-timeout 10 --max-time 10 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
ip6=$(curl -s --fail --connect-timeout 10 --max-time 10 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
url4=""
url6=""
fallbackUrl=""
if [[ -z "${setupToken}" ]]; then
url="https://${ip}"
[[ -n "${ip4}" ]] && url4="https://${ip4}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>"
else
url="https://${ip}/?setupToken=${setupToken}"
[[ -n "${ip4}" ]] && url4="https://${ip4}/?setupToken=${setupToken}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]/?setupToken=${setupToken}"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>?setupToken=${setupToken}"
fi
echo -e "\n\n${GREEN}After reboot, visit ${url} and accept the self-signed certificate to finish setup.${DONE}\n"
echo -e "\n\n${GREEN}After reboot, visit one of the following URLs and accept the self-signed certificate to finish setup.${DONE}\n"
[[ -n "${url4}" ]] && echo -e " * ${GREEN}${url4}${DONE}"
[[ -n "${url6}" ]] && echo -e " * ${GREEN}${url6}${DONE}"
[[ -n "${fallbackUrl}" ]] && echo -e " * ${GREEN}${fallbackUrl}${DONE}"
if [[ "${rebootServer}" == "true" ]]; then
systemctl stop box mysql # sometimes mysql ends up having corrupt privilege tables
read -p "The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
# https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#ANSI_002dC-Quoting
read -p $'\n'"The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
yn=${yn:-y}
case $yn in
[Yy]* ) exitHandler; systemctl reboot;;
+26 -32
@@ -8,7 +8,7 @@ set -eu -o pipefail
PASTEBIN="https://paste.cloudron.io"
OUT="/tmp/cloudron-support.log"
LINE="\n========================================================\n"
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQVilclYAIu+ioDp/sgzzFz6YU0hPcRYY7ze/LiF/lC7uQqK062O54BFXTvQ3ehtFZCx3bNckjlT2e6gB8Qq07OM66De4/S/g+HJW4TReY2ppSPMVNag0TNGxDzVH8pPHOysAm33LqT2b6L/wEXwC6zWFXhOhHjcMqXvi8Ejaj20H1HVVcf/j8qs5Thkp9nAaFTgQTPu8pgwD8wDeYX1hc9d0PYGesTADvo6HF4hLEoEnefLw7PaStEbzk2fD3j7/g5r5HcgQQXBe74xYZ/1gWOX2pFNuRYOBSEIrNfJEjFJsqk3NR1+ZoMGK7j+AZBR4k0xbrmncQLcQzl6MMDzkp support@cloudron.io"
CLOUDRON_SUPPORT_PUBLIC_KEY="ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGWS+930b8QdzbchGljt3KSljH9wRhYvht8srrtQHdzg support@cloudron.io"
HELP_MESSAGE="
This script collects diagnostic information to help debug server related issues.
@@ -77,6 +77,31 @@ if [[ "`df --output="avail" /tmp | sed -n 2p`" -lt "5120" ]]; then
exit 1
fi
if [[ "${enableSSH}" == "true" ]]; then
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
ssh_user="cloudron-support"
keys_file="/home/cloudron-support/.ssh/authorized_keys"
echo -e $LINE"SSH"$LINE >> $OUT
echo "Username: ${ssh_user}" >> $OUT
echo "Port: ${ssh_port}" >> $OUT
echo "Key file: ${keys_file}" >> $OUT
echo -n "Enabling ssh access for the Cloudron support team..."
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
touch "${keys_file}" # required for concat to work
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
chmod 600 "${keys_file}"
chown "${ssh_user}" "${keys_file}"
fi
echo "Done"
exit 0
fi
echo -n "Generating Cloudron Support stats..."
# clear file
@@ -119,37 +144,6 @@ iptables -L &>> $OUT
echo "Done"
if [[ "${enableSSH}" == "true" ]]; then
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
permit_root_login=$(grep -q ^PermitRootLogin.*yes /etc/ssh/sshd_config && echo "yes" || echo "no")
# support.js uses similar logic
if [[ -d /home/ubuntu ]]; then
ssh_user="ubuntu"
keys_file="/home/ubuntu/.ssh/authorized_keys"
else
ssh_user="root"
keys_file="/root/.ssh/authorized_keys"
fi
echo -e $LINE"SSH"$LINE >> $OUT
echo "Username: ${ssh_user}" >> $OUT
echo "Port: ${ssh_port}" >> $OUT
echo "PermitRootLogin: ${permit_root_login}" >> $OUT
echo "Key file: ${keys_file}" >> $OUT
echo -n "Enabling ssh access for the Cloudron support team..."
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
touch "${keys_file}" # required for concat to work
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
chmod 600 "${keys_file}"
chown "${ssh_user}" "${keys_file}"
fi
echo "Done"
fi
echo -n "Uploading information..."
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent --data-binary "@$OUT" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
echo "Done"
@@ -41,8 +41,8 @@ if ! $(cd "${SOURCE_DIR}/../dashboard" && git diff --exit-code >/dev/null); then
exit 1
fi
if [[ "$(node --version)" != "v16.13.1" ]]; then
echo "This script requires node 16.13.1"
if [[ "$(node --version)" != "v16.18.1" ]]; then
echo "This script requires node 16.18.1"
exit 1
fi
@@ -1,5 +1,8 @@
#!/bin/bash
# This script is run on the base ubuntu. Put things here which are managed by ubuntu
# This script is also run after ubuntu upgrade
set -euv -o pipefail
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -29,10 +32,37 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
case "${ubuntu_version}" in
16.04)
gpg_package="gnupg"
mysql_package="mysql-server-5.7"
ntpd_package=""
python_package="python2.7"
nginx_package="" # we use custom package for TLS v1.3 support
;;
18.04)
gpg_package="gpg"
mysql_package="mysql-server-5.7"
ntpd_package=""
python_package="python2.7"
nginx_package="" # we use custom package for TLS v1.3 support
;;
20.04)
gpg_package="gpg"
mysql_package="mysql-server-8.0"
ntpd_package="systemd-timesyncd"
python_package="python3.8"
nginx_package="nginx-full"
;;
22.04)
gpg_package="gpg"
mysql_package="mysql-server-8.0"
ntpd_package="systemd-timesyncd"
python_package="python3.10"
nginx_package="nginx-full"
;;
esac
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
mysql_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "mysql-server-8.0" || echo "mysql-server-5.7")
ntpd_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "systemd-timesyncd" || echo "")
apt-get -y install --no-install-recommends \
acl \
apparmor \
@@ -45,11 +75,12 @@ apt-get -y install --no-install-recommends \
$gpg_package \
ipset \
iptables \
libpython2.7 \
lib${python_package} \
linux-generic \
logrotate \
$mysql_package \
nfs-common \
$nginx_package \
$ntpd_package \
openssh-server \
pwgen \
@@ -62,12 +93,6 @@ apt-get -y install --no-install-recommends \
unzip \
xfsprogs
echo "==> installing nginx for xenial for TLSv3 support"
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y /tmp/nginx.deb
rm /tmp/nginx.deb
# on some providers like scaleway the sudo file is changed and we want to keep the old one
apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends sudo
@@ -75,36 +100,7 @@ apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
echo "==> Installing node.js"
readonly node_version=16.13.1
mkdir -p /usr/local/node-${node_version}
curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-${node_version}
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
apt-get install -y --no-install-recommends python # Install python which is required for npm rebuild
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
echo "==> Installing Docker"
# create systemd drop-in file. if you channge options here, be sure to fixup installer.sh as well
mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf
# there are 3 packages for docker - containerd, CLI and the daemon
readonly docker_version=20.10.12
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.9-1_amd64.deb" -o /tmp/containerd.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
if [[ "${storage_driver}" != "overlay2" ]]; then
echo "Docker is using "${storage_driver}" instead of overlay2"
exit 1
fi
apt-get install -y --no-install-recommends $python_package # Install python which is required for npm rebuild
# do not upgrade grub because it might prompt user and break this script
echo "==> Enable memory accounting"
@@ -112,30 +108,26 @@ apt-get -y --no-upgrade --no-install-recommends install grub2-common
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
update-grub
echo "==> Downloading docker images"
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
echo "No infra_versions.js found"
exit 1
fi
images=$(node -e "var i = require('${arg_infraversionpath}/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
echo -e "\tPulling docker images: ${images}"
for image in ${images}; do
docker pull "${image}"
docker pull "${image%@sha256:*}" # this will tag the image for readability
done
echo "==> Install collectd"
# without this, libnotify4 will install gnome-shell
apt-get install -y libnotify4 --no-install-recommends
if ! apt-get install -y --no-install-recommends libcurl3-gnutls collectd collectd-utils; then
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
fi
apt-get install -y libnotify4 libcurl3-gnutls --no-install-recommends
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
[[ "${ubuntu_version}" == "20.04" ]] && echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
if [[ "${ubuntu_version}" == "22.04" ]]; then
readonly launchpad="https://launchpad.net/ubuntu/+source/collectd/5.12.0-9/+build/23189375/+files"
cd /tmp && wget -q "${launchpad}/collectd_5.12.0-9_amd64.deb" "${launchpad}/collectd-utils_5.12.0-9_amd64.deb" "${launchpad}/collectd-core_5.12.0-9_amd64.deb" "${launchpad}/libcollectdclient1_5.12.0-9_amd64.deb"
cd /tmp && apt install -y --no-install-recommends ./libcollectdclient1_5.12.0-9_amd64.deb ./collectd-core_5.12.0-9_amd64.deb ./collectd_5.12.0-9_amd64.deb ./collectd-utils_5.12.0-9_amd64.deb && rm -f /tmp/collectd_*.deb
echo -e "\nLD_PRELOAD=/usr/lib/python3.10/config-3.10-x86_64-linux-gnu/libpython3.10.so" >> /etc/default/collectd
else
if ! apt-get install -y --no-install-recommends collectd collectd-utils; then
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
echo "Failed to install collectd, continuing anyway. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
fi
if [[ "${ubuntu_version}" == "20.04" ]]; then
echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
fi
fi
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
# some hosts like atlantic install ntp which conflicts with timedatectl. https://serverfault.com/questions/1024770/ubuntu-20-04-time-sync-problems-and-possibly-incorrect-status-information
echo "==> Configuring host"
@@ -153,7 +145,7 @@ sed -e '/Port 22/ i # NOTE: Cloudron only supports moving SSH to port 202. See h
# https://bugs.launchpad.net/ubuntu/+source/base-files/+bug/1701068
echo "==> Disabling motd news"
if [ -f "/etc/default/motd-news" ]; then
if [[ -f "/etc/default/motd-news" ]]; then
sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
fi
@@ -187,6 +179,21 @@ systemctl disable systemd-resolved || true
# on vultr, ufw is enabled by default. we have our own firewall
ufw disable || true
# we need unbound to work as this is required for installer.sh to do any DNS requests
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: no" > /etc/unbound/unbound.conf.d/cloudron-network.conf
# we need unbound to work as this is required for installer.sh to do any DNS requests. control-enable is for https://github.com/NLnetLabs/unbound/issues/806
echo -e "server:\n\tinterface: 127.0.0.1\n\nremote-control:\n\tcontrol-enable: no\n" > /etc/unbound/unbound.conf.d/cloudron-network.conf
systemctl restart unbound
# Ubuntu 22 has private home directories by default (https://discourse.ubuntu.com/t/private-home-directories-for-ubuntu-21-04-onwards/)
sed -e 's/^HOME_MODE\([[:space:]]\+\).*$/HOME_MODE\10755/' -i /etc/login.defs
# create the yellowtent user. system user has different numeric range, no age and won't show in login/gdm UI
# the nologin will also disable su/login
if ! id yellowtent 2>/dev/null; then
useradd --system --comment "Cloudron Box" --create-home --shell /usr/sbin/nologin yellowtent
fi
# add support user (no password, sudo)
if ! id cloudron-support 2>/dev/null; then
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
fi
+53 -36
@@ -69,14 +69,20 @@ readonly ubuntu_codename=$(lsb_release -cs)
readonly is_update=$(systemctl is-active -q box && echo "yes" || echo "no")
log "Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION)"
log "Updating from $(cat $box_src_dir/VERSION 2>/dev/null) to $(cat $box_src_tmp_dir/VERSION 2>/dev/null)"
log "updating docker"
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
readonly docker_version=20.10.21
readonly containerd_version=1.6.10-1
if ! which docker 2>/dev/null || [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
log "installing/updating docker"
readonly docker_version=20.10.12
if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
# there are 3 packages for docker - containerd, CLI and the daemon
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.9-1_amd64.deb" -o /tmp/containerd.deb
# create systemd drop-in file already to make sure images are with correct driver
mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2 --experimental --ip6tables" > /etc/systemd/system/docker.service.d/cloudron.conf
# there are 3 packages for docker - containerd, CLI and the daemon (https://download.docker.com/linux/ubuntu/dists/jammy/pool/stable/amd64/)
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_${containerd_version}_amd64.deb" -o /tmp/containerd.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
@@ -86,41 +92,40 @@ if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]];
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
fi
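The pinned versions can then be sanity-checked (sketch):
docker version --format '{{.Client.Version}}'    # expect 20.10.21
dpkg-query -W -f='${Version}\n' containerd.io    # expect 1.6.10-1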
# we want at least nginx 1.14 for TLS v1.3 support. Ubuntu 20/22 already has nginx 1.18
# Ubuntu 18 OpenSSL does not have TLS v1.3 support, so we use the upstream nginx packages
readonly nginx_version=$(nginx -v 2>&1)
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
if [[ "${ubuntu_version}" == "20.04" ]]; then
if [[ "${nginx_version}" == *"Ubuntu"* ]]; then
log "switching nginx to ubuntu package"
prepare_apt_once
apt remove -y nginx
apt install -y nginx-full
fi
elif [[ "${ubuntu_version}" == "18.04" ]]; then
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing/updating nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
prepare_apt_once
prepare_apt_once
# apt install with install deps (as opposed to dpkg -i)
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
rm /tmp/nginx.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
rm /tmp/nginx.deb
fi
fi
if ! which mount.nfs; then
log "installing nfs-common"
prepare_apt_once
apt install -y nfs-common
fi
if ! which sshfs; then
log "installing sshfs"
prepare_apt_once
apt install -y sshfs
fi
log "updating node"
readonly node_version=16.13.1
if [[ "$(node --version)" != "v${node_version}" ]]; then
readonly node_version=16.18.1
if ! which node 2>/dev/null || [[ "$(node --version)" != "v${node_version}" ]]; then
log "installing/updating node ${node_version}"
mkdir -p /usr/local/node-${node_version}
$curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-${node_version}
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
rm -rf /usr/local/node-14.17.6
rm -rf /usr/local/node-16.14.2
fi
# this is here (and not in updater.js) because rebuild requires the above node
# note that rebuild requires the above node
for try in `seq 1 10`; do
# for reasons unknown, the dtrace package will fail. rebuilding a second time works
@@ -138,15 +143,21 @@ if [[ ${try} -eq 10 ]]; then
fi
log "downloading new addon images"
images=$(node -e "var i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
images=$(node -e "let i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
log "\tPulling docker images: ${images}"
if ! curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip; then
docker_registry=registry.ipv6.docker.com
else
docker_registry=registry-1.docker.io
fi
for image in ${images}; do
while ! docker pull "${image}"; do # this pulls the image using the sha256
while ! docker pull "${docker_registry}/${image}"; do # this pulls the image using the sha256
log "Could not pull ${image}"
sleep 5
done
while ! docker pull "${image%@sha256:*}"; do # this will tag the image for readability
while ! docker pull "${docker_registry}/${image%@sha256:*}"; do # this will tag the image for readability
log "Could not pull ${image%@sha256:*}"
sleep 5
done
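For illustration, the tag-only reference used for readability is just the digest stripped via parameter expansion (hypothetical image value):
image="cloudron/base:4.2.0@sha256:0123abcd"    # hypothetical entry from infra_version.js
echo "${image%@sha256:*}"                      # -> cloudron/base:4.2.0, pulled again so 'docker images' shows a name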
@@ -159,15 +170,21 @@ CLOUDRON_SYSLOG_VERSION="1.1.0"
while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLOUDRON_SYSLOG_VERSION} ]]; do
rm -rf "${CLOUDRON_SYSLOG_DIR}"
mkdir -p "${CLOUDRON_SYSLOG_DIR}"
if npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
# verbatim is not needed in node 18 since that is the default there. in node 16, ipv4 is preferred and this breaks on ipv6 only servers
if NODE_OPTIONS="--dns-result-order=verbatim" npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
log "Failed to install cloudron-syslog, trying again"
sleep 5
done
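The effect of the verbatim flag is easy to see directly (sketch; any dual-stack hostname works):
node -e 'require("dns").lookup("nodejs.org", { all: true }, (e, a) => console.log(a))'   # node 16 puts IPv4 first by default
NODE_OPTIONS="--dns-result-order=verbatim" node -e 'require("dns").lookup("nodejs.org", { all: true }, (e, a) => console.log(a))'   # resolver order kept, so IPv6-only servers work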
if ! id "${user}" 2>/dev/null; then
useradd "${user}" -m
log "creating cloudron-support user"
if ! id cloudron-support 2>/dev/null; then
useradd --system --comment "Cloudron Support (support@cloudron.io)" --create-home --no-user-group --shell /bin/bash cloudron-support
fi
log "locking the ${user} account"
usermod --shell /usr/sbin/nologin "${user}"
passwd --lock "${user}"
if [[ "${is_update}" == "yes" ]]; then
log "stop box service for update"
${box_src_dir}/setup/stop.sh
+32
@@ -0,0 +1,32 @@
#!/bin/bash
set -eu -o pipefail
readonly logfile="/home/yellowtent/platformdata/logs/box.log"
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
echo "This will re-create all the containers. Services will go down for a bit."
read -p "Do you want to proceed? (y/N) " -n 1 -r choice
echo
if [[ ! $choice =~ ^[Yy]$ ]]; then
exit 1
fi
echo -n "Re-creating addon containers (this takes a while) ."
line_count=$(wc -l < "${logfile}")
sed -e 's/"version": ".*",/"version":"48.0.0",/' -i /home/yellowtent/platformdata/INFRA_VERSION
systemctl restart box
while ! tail -n "+${line_count}" "${logfile}" | grep -q "platform is ready"; do
echo -n "."
sleep 2
done
echo -e "\nDone.\nThe Cloudron dashboard will say 'Configuring (Queued)' for each app. The apps will come up in a short while."
+15 -17
@@ -20,7 +20,6 @@ readonly BOX_DATA_DIR="${HOME_DIR}/boxdata/box"
readonly MAIL_DATA_DIR="${HOME_DIR}/boxdata/mail"
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly json="$(realpath ${script_dir}/../node_modules/.bin/json)"
readonly ubuntu_version=$(lsb_release -rs)
cp -f "${script_dir}/../scripts/cloudron-support" /usr/bin/cloudron-support
@@ -57,9 +56,10 @@ mkdir -p "${PLATFORM_DATA_DIR}/mysql"
mkdir -p "${PLATFORM_DATA_DIR}/postgresql"
mkdir -p "${PLATFORM_DATA_DIR}/mongodb"
mkdir -p "${PLATFORM_DATA_DIR}/redis"
mkdir -p "${PLATFORM_DATA_DIR}/tls"
mkdir -p "${PLATFORM_DATA_DIR}/addons/mail/banner" \
"${PLATFORM_DATA_DIR}/addons/mail/dkim"
mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
mkdir -p "${PLATFORM_DATA_DIR}/collectd"
mkdir -p "${PLATFORM_DATA_DIR}/logrotate.d"
mkdir -p "${PLATFORM_DATA_DIR}/acme"
mkdir -p "${PLATFORM_DATA_DIR}/backup"
@@ -71,6 +71,8 @@ mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" \
mkdir -p "${PLATFORM_DATA_DIR}/update"
mkdir -p "${PLATFORM_DATA_DIR}/sftp/ssh" # sftp keys
mkdir -p "${PLATFORM_DATA_DIR}/firewall"
mkdir -p "${PLATFORM_DATA_DIR}/sshfs"
mkdir -p "${PLATFORM_DATA_DIR}/cifs"
# ensure backups folder exists and is writeable
mkdir -p /var/backups
@@ -105,8 +107,6 @@ unbound-anchor -a /var/lib/unbound/root.key
log "Adding systemd services"
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
[[ "${ubuntu_version}" == "16.04" ]] && sed -e 's/MemoryMax/MemoryLimit/g' -i /etc/systemd/system/box.service
[[ "${ubuntu_version}" == "16.04" ]] && sed -e 's/Type=notify/Type=simple/g' -i /etc/systemd/system/unbound.service
systemctl daemon-reload
systemctl enable --now cloudron-syslog
systemctl enable unbound
@@ -127,19 +127,13 @@ systemctl restart unbound
systemctl restart cloudron-syslog
log "Configuring sudoers"
rm -f /etc/sudoers.d/${USER}
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
rm -f /etc/sudoers.d/${USER} /etc/sudoers.d/cloudron
cp "${script_dir}/start/sudoers" /etc/sudoers.d/cloudron
log "Configuring collectd"
rm -rf /etc/collectd /var/log/collectd.log
rm -rf /etc/collectd /var/log/collectd.log "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
if [[ "${ubuntu_version}" == "20.04" ]]; then
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
if ! grep -q LD_PRELOAD /etc/default/collectd; then
echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
fi
fi
systemctl restart collectd
log "Configuring sysctl"
@@ -167,7 +161,7 @@ log "Configuring nginx"
# link nginx config to system config
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx
mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications"
mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications/dashboard"
mkdir -p "${PLATFORM_DATA_DIR}/nginx/cert"
cp "${script_dir}/start/nginx/nginx.conf" "${PLATFORM_DATA_DIR}/nginx/nginx.conf"
cp "${script_dir}/start/nginx/mime.types" "${PLATFORM_DATA_DIR}/nginx/mime.types"
@@ -178,7 +172,7 @@ fi
# worker_rlimit_nofile in nginx config can be max this number
mkdir -p /etc/systemd/system/nginx.service.d
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf; then
if ! grep -q "^LimitNOFILE=" /etc/systemd/system/nginx.service.d/cloudron.conf 2>/dev/null; then
echo -e "[Service]\nLimitNOFILE=16384\n" > /etc/systemd/system/nginx.service.d/cloudron.conf
fi
@@ -213,7 +207,8 @@ done
readonly mysql_root_password="password"
mysqladmin -u root -ppassword password password # reset default root password
if [[ "${ubuntu_version}" == "20.04" ]]; then
readonly mysqlVersion=$(mysql -NB -u root -p${mysql_root_password} -e 'SELECT VERSION()' 2>/dev/null)
if [[ "${mysqlVersion}" == "8.0."* ]]; then
# mysql 8 added a new caching_sha2_password scheme which mysqljs does not support
mysql -u root -p${mysql_root_password} -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY '${mysql_root_password}';"
fi
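What the ALTER achieved can be verified with the same client flags used above (sketch):
mysql -u root -ppassword -NB -e "SELECT user, plugin FROM mysql.user WHERE user = 'root' AND host = 'localhost'"
# expect: root  mysql_native_password (instead of MySQL 8's default caching_sha2_password)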
@@ -230,11 +225,14 @@ fi
rm -f /etc/cloudron/cloudron.conf
# 7.3 branch only: we had a bug in 7.3 where renewed certs were not written to disk. this will rebuild nginx/certs in the cron job
touch "${PLATFORM_DATA_DIR}/nginx/rebuild-needed"
log "Changing ownership"
# note, change ownership after db migrate. this allow db migrate to move files around as root and then we can fix it up here
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
chown -R "${USER}" /etc/cloudron
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall"
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall" "${PLATFORM_DATA_DIR}/sshfs" "${PLATFORM_DATA_DIR}/cifs" "${PLATFORM_DATA_DIR}/tls"
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
chown "${USER}:${USER}" "${APPS_DATA_DIR}"
+7 -1
@@ -61,6 +61,9 @@ ipset create cloudron_ldap_allowlist6 hash:net family inet6 || true
ipset flush cloudron_ldap_allowlist6
ldap_allowlist_json="/home/yellowtent/platformdata/firewall/ldap_allowlist.txt"
# delete any existing redirect rule
$iptables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 2>/dev/null || true
$ip6tables -t nat -D PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004 2>/dev/null || true
if [[ -f "${ldap_allowlist_json}" ]]; then
# without the -n check, a final line missing a trailing newline would be skipped!
while read -r line || [[ -n "$line" ]]; do
@@ -74,8 +77,10 @@ if [[ -f "${ldap_allowlist_json}" ]]; then
done < "${ldap_allowlist_json}"
# ldap server we expose 3004 and also redirect from standard ldaps port 636
$iptables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$iptables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$iptables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist src -p tcp --dport 3004 -j ACCEPT
$ip6tables -t nat -I PREROUTING -p tcp --dport 636 -j REDIRECT --to-ports 3004
$ip6tables -t filter -A CLOUDRON -m set --match-set cloudron_ldap_allowlist6 src -p tcp --dport 3004 -j ACCEPT
fi
@@ -142,6 +147,7 @@ for port in 3306 5432 6379 27017; do
$iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
done
# Add the rate limit chain to input chain
$iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
$ip6tables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null || $ip6tables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
+33 -12
@@ -4,30 +4,51 @@
printf "**********************************************************************\n\n"
cache_file="/var/cache/cloudron-motd-cache"
readonly cache_file4="/var/cache/cloudron-motd-cache4"
readonly cache_file6="/var/cache/cloudron-motd-cache6"
if [[ -z "$(ls -A /home/yellowtent/platformdata/addons/mail/dkim)" ]]; then
if [[ ! -f "${cache_file}" ]]; then
curl --fail --connect-timeout 2 --max-time 2 -q https://ipv4.api.cloudron.io/api/v1/helper/public_ip --output "${cache_file}" || true
fi
if [[ -f "${cache_file}" ]]; then
ip=$(sed -n -e 's/.*"ip": "\(.*\)"/\1/p' /var/cache/cloudron-motd-cache)
url4=""
url6=""
fallbackUrl=""
function detectIp() {
if [[ ! -f "${cache_file4}" ]]; then
ip4=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv4.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
[[ -n "${ip4}" ]] && echo "${ip4}" > "${cache_file4}"
else
ip='<IP>'
ip4=$(cat "${cache_file4}")
fi
if [[ ! -f "${cache_file6}" ]]; then
ip6=$(curl -s --fail --connect-timeout 2 --max-time 2 https://ipv6.api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p' || true)
[[ -n "${ip6}" ]] && echo "${ip6}" > "${cache_file6}"
else
ip6=$(cat "${cache_file6}")
fi
if [[ ! -f /etc/cloudron/SETUP_TOKEN ]]; then
url="https://${ip}"
[[ -n "${ip4}" ]] && url4="https://${ip4}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>"
else
setupToken="$(cat /etc/cloudron/SETUP_TOKEN)"
url="https://${ip}/?setupToken=${setupToken}"
[[ -n "${ip4}" ]] && url4="https://${ip4}/?setupToken=${setupToken}"
[[ -n "${ip6}" ]] && url6="https://[${ip6}]/?setupToken=${setupToken}"
[[ -z "${ip4}" && -z "${ip6}" ]] && fallbackUrl="https://<IP>?setupToken=${setupToken}"
fi
}
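Because the lookups above are cached without expiry, a changed public IP presumably shows up in the MOTD only after the cache files are removed:
rm -f /var/cache/cloudron-motd-cache4 /var/cache/cloudron-motd-cache6   # next login re-runs detectIp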
if [[ -z "$(ls -A /home/yellowtent/platformdata/addons/mail/dkim)" ]]; then
detectIp
printf "\t\t\tWELCOME TO CLOUDRON\n"
printf "\t\t\t-------------------\n"
printf '\n\e[1;32m%-6s\e[m\n\n' "Visit ${url} on your browser and accept the self-signed certificate to finish setup."
printf "Cloudron overview - https://docs.cloudron.io/ \n"
printf '\n\e[1;32m%-6s\e[m\n' "Visit one of the following URLs on your browser and accept the self-signed certificate to finish setup."
[[ -n "${url4}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${url4}"
[[ -n "${url6}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${url6}"
[[ -n "${fallbackUrl}" ]] && printf '\e[1;32m%-6s\e[m\n' " * ${fallbackUrl}"
printf "\nCloudron overview - https://docs.cloudron.io/ \n"
printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
else
printf "\t\t\tNOTE TO CLOUDRON ADMINS\n"
+5 -24
@@ -13,7 +13,7 @@
##############################################################################
Hostname "localhost"
#FQDNLookup true
FQDNLookup false
#BaseDir "/var/lib/collectd"
#PluginDir "/usr/lib/collectd"
#TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db"
@@ -187,9 +187,9 @@ LoadPlugin swap
CalculateNum false
CalculateSum true
CalculateAverage true
CalculateAverage false
CalculateMinimum false
CalculateMaximum true
CalculateMaximum false
CalculateStddev false
</Aggregation>
</Plugin>
@@ -211,28 +211,12 @@ LoadPlugin swap
Interactive false
Import "df"
Import "du"
<Module du>
<Path>
Instance maildata
Dir "/home/yellowtent/boxdata/mail"
</Path>
<Path>
Instance boxdata
Dir "/home/yellowtent/boxdata"
Exclude "mail"
</Path>
<Path>
Instance platformdata
Dir "/home/yellowtent/platformdata"
</Path>
</Module>
Import "docker-stats"
</Plugin>
<Plugin write_graphite>
<Node "graphing">
Host "localhost"
Host "127.0.0.1"
Port "2003"
Protocol "tcp"
LogSendErrors true
@@ -243,6 +227,3 @@ LoadPlugin swap
</Node>
</Plugin>
<Include "/etc/collectd/collectd.conf.d">
Filter "*.conf"
</Include>
+1 -1
@@ -14,7 +14,7 @@ def read():
for d in disks:
device = d[0]
if 'devicemapper' in d[1] or not device.startswith('/dev/'): continue
instance = device[len('/dev/'):].replace('/', '_') # see #348
instance = device[len('/dev/'):].replace('/', '_').replace('.', '_') # see #348
try:
st = os.statvfs(d[1]) # handle disk removal
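The same sanitization by hand, for illustration; slashes and dots in the device name would otherwise be read as separators in the graphite metric path:
echo "/dev/disk/by-id/scsi-0foo.1" | sed -e 's|^/dev/||' -e 's|[/.]|_|g'   # -> disk_by-id_scsi-0foo_1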
+64
@@ -0,0 +1,64 @@
import collectd,os,subprocess,json,re
# https://blog.dbrgn.ch/2017/3/10/write-a-collectd-python-plugin/
def parseSiSize(size):
units = {"B": 1, "KB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12}
number, unit, _ = re.split('([a-zA-Z]+)', size.upper())
return int(float(number)*units[unit])
def parseBinarySize(size):
units = {"B": 1, "KIB": 2**10, "MIB": 2**20, "GIB": 2**30, "TIB": 2**40}
number, unit, _ = re.split('([a-zA-Z]+)', size.upper())
return int(float(number)*units[unit])
def init():
collectd.info('custom docker-status plugin initialized')
def read():
try:
lines = subprocess.check_output('docker stats --format "{{ json . }}" --no-stream --no-trunc', shell=True).decode('utf-8').strip().split("\n")
except Exception as e:
collectd.info('\terror getting docker stats: %s' % (str(e)))
return 0
# Sample line
# {"BlockIO":"430kB / 676kB","CPUPerc":"0.00%","Container":"7eae5e6f4f11","ID":"7eae5e6f4f11","MemPerc":"59.15%","MemUsage":"45.55MiB / 77MiB","Name":"1062eef3-ec96-4d81-9f02-15b7dd81ccb9","NetIO":"1.5MB / 3.48MB","PIDs":"5"}
for line in lines:
stat = json.loads(line)
containerName = stat["Name"] # same as app id
networkData = stat["NetIO"].split("/")
networkRead = parseSiSize(networkData[0].strip())
networkWrite = parseSiSize(networkData[1].strip())
blockData = stat["BlockIO"].split("/")
blockRead = parseSiSize(blockData[0].strip())
blockWrite = parseSiSize(blockData[1].strip())
memUsageData = stat["MemUsage"].split("/")
memUsed = parseBinarySize(memUsageData[0].strip())
memMax = parseBinarySize(memUsageData[1].strip())
cpuPercData = stat["CPUPerc"].strip("%")
cpuPerc = float(cpuPercData)
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db and https://collectd.org/wiki/index.php/Data_source
val = collectd.Values(type='gauge', plugin='docker-stats', plugin_instance=containerName)
val.dispatch(values=[networkRead], type_instance='network-read')
val.dispatch(values=[networkWrite], type_instance='network-write')
val.dispatch(values=[blockRead], type_instance='blockio-read')
val.dispatch(values=[blockWrite], type_instance='blockio-write')
val.dispatch(values=[memUsed], type_instance='mem-used')
val.dispatch(values=[memMax], type_instance='mem-max')
val.dispatch(values=[cpuPerc], type_instance='cpu-perc')
val = collectd.Values(type='counter', plugin='docker-stats', plugin_instance=containerName)
val.dispatch(values=[networkRead], type_instance='network-read')
val.dispatch(values=[networkWrite], type_instance='network-write')
val.dispatch(values=[blockRead], type_instance='blockio-read')
val.dispatch(values=[blockWrite], type_instance='blockio-write')
collectd.register_init(init)
# see Interval setting in collectd.conf for polling interval
collectd.register_read(read)
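The raw data the plugin consumes can be inspected by hand (sketch; assumes jq). Note the mixed units that force the two parsers: docker prints NetIO/BlockIO in SI units (1 kB = 1000 B) but MemUsage in binary units (1 MiB = 1048576 B):
docker stats --no-stream --no-trunc --format '{{ json . }}' | jq -r '[.Name, .MemUsage, .NetIO] | @tsv'
numfmt --from=si 1.5M       # 1500000
numfmt --from=iec-i 45.5Mi  # 47710208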
-105
@@ -1,105 +0,0 @@
import collectd,os,subprocess,sys,re,time
# https://www.programcreek.com/python/example/106897/collectd.register_read
PATHS = [] # { name, dir, exclude }
# there is a pattern in carbon/storage-schemas.conf which stores values every 12h for a year
INTERVAL = 60 * 60 * 12 # twice a day. change values in docker-graphite if you change this
# we used to pass the INTERVAL as a parameter to register_read. however, collectd write_graphite
# takes a bit to load (tcp connection) and drops the du data. this then means that we have to wait
# for INTERVAL secs for du data. instead, we just cache the value for INTERVAL instead
CACHE = dict()
CACHE_TIME = 0
def du(pathinfo):
# -B1 makes du print block sizes and not apparent sizes (to match df which also uses block sizes)
dirname = pathinfo['dir']
cmd = 'timeout 1800 du -DsB1 "{}"'.format(dirname)
if pathinfo['exclude'] != '':
cmd += ' --exclude "{}"'.format(pathinfo['exclude'])
collectd.info('computing size with command: %s' % cmd);
try:
size = subprocess.check_output(cmd, shell=True).split()[0].decode('utf-8')
collectd.info('\tsize of %s is %s (time: %i)' % (dirname, size, int(time.time())))
return size
except Exception as e:
collectd.info('\terror getting the size of %s: %s' % (dirname, str(e)))
return 0
def parseSize(size):
units = {"B": 1, "KB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12}
number, unit, _ = re.split('([a-zA-Z]+)', size.upper())
return int(float(number)*units[unit])
def dockerSize():
# use --format '{{json .}}' to dump the string. '{{if eq .Type "Images"}}{{.Size}}{{end}}' still creates newlines
# https://godoc.org/github.com/docker/go-units#HumanSize is used. so it's 1000 (KB) and not 1024 (KiB)
cmd = 'timeout 1800 docker system df --format "{{.Size}}" | head -n1'
try:
size = subprocess.check_output(cmd, shell=True).strip().decode('utf-8')
collectd.info('size of docker images is %s (%s) (time: %i)' % (size, parseSize(size), int(time.time())))
return parseSize(size)
except Exception as e:
collectd.info('error getting docker images size : %s' % str(e))
return 0
# configure is called for each module block. this is called before init
def configure(config):
global PATHS
for child in config.children:
if child.key != 'Path':
collectd.info('du plugin: Unknown config key "%s"' % child.key)
continue
pathinfo = { 'name': '', 'dir': '', 'exclude': '' }
for node in child.children:
if node.key == 'Instance':
pathinfo['name'] = node.values[0]
elif node.key == 'Dir':
pathinfo['dir'] = node.values[0]
elif node.key == 'Exclude':
pathinfo['exclude'] = node.values[0]
PATHS.append(pathinfo);
collectd.info('du plugin: monitoring %s' % pathinfo['dir']);
def init():
global PATHS
collectd.info('custom du plugin initialized with %s %s' % (PATHS, sys.version))
def read():
global CACHE, CACHE_TIME
# read from cache if < 12 hours
read_cache = (time.time() - CACHE_TIME) < INTERVAL
if not read_cache:
CACHE_TIME = time.time()
for pathinfo in PATHS:
dirname = pathinfo['dir']
if read_cache and dirname in CACHE:
size = CACHE[dirname]
else:
size = du(pathinfo)
CACHE[dirname] = size
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db
val = collectd.Values(type='capacity', plugin='du', plugin_instance=pathinfo['name'])
val.dispatch(values=[size], type_instance='usage')
if read_cache and 'docker' in CACHE:
size = CACHE['docker']
else:
size = dockerSize()
CACHE['docker'] = size
val = collectd.Values(type='capacity', plugin='du', plugin_instance='docker')
val.dispatch(values=[size], type_instance='usage')
collectd.register_init(init)
collectd.register_config(configure)
collectd.register_read(read)
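The shell command the plugin builds can be run standalone to cross-check a reading (paths as configured in collectd.conf above):
timeout 1800 du -DsB1 /home/yellowtent/boxdata --exclude mail   # bytes, block sizes to line up with df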
+4 -2
@@ -1,9 +1,11 @@
# logrotate config for box logs
# keep upto 5 logs of size 10M each
# we rotate weekly, unless 10M was hit. Keep only up to 5 rotated files. Also, delete if > 14 days old
/home/yellowtent/platformdata/logs/box.log {
rotate 5
size 10M
weekly
maxage 14
maxsize 10M
# we never compress so we can simply tail the files
nocompress
copytruncate
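A dry run shows whether the weekly/maxsize/maxage triggers fire as intended (sketch; fragment path assumed, these configs presumably land under platformdata/logrotate.d):
logrotate -d /home/yellowtent/platformdata/logrotate.d/box.log 2>&1 | head   # -d means debug only, nothing is rotated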
+4 -2
@@ -14,7 +14,9 @@
/home/yellowtent/platformdata/logs/updater/*.log {
# only keep one rotated file, we currently do not send that over the api
rotate 1
size 10M
weekly
maxage 14
maxsize 10M
missingok
# we never compress so we can simply tail the files
nocompress
@@ -23,7 +25,7 @@
}
# keep task logs for a week. the 'nocreate' option ensures empty log files are not
# created post rotation
# created post rotation. task logs are kept for 7 days
/home/yellowtent/platformdata/logs/tasks/*.log {
minage 7
daily
+1
@@ -39,4 +39,5 @@ http {
limit_req_zone $binary_remote_addr zone=admin_login:10m rate=10r/s; # 10 requests a second
include applications/*.conf;
include applications/*/*.conf;
}
+8 -3
@@ -1,6 +1,9 @@
# sudo logging breaks journalctl output with very long urls (systemd bug)
Defaults !syslog
Defaults!/home/yellowtent/box/src/scripts/checkvolume.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/checkvolume.sh
Defaults!/home/yellowtent/box/src/scripts/clearvolume.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/clearvolume.sh
@@ -16,9 +19,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmaddondir.sh
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
Defaults!/home/yellowtent/box/src/scripts/configurecollectd.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurecollectd.sh
Defaults!/home/yellowtent/box/src/scripts/collectlogs.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
@@ -64,3 +64,8 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmount.sh
Defaults!/home/yellowtent/box/src/scripts/remountmount.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/remountmount.sh
Defaults!/home/yellowtent/box/src/scripts/du.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/du.sh
cloudron-support ALL=(ALL) NOPASSWD: ALL
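Since a syntax error in a sudoers fragment can lock sudo out entirely, a parse-only validation is cheap (path matching the start.sh step above):
visudo -cf /etc/sudoers.d/cloudron   # exits non-zero on a syntax error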
+1
@@ -13,6 +13,7 @@ Type=idle
WorkingDirectory=/home/yellowtent/box
Restart=always
ExecStart=/home/yellowtent/box/box.js
ExecReload=/usr/bin/kill -HUP $MAINPID
; we run commands like df which will parse properly only with correct locale
Environment="HOME=/home/yellowtent" "USER=yellowtent" "DEBUG=box:*,connect-lastmile,-box:ldap" "BOX_ENV=cloudron" "NODE_ENV=production" "LC_ALL=C"
; kill apptask processes as well
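With ExecReload in place, a reload maps to a SIGHUP of the main process rather than a restart (sketch):
systemctl reload box                        # runs the kill -HUP $MAINPID above
systemctl kill --kill-who=main -s HUP box   # roughly equivalent, bypassing ExecReload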
+6 -1
@@ -6,7 +6,7 @@ server:
interface: 127.0.0.1
interface: 172.18.0.1
ip-freebind: yes
do-ip6: no
do-ip6: yes
access-control: 127.0.0.1 allow
access-control: 172.18.0.1/16 allow
cache-max-negative-ttl: 30
@@ -14,3 +14,8 @@ server:
# enable below for logging to journalctl -u unbound
# verbosity: 5
# log-queries: yes
# https://github.com/NLnetLabs/unbound/issues/806
remote-control:
control-enable: no
-26
@@ -1,26 +0,0 @@
'use strict';
exports = module.exports = {
verifyToken
};
const assert = require('assert'),
BoxError = require('./boxerror.js'),
safe = require('safetydance'),
tokens = require('./tokens.js'),
users = require('./users.js');
async function verifyToken(accessToken) {
assert.strictEqual(typeof accessToken, 'string');
const token = await tokens.getByAccessToken(accessToken);
if (!token) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'No such token');
const user = await users.get(token.identifier);
if (!user) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not found');
if (!user.active) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not active');
await safe(tokens.update(token.id, { lastUsedTime: new Date() })); // ignore any error
return user;
}
+136 -141
@@ -17,9 +17,11 @@ const assert = require('assert'),
fs = require('fs'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
promiseRetry = require('./promise-retry.js'),
superagent = require('superagent'),
safe = require('safetydance'),
users = require('./users.js'),
_ = require('underscore');
const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory',
@@ -29,16 +31,26 @@ const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory',
// https://www.ietf.org/proceedings/92/slides/slides-92-acme-1.pdf
// https://community.letsencrypt.org/t/list-of-client-implementations/2103
function Acme2(options) {
assert.strictEqual(typeof options, 'object');
function Acme2(fqdn, domainObject, email) {
assert.strictEqual(typeof fqdn, 'string');
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof email, 'string');
this.accountKeyPem = null; // Buffer .
this.email = options.email;
this.fqdn = fqdn;
this.accountKey = null;
this.email = email;
this.keyId = null;
this.caDirectory = options.prod ? CA_PROD_DIRECTORY_URL : CA_STAGING_DIRECTORY_URL;
const prod = domainObject.tlsConfig.provider.match(/.*-prod/) !== null; // matches 'le-prod' or 'letsencrypt-prod'
this.caDirectory = prod ? CA_PROD_DIRECTORY_URL : CA_STAGING_DIRECTORY_URL;
this.directory = {};
this.performHttpAuthorization = !!options.performHttpAuthorization;
this.wildcard = !!options.wildcard;
this.performHttpAuthorization = domainObject.provider.match(/noop|manual|wildcard/) !== null;
this.wildcard = !!domainObject.tlsConfig.wildcard;
this.domain = domainObject.domain;
this.cn = fqdn !== this.domain && this.wildcard ? dns.makeWildcard(fqdn) : fqdn; // bare domain is not part of wildcard SAN
this.certName = this.cn.replace('*.', '_.');
debug(`Acme2: will get cert for fqdn: ${this.fqdn} cn: ${this.cn} certName: ${this.certName} wildcard: ${this.wildcard} http: ${this.performHttpAuthorization}`);
}
// urlsafe base64 encoding (jose)
@@ -47,16 +59,16 @@ function urlBase64Encode(string) {
}
function b64(str) {
var buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
const buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
return urlBase64Encode(buf.toString('base64'));
}
function getModulus(pem) {
assert(Buffer.isBuffer(pem));
assert.strictEqual(typeof pem, 'string');
var stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
const stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
if (!stdout) return null;
var match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
const match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
if (!match) return null;
return Buffer.from(match[1], 'hex');
}
@@ -64,8 +76,7 @@ function getModulus(pem) {
Acme2.prototype.sendSignedRequest = async function (url, payload) {
assert.strictEqual(typeof url, 'string');
assert.strictEqual(typeof payload, 'string');
assert(Buffer.isBuffer(this.accountKeyPem));
assert.strictEqual(typeof this.accountKey, 'string');
const that = this;
let header = {
@@ -80,7 +91,7 @@ Acme2.prototype.sendSignedRequest = async function (url, payload) {
header.jwk = {
e: b64(Buffer.from([0x01, 0x00, 0x01])), // exponent - 65537
kty: 'RSA',
n: b64(getModulus(this.accountKeyPem))
n: b64(getModulus(this.accountKey))
};
}
@@ -99,7 +110,7 @@ Acme2.prototype.sendSignedRequest = async function (url, payload) {
const signer = crypto.createSign('RSA-SHA256');
signer.update(protected64 + '.' + payload64, 'utf8');
const signature64 = urlBase64Encode(signer.sign(that.accountKeyPem, 'base64'));
const signature64 = urlBase64Encode(signer.sign(that.accountKey, 'base64'));
const data = {
protected: protected64,
@@ -135,7 +146,7 @@ Acme2.prototype.updateContact = async function (registrationUri) {
};
async function generateAccountKey() {
const acmeAccountKey = safe.child_process.execSync('openssl genrsa 4096');
const acmeAccountKey = safe.child_process.execSync('openssl genrsa 4096', { encoding: 'utf8' });
if (!acmeAccountKey) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate acme account key: ${safe.error.message}`);
return acmeAccountKey;
}
@@ -147,18 +158,18 @@ Acme2.prototype.ensureAccount = async function () {
debug('ensureAccount: registering user');
this.accountKeyPem = await blobs.get(blobs.ACME_ACCOUNT_KEY);
if (!this.accountKeyPem) {
this.accountKey = await blobs.getString(blobs.ACME_ACCOUNT_KEY);
if (!this.accountKey) {
debug('ensureAccount: generating new account keys');
this.accountKeyPem = await generateAccountKey();
await blobs.set(blobs.ACME_ACCOUNT_KEY, this.accountKeyPem);
this.accountKey = await generateAccountKey();
await blobs.setString(blobs.ACME_ACCOUNT_KEY, this.accountKey);
}
let result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
if (result.status === 403 && result.body.type === 'urn:ietf:params:acme:error:unauthorized') {
debug(`ensureAccount: key was revoked. ${result.status} ${JSON.stringify(result.body)}. generating new account key`);
this.accountKeyPem = await generateAccountKey();
await blobs.set(blobs.ACME_ACCOUNT_KEY, this.accountKeyPem);
this.accountKey = await generateAccountKey();
await blobs.setString(blobs.ACME_ACCOUNT_KEY, this.accountKey);
result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
}
@@ -172,23 +183,21 @@ Acme2.prototype.ensureAccount = async function () {
await this.updateContact(result.headers.location);
};
Acme2.prototype.newOrder = async function (domain) {
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.newOrder = async function () {
const payload = {
identifiers: [{
type: 'dns',
value: domain
value: this.cn
}]
};
debug(`newOrder: ${domain}`);
debug(`newOrder: ${this.cn}`);
const result = await this.sendSignedRequest(this.directory.newOrder, JSON.stringify(payload));
if (result.status === 403) throw new BoxError(BoxError.ACCESS_DENIED, `Forbidden sending new order: ${result.body.detail}`);
if (result.status !== 201) throw new BoxError(BoxError.ACME_ERROR, `Failed to send new order. Expecting 201, got ${result.statusCode} ${JSON.stringify(result.body)}`);
debug('newOrder: created order %s %j', domain, result.body);
debug(`newOrder: created order ${this.cn} %j`, result.body);
const order = result.body, orderUrl = result.headers.location;
@@ -222,12 +231,12 @@ Acme2.prototype.waitForOrder = async function (orderUrl) {
};
Acme2.prototype.getKeyAuthorization = function (token) {
assert(Buffer.isBuffer(this.accountKeyPem));
assert.strictEqual(typeof this.accountKey, 'string');
let jwk = {
e: b64(Buffer.from([0x01, 0x00, 0x01])), // Exponent - 65537
kty: 'RSA',
n: b64(getModulus(this.accountKeyPem))
n: b64(getModulus(this.accountKey))
};
let shasum = crypto.createHash('sha256');
@@ -275,10 +284,12 @@ Acme2.prototype.waitForChallenge = async function (challenge) {
};
// https://community.letsencrypt.org/t/public-beta-rate-limits/4772 for rate limits
Acme2.prototype.signCertificate = async function (domain, finalizationUrl, csrDer) {
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.signCertificate = async function (finalizationUrl, csrPem) {
assert.strictEqual(typeof finalizationUrl, 'string');
assert(Buffer.isBuffer(csrDer));
assert.strictEqual(typeof csrPem, 'string');
const csrDer = safe.child_process.execSync('openssl req -inform pem -outform der', { input: csrPem });
if (!csrDer) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
const payload = {
csr: b64(csrDer)
@@ -291,22 +302,28 @@ Acme2.prototype.signCertificate = async function (domain, finalizationUrl, csrDe
if (result.status !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to sign certificate. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
};
Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFilePath) {
assert.strictEqual(typeof hostname, 'string');
if (safe.fs.existsSync(keyFilePath)) {
debug('createKeyAndCsr: reuse the key for renewal at %s', keyFilePath);
} else {
let key = safe.child_process.execSync('openssl ecparam -genkey -name secp384r1'); // openssl ecparam -list_curves
if (!key) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
if (!safe.fs.writeFileSync(keyFilePath, key)) throw new BoxError(BoxError.FS_ERROR, safe.error);
debug('createKeyAndCsr: key file saved at %s', keyFilePath);
Acme2.prototype.ensureKey = async function () {
const key = await blobs.getString(`${blobs.CERT_PREFIX}-${this.certName}.key`);
if (key) {
debug(`ensureKey: reuse existing key for ${this.cn}`);
return key;
}
debug(`ensureKey: generating new key for ${this.cn}`);
const newKey = safe.child_process.execSync('openssl ecparam -genkey -name secp384r1', { encoding: 'utf8' }); // openssl ecparam -list_curves
if (!newKey) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
return newKey;
};
Acme2.prototype.createCsr = async function (key) {
assert.strictEqual(typeof key, 'string');
const [error, tmpdir] = await safe(fs.promises.mkdtemp(path.join(os.tmpdir(), 'acme-')));
if (error) throw new BoxError(BoxError.FS_ERROR, `Error creating temporary directory for openssl config: ${error.message}`);
const keyFilePath = path.join(tmpdir, 'key');
if (!safe.fs.writeFileSync(keyFilePath, key)) throw new BoxError(BoxError.FS_ERROR, `Failed to write key file: ${safe.error.message}`);
// OCSP must-staple is currently disabled because nginx does not provide staple on the first request (https://forum.cloudron.io/topic/4917/ocsp-stapling-for-tls-ssl/)
// ' -addext "tlsfeature = status_request"'; // this adds OCSP must-staple
// we used to use -addext to the CLI to add these but that arg doesn't work on Ubuntu 16.04
@@ -314,47 +331,37 @@ Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFile
const conf = '[req]\nreq_extensions = v3_req\ndistinguished_name = req_distinguished_name\n'
+ '[req_distinguished_name]\n\n'
+ '[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @alt_names\n'
+ `[alt_names]\nDNS.1 = ${hostname}\n`;
+ `[alt_names]\nDNS.1 = ${this.cn}\n`;
const opensslConfigFile = path.join(tmpdir, 'openssl.conf');
if (!safe.fs.writeFileSync(opensslConfigFile, conf)) throw new BoxError(BoxError.FS_ERROR, `Failed to write openssl config: ${safe.error.message}`);
// while we pass the CN anyways, subjectAltName takes precedence
const csrDer = safe.child_process.execSync(`openssl req -new -key ${keyFilePath} -outform DER -subj /CN=${hostname} -config ${opensslConfigFile}`);
if (!csrDer) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
if (!safe.fs.writeFileSync(csrFilePath, csrDer)) throw new BoxError(BoxError.FS_ERROR, safe.error); // bookkeeping. inspect with openssl req -text -noout -in hostname.csr -inform der
const csrPem = safe.child_process.execSync(`openssl req -new -key ${keyFilePath} -outform PEM -subj /CN=${this.cn} -config ${opensslConfigFile}`, { encoding: 'utf8' });
if (!csrPem) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
await safe(fs.promises.rm(tmpdir, { recursive: true, force: true }));
debug('createKeyAndCsr: csr file (DER) saved at %s', csrFilePath);
return csrDer;
debug(`createCsr: csr file created for ${this.cn}`);
return csrPem; // inspect with openssl req -text -noout -in hostname.csr -inform pem
};
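Outside of node, the same key/CSR generation is two openssl invocations with the identical config (sketch, hypothetical hostname and file names):
openssl ecparam -genkey -name secp384r1 > key.pem
cat > openssl.conf <<'EOF'
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]

[v3_req]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = example.com
EOF
openssl req -new -key key.pem -outform PEM -subj /CN=example.com -config openssl.conf -out csr.pem
openssl req -text -noout -in csr.pem   # confirm the SAN made it in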
Acme2.prototype.downloadCertificate = async function (hostname, certUrl, certFilePath) {
assert.strictEqual(typeof hostname, 'string');
Acme2.prototype.downloadCertificate = async function (certUrl) {
assert.strictEqual(typeof certUrl, 'string');
await promiseRetry({ times: 5, interval: 20000, debug }, async () => {
debug(`downloadCertificate: downloading certificate of ${hostname}`);
return await promiseRetry({ times: 5, interval: 20000, debug }, async () => {
debug(`downloadCertificate: downloading certificate of ${this.cn}`);
const result = await this.postAsGet(certUrl);
if (result.statusCode === 202) throw new BoxError(BoxError.ACME_ERROR, 'Retry downloading certificate');
if (result.statusCode !== 200) throw new BoxError(BoxError.ACME_ERROR, `Failed to get cert. Expecting 200, got ${result.statusCode} ${JSON.stringify(result.body)}`);
const fullChainPem = result.body; // buffer
if (!safe.fs.writeFileSync(certFilePath, fullChainPem)) throw new BoxError(BoxError.FS_ERROR, safe.error);
debug(`downloadCertificate: cert file for ${hostname} saved at ${certFilePath}`);
const fullChainPem = result.body.toString('utf8'); // buffer
return fullChainPem;
});
};
Acme2.prototype.prepareHttpChallenge = async function (hostname, domain, authorization, acmeChallengesDir) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.prepareHttpChallenge = async function (authorization) {
assert.strictEqual(typeof authorization, 'object');
assert.strictEqual(typeof acmeChallengesDir, 'string');
debug('prepareHttpChallenge: challenges: %j', authorization);
let httpChallenges = authorization.challenges.filter(function(x) { return x.type === 'http-01'; });
@@ -365,44 +372,39 @@ Acme2.prototype.prepareHttpChallenge = async function (hostname, domain, authori
let keyAuthorization = this.getKeyAuthorization(challenge.token);
debug('prepareHttpChallenge: writing %s to %s', keyAuthorization, path.join(acmeChallengesDir, challenge.token));
debug('prepareHttpChallenge: writing %s to %s', keyAuthorization, path.join(paths.ACME_CHALLENGES_DIR, challenge.token));
if (!safe.fs.writeFileSync(path.join(acmeChallengesDir, challenge.token), keyAuthorization)) throw new BoxError(BoxError.FS_ERROR, `Error writing challenge: ${safe.error.message}`);
if (!safe.fs.writeFileSync(path.join(paths.ACME_CHALLENGES_DIR, challenge.token), keyAuthorization)) throw new BoxError(BoxError.FS_ERROR, `Error writing challenge: ${safe.error.message}`);
return challenge;
};
Acme2.prototype.cleanupHttpChallenge = async function (hostname, domain, challenge, acmeChallengesDir) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.cleanupHttpChallenge = async function (challenge) {
assert.strictEqual(typeof challenge, 'object');
assert.strictEqual(typeof acmeChallengesDir, 'string');
debug('cleanupHttpChallenge: unlinking %s', path.join(acmeChallengesDir, challenge.token));
debug('cleanupHttpChallenge: unlinking %s', path.join(paths.ACME_CHALLENGES_DIR, challenge.token));
if (!safe.fs.unlinkSync(path.join(acmeChallengesDir, challenge.token))) throw new BoxError(BoxError.FS_ERROR, `Error unlinking challenge: ${safe.error.message}`);
if (!safe.fs.unlinkSync(path.join(paths.ACME_CHALLENGES_DIR, challenge.token))) throw new BoxError(BoxError.FS_ERROR, `Error unlinking challenge: ${safe.error.message}`);
};
function getChallengeSubdomain(hostname, domain) {
function getChallengeSubdomain(cn, domain) {
let challengeSubdomain;
if (hostname === domain) {
if (cn === domain) {
challengeSubdomain = '_acme-challenge';
} else if (hostname.includes('*')) { // wildcard
let subdomain = hostname.slice(0, -domain.length - 1);
} else if (cn.includes('*')) { // wildcard
let subdomain = cn.slice(0, -domain.length - 1);
challengeSubdomain = subdomain ? subdomain.replace('*', '_acme-challenge') : '_acme-challenge';
} else {
challengeSubdomain = '_acme-challenge.' + hostname.slice(0, -domain.length - 1);
challengeSubdomain = '_acme-challenge.' + cn.slice(0, -domain.length - 1);
}
debug(`getChallengeSubdomain: challenge subdomain for hostname ${hostname} at domain ${domain} is ${challengeSubdomain}`);
debug(`getChallengeSubdomain: challenge subdomain for cn ${cn} at domain ${domain} is ${challengeSubdomain}`);
return challengeSubdomain;
}
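For illustration (hypothetical names): cn example.com maps to _acme-challenge, cn *.example.com also to _acme-challenge, and cn blog.example.com to _acme-challenge.blog, all relative to the domain. Once upserted, the TXT record can be checked by hand:
dig +short TXT _acme-challenge.blog.example.com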
Acme2.prototype.prepareDnsChallenge = async function (hostname, domain, authorization) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.prepareDnsChallenge = async function (authorization) {
assert.strictEqual(typeof authorization, 'object');
debug('prepareDnsChallenge: challenges: %j', authorization);
@@ -415,39 +417,34 @@ Acme2.prototype.prepareDnsChallenge = async function (hostname, domain, authoriz
shasum.update(keyAuthorization);
const txtValue = urlBase64Encode(shasum.digest('base64'));
const challengeSubdomain = getChallengeSubdomain(hostname, domain);
const challengeSubdomain = getChallengeSubdomain(this.cn, this.domain);
debug(`prepareDnsChallenge: update ${challengeSubdomain} with ${txtValue}`);
await dns.upsertDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ]);
await dns.upsertDnsRecords(challengeSubdomain, this.domain, 'TXT', [ `"${txtValue}"` ]);
await dns.waitForDnsRecord(challengeSubdomain, domain, 'TXT', txtValue, { times: 200 });
await dns.waitForDnsRecord(challengeSubdomain, this.domain, 'TXT', txtValue, { times: 200 });
return challenge;
};
Acme2.prototype.cleanupDnsChallenge = async function (hostname, domain, challenge) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.cleanupDnsChallenge = async function (challenge) {
assert.strictEqual(typeof challenge, 'object');
const keyAuthorization = this.getKeyAuthorization(challenge.token);
let shasum = crypto.createHash('sha256');
const shasum = crypto.createHash('sha256');
shasum.update(keyAuthorization);
const txtValue = urlBase64Encode(shasum.digest('base64'));
let challengeSubdomain = getChallengeSubdomain(hostname, domain);
const challengeSubdomain = getChallengeSubdomain(this.cn, this.domain);
debug(`cleanupDnsChallenge: remove ${challengeSubdomain} with ${txtValue}`);
await dns.removeDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ]);
await dns.removeDnsRecords(challengeSubdomain, this.domain, 'TXT', [ `"${txtValue}"` ]);
};
Acme2.prototype.prepareChallenge = async function (hostname, domain, authorizationUrl, acmeChallengesDir) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.prepareChallenge = async function (authorizationUrl) {
assert.strictEqual(typeof authorizationUrl, 'string');
assert.strictEqual(typeof acmeChallengesDir, 'string');
debug(`prepareChallenge: http: ${this.performHttpAuthorization}`);
@@ -457,55 +454,49 @@ Acme2.prototype.prepareChallenge = async function (hostname, domain, authorizati
const authorization = response.body;
if (this.performHttpAuthorization) {
return await this.prepareHttpChallenge(hostname, domain, authorization, acmeChallengesDir);
return await this.prepareHttpChallenge(authorization);
} else {
return await this.prepareDnsChallenge(hostname, domain, authorization);
return await this.prepareDnsChallenge(authorization);
}
};
Acme2.prototype.cleanupChallenge = async function (hostname, domain, challenge, acmeChallengesDir) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
Acme2.prototype.cleanupChallenge = async function (challenge) {
assert.strictEqual(typeof challenge, 'object');
assert.strictEqual(typeof acmeChallengesDir, 'string');
debug(`cleanupChallenge: http: ${this.performHttpAuthorization}`);
if (this.performHttpAuthorization) {
await this.cleanupHttpChallenge(hostname, domain, challenge, acmeChallengesDir);
await this.cleanupHttpChallenge(challenge);
} else {
await this.cleanupDnsChallenge(hostname, domain, challenge);
await this.cleanupDnsChallenge(challenge);
}
};
Acme2.prototype.acmeFlow = async function (hostname, domain, paths) {
assert.strictEqual(typeof hostname, 'string');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof paths, 'object');
const { certFilePath, keyFilePath, csrFilePath, acmeChallengesDir } = paths;
Acme2.prototype.acmeFlow = async function () {
await this.ensureAccount();
const { order, orderUrl } = await this.newOrder(hostname);
const { order, orderUrl } = await this.newOrder();
const certificates = [];
for (let i = 0; i < order.authorizations.length; i++) {
const authorizationUrl = order.authorizations[i];
debug(`acmeFlow: authorizing ${authorizationUrl}`);
const challenge = await this.prepareChallenge(hostname, domain, authorizationUrl, acmeChallengesDir);
const challenge = await this.prepareChallenge(authorizationUrl);
await this.notifyChallengeReady(challenge);
await this.waitForChallenge(challenge);
const csrDer = await this.createKeyAndCsr(hostname, keyFilePath, csrFilePath);
await this.signCertificate(hostname, order.finalize, csrDer);
const key = await this.ensureKey();
const csr = await this.createCsr(key);
await this.signCertificate(order.finalize, csr);
const certUrl = await this.waitForOrder(orderUrl);
await this.downloadCertificate(hostname, certUrl, certFilePath);
const cert = await this.downloadCertificate(certUrl);
try {
await this.cleanupChallenge(hostname, domain, challenge, acmeChallengesDir);
} catch (cleanupError) {
debug('acmeFlow: ignoring error when cleaning up challenge:', cleanupError);
}
await safe(this.cleanupChallenge(challenge), { debug });
certificates.push({ cert, key, csr });
}
return certificates;
};
Acme2.prototype.loadDirectory = async function () {
@@ -522,32 +513,36 @@ Acme2.prototype.loadDirectory = async function () {
});
};
Acme2.prototype.getCertificate = async function (vhost, domain, paths) {
assert.strictEqual(typeof vhost, 'string');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof paths, 'object');
debug(`getCertificate: start acme flow for ${vhost} from ${this.caDirectory}`);
if (vhost !== domain && this.wildcard) { // bare domain is not part of wildcard SAN
vhost = dns.makeWildcard(vhost);
debug(`getCertificate: will get wildcard cert for ${vhost}`);
}
Acme2.prototype.getCertificate = async function () {
debug(`getCertificate: start acme flow for ${this.cn} from ${this.caDirectory}`);
await this.loadDirectory();
await this.acmeFlow(vhost, domain, paths);
const result = await this.acmeFlow();
debug(`getCertificate: acme flow completed for ${this.cn}. result: ${result.length}`);
await blobs.setString(`${blobs.CERT_PREFIX}-${this.certName}.key`, result[0].key);
await blobs.setString(`${blobs.CERT_PREFIX}-${this.certName}.cert`, result[0].cert);
await blobs.setString(`${blobs.CERT_PREFIX}-${this.certName}.csr`, result[0].csr);
return result[0];
};
async function getCertificate(vhost, domain, paths, options) {
assert.strictEqual(typeof vhost, 'string'); // this can also be a wildcard domain (for alias domains)
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof paths, 'object');
assert.strictEqual(typeof options, 'object');
async function getCertificate(fqdn, domainObject) {
assert.strictEqual(typeof fqdn, 'string'); // this can also be a wildcard domain (for alias domains)
assert.strictEqual(typeof domainObject, 'object');
await promiseRetry({ times: 3, interval: 0, debug }, async function () {
debug(`getCertificate: for vhost ${vhost} and domain ${domain}`);
// registering user with an email requires A or MX record (https://github.com/letsencrypt/boulder/issues/1197)
// we cannot use admin@fqdn because the user might not have set it up.
// we simply update the account with the latest email we have each time when getting letsencrypt certs
// https://github.com/ietf-wg-acme/acme/issues/30
const owner = await users.getOwner();
const email = owner?.email || 'webmaster@cloudron.io'; // can error if not activated yet
const acme = new Acme2(options || { });
return await acme.getCertificate(vhost, domain, paths);
return await promiseRetry({ times: 3, interval: 0, debug }, async function () {
debug(`getCertificate: for fqdn ${fqdn} and domain ${domainObject.domain}`);
const acme = new Acme2(fqdn, domainObject, email);
return await acme.getCertificate();
});
}
+22 -13
@@ -70,25 +70,36 @@ async function checkAppHealth(app, options) {
const manifest = app.manifest;
const [error, data] = await safe(docker.inspect(app.containerId));
if (error || !data || !data.State) return await setHealth(app, apps.HEALTH_ERROR);
if (data.State.Running !== true) return await setHealth(app, apps.HEALTH_DEAD);
let healthCheckUrl, host;
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) {
healthCheckUrl = app.upstreamUri;
host = new URL(app.upstreamUri).host; // includes port
} else {
const [error, data] = await safe(docker.inspect(app.containerId));
if (error || !data || !data.State) return await setHealth(app, apps.HEALTH_ERROR);
if (data.State.Running !== true) return await setHealth(app, apps.HEALTH_DEAD);
// non-appstore apps may not have healthCheckPath
if (!manifest.healthCheckPath) return await setHealth(app, apps.HEALTH_HEALTHY);
// non-appstore apps may not have healthCheckPath
if (!manifest.healthCheckPath) return await setHealth(app, apps.HEALTH_HEALTHY);
healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
host = app.fqdn;
}
const healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
const [healthCheckError, response] = await safe(superagent
.get(healthCheckUrl)
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
.disableTLSCerts() // for app proxy
.set('Host', host) // required for some apache configs with rewrite rules
.set('User-Agent', 'Mozilla (CloudronHealth)') // required for some apps (e.g. minio)
.redirects(0)
.ok(() => true)
.timeout(options.timeout * 1000));
if (healthCheckError) {
await apps.appendLogLine(app, `=> Healthcheck error: ${healthCheckError}`);
await setHealth(app, apps.HEALTH_UNHEALTHY);
} else if (response.status > 403) { // 2xx and 3xx are ok. even 401 and 403 are ok for now (for WP sites)
await apps.appendLogLine(app, `=> Healthcheck error: got response status ${response.status}`);
await setHealth(app, apps.HEALTH_UNHEALTHY);
} else {
await setHealth(app, apps.HEALTH_HEALTHY);
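On the wire the probe amounts to the following (sketch, hypothetical container IP and port):
curl -s -o /dev/null -w '%{http_code}\n' --max-time 10 \
    -H 'Host: app.example.com' -H 'User-Agent: Mozilla (CloudronHealth)' \
    http://172.18.16.4:8000/healthcheck
# 2xx/3xx and even 401/403 count as healthy; anything above 403 marks the app unhealthy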
@@ -129,9 +140,7 @@ async function processDockerEvents(options) {
const [error, info] = await safe(getContainerInfo(containerId));
const program = error ? containerId : (info.addonName || info.app.fqdn);
const now = Date.now();
// do not send mails for dev apps
const notifyUser = !(info.app && info.app.debugMode) && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);
const notifyUser = !info?.app?.debugMode && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);
debug(`OOM ${program} notifyUser: ${notifyUser}. lastOomTime: ${gLastOomMailTime} (now: ${now})`);
@@ -163,10 +172,10 @@ async function processApp(options) {
await Promise.allSettled(healthChecks); // wait for all promises to finish
const alive = allApps
.filter(function (a) { return a.installationState === apps.ISTATE_INSTALLED && a.runState === apps.RSTATE_RUNNING && a.health === apps.HEALTH_HEALTHY; });
const stopped = allApps.filter(app => app.runState === apps.RSTATE_STOPPED);
const running = allApps.filter(function (a) { return a.installationState === apps.ISTATE_INSTALLED && a.runState === apps.RSTATE_RUNNING && a.health === apps.HEALTH_HEALTHY; });
debug(`app health: ${alive.length} alive / ${allApps.length - alive.length} dead.`);
debug(`app health: ${running.length} running / ${stopped.length} stopped / ${allApps.length - running.length - stopped.length} unresponsive`);
}
async function run(intervalSecs) {
+213
@@ -0,0 +1,213 @@
'use strict';
exports = module.exports = {
list,
listByUser,
add,
get,
update,
remove,
getIcon
};
const assert = require('assert'),
apps = require('./apps.js'),
BoxError = require('./boxerror.js'),
database = require('./database.js'),
debug = require('debug')('box:applinks'),
jsdom = require('jsdom'),
safe = require('safetydance'),
superagent = require('superagent'),
uuid = require('uuid'),
validator = require('validator');
const APPLINKS_FIELDS = [ 'id', 'accessRestrictionJson', 'creationTime', 'updateTime', 'ts', 'label', 'tagsJson', 'icon', 'upstreamUri' ].join(',');
function postProcess(result) {
assert.strictEqual(typeof result, 'object');
assert(result.tagsJson === null || typeof result.tagsJson === 'string');
result.tags = safe.JSON.parse(result.tagsJson) || [];
delete result.tagsJson;
assert(result.accessRestrictionJson === null || typeof result.accessRestrictionJson === 'string');
result.accessRestriction = safe.JSON.parse(result.accessRestrictionJson);
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
delete result.accessRestrictionJson;
result.ts = new Date(result.ts).getTime();
result.icon = result.icon ? result.icon : null;
}
function validateUpstreamUri(upstreamUri) {
assert.strictEqual(typeof upstreamUri, 'string');
if (!upstreamUri) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri cannot be empty');
if (!upstreamUri.includes('://')) return new BoxError(BoxError.BAD_FIELD, 'upstreamUri has no schema');
const uri = safe(() => new URL(upstreamUri));
if (!uri) return new BoxError(BoxError.BAD_FIELD, `upstreamUri is invalid: ${safe.error.message}`);
if (uri.protocol !== 'http:' && uri.protocol !== 'https:') return new BoxError(BoxError.BAD_FIELD, 'upstreamUri has an unsupported scheme');
return null;
}
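A self-contained version of the same checks using only the WHATWG URL parser, with plain strings standing in for BoxError:
function checkUpstreamUri(upstreamUri) {
    if (!upstreamUri) return 'upstreamUri cannot be empty';
    if (!upstreamUri.includes('://')) return 'upstreamUri has no schema';
    let uri;
    try {
        uri = new URL(upstreamUri);
    } catch (error) {
        return `upstreamUri is invalid: ${error.message}`;
    }
    if (uri.protocol !== 'http:' && uri.protocol !== 'https:') return 'upstreamUri has an unsupported scheme';
    return null;
}

console.log(checkUpstreamUri('example.com'));         // 'upstreamUri has no schema'
console.log(checkUpstreamUri('ftp://example.com'));   // 'upstreamUri has an unsupported scheme'
console.log(checkUpstreamUri('https://example.com')); // null, i.e. valid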
async function list() {
const results = await database.query(`SELECT ${APPLINKS_FIELDS} FROM applinks ORDER BY upstreamUri`);
results.forEach(postProcess);
return results;
}
async function listByUser(user) {
assert.strictEqual(typeof user, 'object');
const result = await list();
return result.filter((app) => apps.canAccess(app, user));
}
async function detectMetaInfo(applink) {
assert.strictEqual(typeof applink, 'object');
if (applink.icon && applink.label) return; // nothing left to detect
const [error, response] = await safe(superagent.get(applink.upstreamUri).set('User-Agent', 'Mozilla'));
if (error || !response.text) {
debug('detectMetaInfo: Unable to fetch upstreamUri to detect icon and title', error?.statusCode);
return;
}
// set redirected URI if any for favicon url
const redirectUri = (response.redirects && response.redirects.length) ? response.redirects[0] : null;
const dom = new jsdom.JSDOM(response.text);
if (!applink.icon) {
let favicon = '';
if (dom.window.document.querySelector('link[rel="apple-touch-icon"]')) favicon = dom.window.document.querySelector('link[rel="apple-touch-icon"]').href ;
if (!favicon.endsWith('.png') && dom.window.document.querySelector('meta[name="msapplication-TileImage"]')) favicon = dom.window.document.querySelector('meta[name="msapplication-TileImage"]').content ;
if (!favicon.endsWith('.png') && dom.window.document.querySelector('link[rel="shortcut icon"]')) favicon = dom.window.document.querySelector('link[rel="shortcut icon"]').href ;
if (!favicon.endsWith('.png') && dom.window.document.querySelector('link[rel="icon"]')) {
let iconElements = dom.window.document.querySelectorAll('link[rel="icon"]');
if (iconElements.length) {
favicon = iconElements[0].href; // choose first one for a start
// check if we have sizes attributes and then choose the largest one
iconElements = Array.from(iconElements).filter(function (e) {
return e.attributes.getNamedItem('sizes') && e.attributes.getNamedItem('sizes').value;
}).sort(function (a, b) {
return parseInt(b.attributes.getNamedItem('sizes').value.split('x')[0]) - parseInt(a.attributes.getNamedItem('sizes').value.split('x')[0]);
});
if (iconElements.length) favicon = iconElements[0].href;
}
}
if (!favicon.endsWith('.png') && dom.window.document.querySelector('meta[itemprop="image"]')) favicon = dom.window.document.querySelector('meta[itemprop="image"]').content;
if (favicon) {
if (favicon.startsWith('/')) favicon = (redirectUri || applink.upstreamUri) + favicon;
debug(`detectMetaInfo: found icon: ${favicon}`);
const [error, response] = await safe(superagent.get(favicon));
if (error) console.error(`Failed to fetch icon ${favicon}: `, error);
else if (response.ok && response.headers['content-type'] === 'image/png') applink.icon = response.body;
else console.error(`Failed to fetch icon ${favicon}: statusCode=${response.status}`);
} else {
console.error(`Unable to find a suitable icon for ${applink.upstreamUri}`);
}
}
if (!applink.label) {
if (dom.window.document.querySelector('meta[property="og:title"]')) applink.label = dom.window.document.querySelector('meta[property="og:title"]').content;
else if (dom.window.document.querySelector('meta[property="og:site_name"]')) applink.label = dom.window.document.querySelector('meta[property="og:site_name"]').content;
else if (dom.window.document.title) applink.label = dom.window.document.title;
}
}
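The size-preference logic in the icon chain can be read in isolation; a sketch of picking the largest link[rel="icon"] by its sizes attribute (assumes DOM elements, e.g. from jsdom):
// Prefer the icon with the largest declared size; fall back to the first
// element when no usable sizes attributes are present.
function pickLargestIcon(iconElements) {
    if (!iconElements.length) return '';
    let favicon = iconElements[0].href;
    const sized = Array.from(iconElements)
        .filter((e) => e.getAttribute('sizes')) // e.g. sizes="32x32"
        .sort((a, b) => parseInt(b.getAttribute('sizes'), 10) - parseInt(a.getAttribute('sizes'), 10));
    if (sized.length) favicon = sized[0].href;
    return favicon;
}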
async function add(applink) {
assert.strictEqual(typeof applink, 'object');
assert.strictEqual(typeof applink.upstreamUri, 'string');
debug(`add: ${applink.upstreamUri}`, applink);
let error = validateUpstreamUri(applink.upstreamUri);
if (error) throw error;
if (applink.icon) {
if (!validator.isBase64(applink.icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
applink.icon = Buffer.from(applink.icon, 'base64');
}
await detectMetaInfo(applink);
const data = {
id: uuid.v4(),
accessRestrictionJson: applink.accessRestriction ? JSON.stringify(applink.accessRestriction) : null,
label: applink.label || '',
tagsJson: applink.tags ? JSON.stringify(applink.tags) : null,
icon: applink.icon || null,
upstreamUri: applink.upstreamUri
};
const query = 'INSERT INTO applinks (id, accessRestrictionJson, label, tagsJson, icon, upstreamUri) VALUES (?, ?, ?, ?, ?, ?)';
const args = [ data.id, data.accessRestrictionJson, data.label, data.tagsJson, data.icon, data.upstreamUri ];
[error] = await safe(database.query(query, args));
if (error) throw error;
return data.id;
}
async function get(applinkId) {
assert.strictEqual(typeof applinkId, 'string');
const result = await database.query(`SELECT ${APPLINKS_FIELDS} FROM applinks WHERE id = ?`, [ applinkId ]);
if (result.length === 0) return null;
postProcess(result[0]);
return result[0];
}
async function update(applinkId, applink) {
assert.strictEqual(typeof applinkId, 'string');
assert.strictEqual(typeof applink, 'object');
assert.strictEqual(typeof applink.upstreamUri, 'string');
debug(`update: ${applink.upstreamUri}`, applink);
let error = validateUpstreamUri(applink.upstreamUri);
if (error) throw error;
if (applink.icon) {
if (!validator.isBase64(applink.icon)) throw new BoxError(BoxError.BAD_FIELD, 'icon is not base64');
applink.icon = Buffer.from(applink.icon, 'base64');
}
await detectMetaInfo(applink);
const query = 'UPDATE applinks SET label=?, icon=?, upstreamUri=?, tagsJson=?, accessRestrictionJson=? WHERE id = ?';
const args = [ applink.label, applink.icon || null, applink.upstreamUri, applink.tags ? JSON.stringify(applink.tags) : null, applink.accessRestriction ? JSON.stringify(applink.accessRestriction) : null, applinkId ];
const result = await database.query(query, args);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');
}
async function remove(applinkId) {
assert.strictEqual(typeof applinkId, 'string');
const result = await database.query('DELETE FROM applinks WHERE id = ?', [ applinkId ]);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');
}
async function getIcon(applinkId) {
assert.strictEqual(typeof applinkId, 'string');
const applink = await get(applinkId);
if (!applink) throw new BoxError(BoxError.NOT_FOUND, 'Applink not found');
return applink.icon;
}
+380 -192
File diff suppressed because it is too large
+57 -80
@@ -13,14 +13,17 @@ exports = module.exports = {
purchaseApp,
unpurchaseApp,
createUserToken,
getWebToken,
getSubscription,
isFreePlan,
getAppUpdate,
getBoxUpdate,
createTicket
createTicket,
// exported for tests
_unregister: unregister
};
const apps = require('./apps.js'),
@@ -34,6 +37,7 @@ const apps = require('./apps.js'),
safe = require('safetydance'),
semver = require('semver'),
settings = require('./settings.js'),
sysinfo = require('./sysinfo.js'),
superagent = require('superagent'),
support = require('./support.js'),
util = require('util');
@@ -78,17 +82,15 @@ async function login(email, password, totpToken) {
assert.strictEqual(typeof password, 'string');
assert.strictEqual(typeof totpToken, 'string');
const data = { email, password, totpToken };
const url = settings.apiServerOrigin() + '/api/v1/login';
const [error, response] = await safe(superagent.post(url)
.send(data)
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/login`)
.send({ email, password, totpToken })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `login status code: ${response.status}`);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Login error. status code: ${response.status}`);
if (!response.body.accessToken) throw new BoxError(BoxError.EXTERNAL_ERROR, `Login error. invalid response: ${response.text}`);
return response.body; // { userId, accessToken }
}
@@ -97,54 +99,36 @@ async function registerUser(email, password) {
assert.strictEqual(typeof email, 'string');
assert.strictEqual(typeof password, 'string');
const data = { email, password };
const url = settings.apiServerOrigin() + '/api/v1/register_user';
const [error, response] = await safe(superagent.post(url)
.send(data)
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_user`)
.send({ email, password, utmSource: 'cloudron-dashboard' })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, 'account already exists');
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `register status code: ${response.status}`);
if (response.status === 409) throw new BoxError(BoxError.ALREADY_EXISTS, 'Registration error: account already exists');
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Registration error. invalid response: ${response.status}`);
}
async function createUserToken() {
async function getWebToken() {
if (settings.isDemo()) throw new BoxError(BoxError.BAD_FIELD, 'Not allowed in demo mode');
const token = await settings.getCloudronToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const token = await settings.getAppstoreWebToken();
if (!token) throw new BoxError(BoxError.NOT_FOUND); // user will have to re-login with password somehow
const url = `${settings.apiServerOrigin()}/api/v1/user_token`;
const [error, response] = await safe(superagent.post(url)
.send({})
.query({ accessToken: token })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `getUserToken status code: ${response.status}`);
return response.body.accessToken;
return token;
}
async function getSubscription() {
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = settings.apiServerOrigin() + '/api/v1/subscription';
const [error, response] = await safe(superagent.get(url)
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/subscription`)
.query({ accessToken: token })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR);
if (response.status === 502) throw new BoxError(BoxError.EXTERNAL_ERROR, `Stripe error: ${response.text}`);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${response.status} ${response.text}`);
@@ -165,12 +149,10 @@ async function purchaseApp(data) {
assert(data.appstoreId || data.manifestId);
assert.strictEqual(typeof data.appId, 'string');
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps`;
const [error, response] = await safe(superagent.post(url)
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/cloudronapps`)
.send(data)
.query({ accessToken: token })
.timeout(30 * 1000)
@@ -180,7 +162,6 @@ async function purchaseApp(data) {
if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND); // appstoreId does not exist
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 402) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
// 200 if already purchased, 201 if newly purchased
if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', response.status, response.body));
}
@@ -190,7 +171,7 @@ async function unpurchaseApp(appId, data) {
assert.strictEqual(typeof data, 'object'); // { appstoreId, manifestId }
assert(data.appstoreId || data.manifestId);
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps/${appId}`;
@@ -203,8 +184,7 @@ async function unpurchaseApp(appId, data) {
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 404) return; // was never purchased
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status !== 201 && response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', response.status, response.body));
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `App unpurchase failed to get app. status:${response.status}`);
[error, response] = await safe(superagent.del(url)
.send(data)
@@ -214,31 +194,28 @@ async function unpurchaseApp(appId, data) {
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', response.status, response.body));
if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, `App unpurchase failed. status:${response.status}`);
}
async function getBoxUpdate(options) {
assert.strictEqual(typeof options, 'object');
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = `${settings.apiServerOrigin()}/api/v1/boxupdate`;
const query = {
accessToken: token,
boxVersion: constants.VERSION,
automatic: options.automatic
};
const [error, response] = await safe(superagent.get(url)
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/boxupdate`)
.query(query)
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status === 204) return; // no update
if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
@@ -263,10 +240,9 @@ async function getAppUpdate(app, options) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = `${settings.apiServerOrigin()}/api/v1/appupdate`;
const query = {
accessToken: token,
boxVersion: constants.VERSION,
@@ -275,14 +251,13 @@ async function getAppUpdate(app, options) {
automatic: options.automatic
};
const [error, response] = await safe(superagent.get(url)
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/appupdate`)
.query(query)
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status === 204) return; // no update
if (response.status !== 200 || !response.body) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
@@ -306,24 +281,23 @@ async function getAppUpdate(app, options) {
async function registerCloudron(data) {
assert.strictEqual(typeof data, 'object');
const url = `${settings.apiServerOrigin()}/api/v1/register_cloudron`;
const { domain, accessToken, version, existingApps } = data;
const [error, response] = await safe(superagent.post(url)
.send(data)
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/register_cloudron`)
.send({ domain, accessToken, version, existingApps })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unable to register cloudron: ${response.status} ${response.text}`);
// cloudronId, token, licenseKey
// cloudronId, token
if (!response.body.cloudronId) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no cloudron id');
if (!response.body.cloudronToken) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no token');
if (!response.body.licenseKey) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response - no license');
await settings.setCloudronId(response.body.cloudronId);
await settings.setCloudronToken(response.body.cloudronToken);
await settings.setLicenseKey(response.body.licenseKey);
await settings.setAppstoreApiToken(response.body.cloudronToken);
await settings.setAppstoreWebToken(accessToken);
debug(`registerCloudron: Cloudron registered with id ${response.body.cloudronId}`);
}
@@ -331,23 +305,23 @@ async function registerCloudron(data) {
async function updateCloudron(data) {
assert.strictEqual(typeof data, 'object');
const token = await settings.getCloudronToken();
const { domain } = data;
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const url = `${settings.apiServerOrigin()}/api/v1/update_cloudron`;
const query = {
accessToken: token
};
const [error, response] = await safe(superagent.post(url)
const [error, response] = await safe(superagent.post(`${settings.apiServerOrigin()}/api/v1/update_cloudron`)
.query(query)
.send(data)
.send({ domain })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`);
@@ -356,13 +330,20 @@ async function updateCloudron(data) {
async function registerWithLoginCredentials(options) {
assert.strictEqual(typeof options, 'object');
const token = await settings.getCloudronToken();
if (token) throw new BoxError(BoxError.CONFLICT, 'Cloudron is already registered');
if (options.signup) await registerUser(options.email, options.password);
const result = await login(options.email, options.password, options.totpToken || '');
await registerCloudron({ domain: settings.dashboardDomain(), accessToken: result.accessToken, version: constants.VERSION });
for (const app of await apps.list()) {
await purchaseApp({ appId: app.id, appstoreId: app.appStoreId, manifestId: app.manifest.id || 'customapp' });
}
}
async function unregister() {
await settings.setCloudronId('');
await settings.setAppstoreApiToken('');
await settings.setAppstoreWebToken('');
}
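This diff splits the old single cloudron token into two. A short sketch of the resulting convention, with function names taken from the diff (storage details assumed):
async function appstoreTokens(settings) {
    // API token: returned by register_cloudron as cloudronToken; used for
    // server-to-server calls (subscription, updates, purchases, tickets).
    const apiToken = await settings.getAppstoreApiToken();

    // Web token: the user's own appstore access token, saved at registration
    // and handed back by getWebToken() for direct dashboard-to-appstore use.
    const webToken = await settings.getAppstoreWebToken();

    return { apiToken, webToken };
}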
async function createTicket(info, auditSource) {
@@ -374,11 +355,12 @@ async function createTicket(info, auditSource) {
assert.strictEqual(typeof info.description, 'string');
assert.strictEqual(typeof auditSource, 'object');
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
if (info.enableSshSupport) {
await safe(support.enableRemoteSupport(true, auditSource));
info.ipv4 = await sysinfo.getServerIPv4();
}
info.app = info.appId ? await apps.get(info.appId) : null;
@@ -405,7 +387,6 @@ async function createTicket(info, auditSource) {
const [error, response] = await safe(request);
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
await eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);
@@ -413,22 +394,19 @@ async function createTicket(info, auditSource) {
return { message: `An email was sent to ${constants.SUPPORT_EMAIL}. We will get back to you shortly!` };
}
async function getApps() {
const token = await settings.getCloudronToken();
async function getApps(repository = 'core') {
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
const unstable = await settings.getUnstableAppsConfig();
const url = `${settings.apiServerOrigin()}/api/v1/apps`;
const [error, response] = await safe(superagent.get(url)
.query({ accessToken: token, boxVersion: constants.VERSION, unstable: unstable })
const [error, response] = await safe(superagent.get(`${settings.apiServerOrigin()}/api/v1/apps`)
.query({ accessToken: token, boxVersion: constants.VERSION, unstable, repository })
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App listing failed. %s %j', response.status, response.body));
if (!response.body.apps) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', response.status, response.text));
@@ -445,7 +423,7 @@ async function getAppVersion(appId, version) {
if (!isAppAllowed(appId, listingConfig)) throw new BoxError(BoxError.FEATURE_DISABLED);
const token = await settings.getCloudronToken();
const token = await settings.getAppstoreApiToken();
if (!token) throw new BoxError(BoxError.LICENSE_ERROR, 'Missing token');
let url = `${settings.apiServerOrigin()}/api/v1/apps/${appId}`;
@@ -459,7 +437,6 @@ async function getAppVersion(appId, version) {
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status === 403 || response.status === 401) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (response.status === 404) throw new BoxError(BoxError.NOT_FOUND);
if (response.status === 422) throw new BoxError(BoxError.LICENSE_ERROR, response.body.message);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, util.format('App fetch failed. %s %j', response.status, response.body));
return response.body;
+48 -46
@@ -17,10 +17,9 @@ const apps = require('./apps.js'),
AuditSource = require('./auditsource.js'),
backuptask = require('./backuptask.js'),
BoxError = require('./boxerror.js'),
collectd = require('./collectd.js'),
constants = require('./constants.js'),
debug = require('debug')('box:apptask'),
df = require('@sindresorhus/df'),
df = require('./df.js'),
dns = require('./dns.js'),
docker = require('./docker.js'),
ejs = require('ejs'),
@@ -41,13 +40,12 @@ const apps = require('./apps.js'),
sysinfo = require('./sysinfo.js'),
_ = require('underscore');
const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/app.ejs', { encoding: 'utf8' }),
MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
const MV_VOLUME_CMD = path.join(__dirname, 'scripts/mvvolume.sh'),
LOGROTATE_CONFIG_EJS = fs.readFileSync(__dirname + '/logrotate.ejs', { encoding: 'utf8' }),
CONFIGURE_LOGROTATE_CMD = path.join(__dirname, 'scripts/configurelogrotate.sh');
function makeTaskError(error, app) {
assert.strictEqual(typeof error, 'object');
assert(error instanceof BoxError);
assert.strictEqual(typeof app, 'object');
// track a few variables which helps 'repair' restart the task (see also scheduleTask in apps.js)
@@ -71,11 +69,13 @@ async function updateApp(app, values) {
async function allocateContainerIp(app) {
assert.strictEqual(typeof app, 'object');
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
await promiseRetry({ times: 10, interval: 0, debug }, async function () {
const iprange = iputils.intFromIp('172.18.20.255') - iputils.intFromIp('172.18.16.1');
let rnd = Math.floor(Math.random() * iprange);
const containerIp = iputils.ipFromInt(iputils.intFromIp('172.18.16.1') + rnd);
updateApp(app, { containerIp });
await updateApp(app, { containerIp });
});
}
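A self-contained sketch of the address allocation above: dotted quads are converted to 32-bit integers so a random address can be drawn from 172.18.16.1–172.18.20.255 (helper names mirror iputils but are reimplemented here):
function intFromIp(ip) {
    return ip.split('.').reduce((acc, octet) => (acc * 256) + parseInt(octet, 10), 0);
}

function ipFromInt(n) {
    return [ 24, 16, 8, 0 ].map((shift) => (n >>> shift) & 0xff).join('.');
}

// Collisions are left to the caller: promiseRetry re-runs the draw until
// the containerIp update succeeds.
function randomContainerIp() {
    const lo = intFromIp('172.18.16.1'), hi = intFromIp('172.18.20.255');
    return ipFromInt(lo + Math.floor(Math.random() * (hi - lo)));
}

console.log(randomContainerIp()); // e.g. 172.18.18.42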
@@ -83,6 +83,8 @@ async function createContainer(app) {
assert.strictEqual(typeof app, 'object');
assert(!app.containerId); // otherwise, it will trigger volumeFrom
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
debug('createContainer: creating container');
const container = await docker.createContainer(app);
@@ -91,7 +93,6 @@ async function createContainer(app) {
// re-generate configs that rely on container id
await addLogrotateConfig(app);
await addCollectdProfile(app);
}
async function deleteContainers(app, options) {
@@ -101,7 +102,6 @@ async function deleteContainers(app, options) {
debug('deleteContainer: deleting app containers (app, scheduler)');
// remove configs that rely on container id
await removeCollectdProfile(app);
await removeLogrotateConfig(app);
await docker.stopContainers(app.id);
await docker.deleteContainers(app.id, options);
@@ -154,19 +154,6 @@ async function deleteAppDir(app, options) {
}
}
async function addCollectdProfile(app) {
assert.strictEqual(typeof app, 'object');
const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { appId: app.id, containerId: app.containerId, appDataDir: apps.getDataDir(app, app.dataDir) });
await collectd.addProfile(app.id, collectdConf);
}
async function removeCollectdProfile(app) {
assert.strictEqual(typeof app, 'object');
await collectd.removeProfile(app.id);
}
async function addLogrotateConfig(app) {
assert.strictEqual(typeof app, 'object');
@@ -265,24 +252,30 @@ async function waitForDnsPropagation(app) {
}
}
async function moveDataDir(app, targetDir) {
async function moveDataDir(app, targetVolumeId, targetVolumePrefix) {
assert.strictEqual(typeof app, 'object');
assert(targetDir === null || typeof targetDir === 'string');
assert(targetVolumeId === null || typeof targetVolumeId === 'string');
assert(targetVolumePrefix === null || typeof targetVolumePrefix === 'string');
const resolvedSourceDir = apps.getDataDir(app, app.dataDir);
const resolvedTargetDir = apps.getDataDir(app, targetDir);
const resolvedSourceDir = await apps.getStorageDir(app);
const resolvedTargetDir = await apps.getStorageDir(_.extend({}, app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix }));
debug(`moveDataDir: migrating data from ${resolvedSourceDir} to ${resolvedTargetDir}`);
if (resolvedSourceDir === resolvedTargetDir) return;
if (resolvedSourceDir !== resolvedTargetDir) {
const [error] = await safe(shell.promises.sudo('moveDataDir', [ MV_VOLUME_CMD, resolvedSourceDir, resolvedTargetDir ], {}));
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error migrating data directory: ${error.message}`);
}
const [error] = await safe(shell.promises.sudo('moveDataDir', [ MV_VOLUME_CMD, resolvedSourceDir, resolvedTargetDir ], {}));
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, `Error migrating data directory: ${error.message}`);
await updateApp(app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix });
}
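A sketch of the new migration flow in moveDataDir, reduced to its control shape (apps, shell, MV_VOLUME_CMD and updateApp are identifiers from this diff; treat the reading as an interpretation of the hunk above):
async function moveDataDirSketch(app, targetVolumeId, targetVolumePrefix) {
    const source = await apps.getStorageDir(app);
    const target = await apps.getStorageDir(Object.assign({}, app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix }));

    // Only shell out when the resolved locations actually differ ...
    if (source !== target) {
        await shell.promises.sudo('moveDataDir', [ MV_VOLUME_CMD, source, target ], {});
    }
    // ... but always persist the new volume settings on the app.
    await updateApp(app, { storageVolumeId: targetVolumeId, storageVolumePrefix: targetVolumePrefix });
}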
async function downloadImage(manifest) {
assert.strictEqual(typeof manifest, 'object');
// skip for relay app
if (manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
const info = await docker.info();
const [dfError, diskUsage] = await safe(df.file(info.DockerRootDir));
if (dfError) throw new BoxError(BoxError.FS_ERROR, `Error getting file system info: ${dfError.message}`);
@@ -297,6 +290,9 @@ async function startApp(app) {
if (app.runState === apps.RSTATE_STOPPED) return;
// skip for relay app
if (app.manifest.id === constants.PROXY_APP_APPSTORE_ID) return;
await docker.startContainer(app.id);
}
@@ -328,7 +324,7 @@ async function install(app, args, progressCallback) {
}
await services.teardownAddons(app, addonsToRemove);
if (!restoreConfig || restoreConfig.backupId) { // in-place import should not delete data dir
if (!restoreConfig || restoreConfig.remotePath) { // in-place import should not delete data dir
await deleteAppDir(app, { removeDirectory: false }); // do not remove any symlinked appdata dir
}
@@ -357,7 +353,7 @@ async function install(app, args, progressCallback) {
if (!restoreConfig) { // install
await progressCallback({ percent: 60, message: 'Setting up addons' });
await services.setupAddons(app, app.manifest.addons);
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.backupId) { // in-place import
} else if (app.installationState === apps.ISTATE_PENDING_IMPORT && !restoreConfig.remotePath) { // in-place import
await progressCallback({ percent: 60, message: 'Importing addons in-place' });
await services.setupAddons(app, app.manifest.addons);
await services.clearAddons(app, _.omit(app.manifest.addons, 'localstorage'));
@@ -517,8 +513,9 @@ async function migrateDataDir(app, args, progressCallback) {
assert.strictEqual(typeof args, 'object');
assert.strictEqual(typeof progressCallback, 'function');
const newDataDir = args.newDataDir;
assert(newDataDir === null || typeof newDataDir === 'string');
const { newStorageVolumeId, newStorageVolumePrefix } = args;
assert(newStorageVolumeId === null || typeof newStorageVolumeId === 'string');
assert(newStorageVolumePrefix === null || typeof newStorageVolumePrefix === 'string');
await progressCallback({ percent: 10, message: 'Cleaning up old install' });
await deleteContainers(app, { managedOnly: true });
@@ -526,12 +523,12 @@ async function migrateDataDir(app, args, progressCallback) {
await progressCallback({ percent: 45, message: 'Ensuring app data directory' });
await createAppDir(app);
// re-setup addons since this creates the localStorage volume
// re-setup addons since this creates the localStorage destination
await progressCallback({ percent: 50, message: 'Setting up addons' });
await services.setupAddons(_.extend({}, app, { dataDir: newDataDir }), app.manifest.addons);
await services.setupAddons(_.extend({}, app, { storageVolumeId: newStorageVolumeId, storageVolumePrefix: newStorageVolumePrefix }), app.manifest.addons);
await progressCallback({ percent: 60, message: 'Moving data dir' });
await moveDataDir(app, newDataDir);
await moveDataDir(app, newStorageVolumeId, newStorageVolumePrefix);
await progressCallback({ percent: 90, message: 'Creating container' });
await createContainer(app);
@@ -539,7 +536,7 @@ async function migrateDataDir(app, args, progressCallback) {
await startApp(app);
await progressCallback({ percent: 100, message: 'Done' });
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, dataDir: newDataDir });
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null });
}
// configure is called for an infra update and repair to re-create container, reverseproxy config. it's all "local"
@@ -667,11 +664,10 @@ async function start(app, args, progressCallback) {
await progressCallback({ percent: 10, message: 'Starting app services' });
await services.startAppServices(app);
await progressCallback({ percent: 35, message: 'Starting container' });
await docker.startContainer(app.id);
await progressCallback({ percent: 60, message: 'Adding collectd profile' });
await addCollectdProfile(app);
if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) {
await progressCallback({ percent: 35, message: 'Starting container' });
await docker.startContainer(app.id);
}
// stopped apps do not renew certs. currently, we skip DNS setup so as not to overwrite existing user settings
await progressCallback({ percent: 80, message: 'Configuring reverse proxy' });
@@ -693,9 +689,6 @@ async function stop(app, args, progressCallback) {
await progressCallback({ percent: 50, message: 'Stopping app services' });
await services.stopAppServices(app);
await progressCallback({ percent: 80, message: 'Removing collectd profile' });
await removeCollectdProfile(app);
await progressCallback({ percent: 100, message: 'Done' });
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null });
}
@@ -705,8 +698,17 @@ async function restart(app, args, progressCallback) {
assert.strictEqual(typeof args, 'object');
assert.strictEqual(typeof progressCallback, 'function');
await progressCallback({ percent: 20, message: 'Restarting container' });
await docker.restartContainer(app.id);
if (app.manifest.id !== constants.PROXY_APP_APPSTORE_ID) {
await progressCallback({ percent: 10, message: 'Starting app services' });
await services.startAppServices(app);
await progressCallback({ percent: 20, message: 'Restarting container' });
await docker.restartContainer(app.id);
}
// stopped apps do not renew certs. currently, we skip DNS setup so as not to overwrite existing user settings
await progressCallback({ percent: 80, message: 'Configuring reverse proxy' });
await reverseProxy.configureApp(app, AuditSource.APPTASK);
await progressCallback({ percent: 100, message: 'Done' });
await updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null });
+76 -83
@@ -1,5 +1,7 @@
'use strict';
const BoxError = require('./boxerror.js');
exports = module.exports = {
run,
@@ -8,6 +10,7 @@ exports = module.exports = {
const apps = require('./apps.js'),
assert = require('assert'),
backupFormat = require('./backupformat.js'),
backups = require('./backups.js'),
constants = require('./constants.js'),
debug = require('debug')('box:backupcleaner'),
@@ -17,7 +20,6 @@ const apps = require('./apps.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
storage = require('./storage.js'),
util = require('util'),
_ = require('underscore');
function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
@@ -35,7 +37,7 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
else backup.discardReason = 'creating-too-long';
} else if (referencedBackupIds.includes(backup.id)) {
backup.keepReason = 'reference';
} else if ((now - backup.creationTime) < (backup.preserveSecs * 1000)) {
} else if ((backup.preserveSecs === -1) || ((now - backup.creationTime) < (backup.preserveSecs * 1000))) {
backup.keepReason = 'preserveSecs';
} else if ((now - backup.creationTime < policy.keepWithinSecs * 1000) || policy.keepWithinSecs < 0) {
backup.keepReason = 'keepWithinSecs';
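The keep/discard decision above is an ordered chain; a condensed sketch of the precedence after this change (field names from the diff; the later bucketed retention rules are omitted):
// Ordered precedence; the first matching reason wins.
// Note: preserveSecs === -1 now means "preserve forever".
function keepReason(backup, policy, referencedBackupIds, now) {
    if (referencedBackupIds.includes(backup.id)) return 'reference';
    if (backup.preserveSecs === -1 || (now - backup.creationTime) < backup.preserveSecs * 1000) return 'preserveSecs';
    if (policy.keepWithinSecs < 0 || (now - backup.creationTime) < policy.keepWithinSecs * 1000) return 'keepWithinSecs';
    return null; // no reason yet; later rules (not shown in this hunk) may still keep it
}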
@@ -78,49 +80,42 @@ function applyBackupRetentionPolicy(allBackups, policy, referencedBackupIds) {
}
}
async function cleanupBackup(backupConfig, backup, progressCallback) {
async function removeBackup(backupConfig, backup, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof backup, 'object');
assert.strictEqual(typeof progressCallback, 'function');
const backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
const backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
return new Promise((resolve) => {
function done(error) {
if (error) {
debug('cleanupBackup: error removing backup %j : %s', backup, error.message);
return resolve();
}
let removeError;
if (backup.format === 'tgz') {
progressCallback({ message: `${backup.remotePath}: Removing ${backupFilePath}`});
[removeError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFilePath));
} else {
progressCallback({ message: `${backup.remotePath}: Removing directory ${backupFilePath}`});
[removeError] = await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath, progressCallback));
}
// prune empty directory if possible
storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath), async function (error) {
if (error) debug('cleanupBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), error.message);
if (removeError) {
debug('removeBackup: error removing backup %j : %s', backup, removeError.message);
return;
}
const [delError] = await safe(backups.del(backup.id));
if (delError) debug('cleanupBackup: error removing from database', delError);
else debug('cleanupBackup: removed %s', backup.id);
// prune empty directory if possible
const [pruneError] = await safe(storage.api(backupConfig.provider).remove(backupConfig, path.dirname(backupFilePath)));
if (pruneError) debug('removeBackup: unable to prune backup directory %s : %s', path.dirname(backupFilePath), pruneError.message);
resolve();
});
}
if (backup.format ==='tgz') {
progressCallback({ message: `${backup.id}: Removing ${backupFilePath}`});
storage.api(backupConfig.provider).remove(backupConfig, backupFilePath, done);
} else {
const events = storage.api(backupConfig.provider).removeDir(backupConfig, backupFilePath);
events.on('progress', (message) => progressCallback({ message: `${backup.id}: ${message}` }));
events.on('done', done);
}
});
const [delError] = await safe(backups.del(backup.id));
if (delError) debug(`removeBackup: error removing ${backup.id} from database`, delError);
else debug(`removeBackup: removed ${backup.remotePath}`);
}
async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback) {
async function cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert(Array.isArray(referencedAppBackupIds));
assert(Array.isArray(referencedBackupIds));
assert.strictEqual(typeof progressCallback, 'function');
let removedAppBackupIds = [];
const removedAppBackupPaths = [];
const allApps = await apps.list();
const allAppIds = allApps.map(a => a.id);
@@ -137,49 +132,49 @@ async function cleanupAppBackups(backupConfig, referencedAppBackupIds, progressC
// apply backup policy per app. keep latest backup only for existing apps
let appBackupsToRemove = [];
for (const appId of Object.keys(appBackupsById)) {
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedAppBackupIds);
applyBackupRetentionPolicy(appBackupsById[appId], _.extend({ keepLatest: allAppIds.includes(appId) }, backupConfig.retentionPolicy), referencedBackupIds);
appBackupsToRemove = appBackupsToRemove.concat(appBackupsById[appId].filter(b => !b.keepReason));
}
for (const appBackup of appBackupsToRemove) {
await progressCallback({ message: `Removing app backup (${appBackup.identifier}): ${appBackup.id}`});
removedAppBackupIds.push(appBackup.id);
await cleanupBackup(backupConfig, appBackup, progressCallback); // never errors
removedAppBackupPaths.push(appBackup.remotePath);
await removeBackup(backupConfig, appBackup, progressCallback); // never errors
}
debug('cleanupAppBackups: done');
return removedAppBackupIds;
return removedAppBackupPaths;
}
async function cleanupMailBackups(backupConfig, referencedAppBackupIds, progressCallback) {
async function cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert(Array.isArray(referencedAppBackupIds));
assert(Array.isArray(referencedBackupIds));
assert.strictEqual(typeof progressCallback, 'function');
let removedMailBackupIds = [];
const removedMailBackupPaths = [];
const mailBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_MAIL, 1, 1000);
applyBackupRetentionPolicy(mailBackups, _.extend({ keepLatest: true }, backupConfig.retentionPolicy), referencedAppBackupIds);
applyBackupRetentionPolicy(mailBackups, _.extend({ keepLatest: true }, backupConfig.retentionPolicy), referencedBackupIds);
for (const mailBackup of mailBackups) {
if (mailBackup.keepReason) continue;
await progressCallback({ message: `Removing mail backup ${mailBackup.id}`});
removedMailBackupIds.push(mailBackup.id);
await cleanupBackup(backupConfig, mailBackup, progressCallback); // never errors
await progressCallback({ message: `Removing mail backup ${mailBackup.remotePath}`});
removedMailBackupPaths.push(mailBackup.remotePath);
await removeBackup(backupConfig, mailBackup, progressCallback); // never errors
}
debug('cleanupMailBackups: done');
return removedMailBackupIds;
return removedMailBackupPaths;
}
async function cleanupBoxBackups(backupConfig, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof progressCallback, 'function');
let referencedAppBackupIds = [], removedBoxBackupIds = [];
let referencedBackupIds = [], removedBoxBackupPaths = [];
const boxBackups = await backups.getByTypePaged(backups.BACKUP_TYPE_BOX, 1, 1000);
@@ -187,48 +182,48 @@ async function cleanupBoxBackups(backupConfig, progressCallback) {
for (const boxBackup of boxBackups) {
if (boxBackup.keepReason) {
referencedAppBackupIds = referencedAppBackupIds.concat(boxBackup.dependsOn);
referencedBackupIds = referencedBackupIds.concat(boxBackup.dependsOn);
continue;
}
await progressCallback({ message: `Removing box backup ${boxBackup.id}`});
await progressCallback({ message: `Removing box backup ${boxBackup.remotePath}`});
removedBoxBackupIds.push(boxBackup.id);
await cleanupBackup(backupConfig, boxBackup, progressCallback);
removedBoxBackupPaths.push(boxBackup.remotePath);
await removeBackup(backupConfig, boxBackup, progressCallback);
}
debug('cleanupBoxBackups: done');
return { removedBoxBackupIds, referencedAppBackupIds };
return { removedBoxBackupPaths, referencedBackupIds };
}
// cleans up the database by checking whether the backup still exists in the remote
async function cleanupMissingBackups(backupConfig, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof progressCallback, 'function');
const perPage = 1000;
let missingBackupIds = [];
const backupExists = util.promisify(storage.api(backupConfig.provider).exists);
const missingBackupPaths = [];
if (constants.TEST) return missingBackupIds;
if (constants.TEST) return missingBackupPaths;
let page = 1, result = [];
do {
result = await backups.list(page, perPage);
for (const backup of result) {
let backupFilePath = storage.getBackupFilePath(backupConfig, backup.id, backup.format);
let backupFilePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);
if (backup.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory
const [existsError, exists] = await safe(backupExists(backupConfig, backupFilePath));
const [existsError, exists] = await safe(storage.api(backupConfig.provider).exists(backupConfig, backupFilePath));
if (existsError || exists) continue;
await progressCallback({ message: `Removing missing backup ${backup.id}`});
await progressCallback({ message: `Removing missing backup ${backup.remotePath}`});
const [delError] = await safe(backups.del(backup.id));
if (delError) debug(`cleanupBackup: error removing ${backup.id} from database`, delError);
if (delError) debug(`cleanupMissingBackups: error removing ${backup.id} from database`, delError);
missingBackupIds.push(backup.id);
missingBackupPaths.push(backup.remotePath);
}
++ page;
@@ -236,7 +231,7 @@ async function cleanupMissingBackups(backupConfig, progressCallback) {
debug('cleanupMissingBackups: done');
return missingBackupIds;
return missingBackupPaths;
}
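The pruning loop above pages through the backups table; a minimal sketch of the paging shape (the loop's continuation condition is an assumption, since the hunk ends before it):
async function forEachBackupPaged(perPage, handler) {
    let page = 1, result = [];
    do {
        result = await backups.list(page, perPage);
        for (const backup of result) await handler(backup);
        ++page;
    } while (result.length === perPage); // assumed: stop on the first short page
}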
// removes the snapshots of apps that have been uninstalled
@@ -247,31 +242,25 @@ async function cleanupSnapshots(backupConfig) {
const info = safe.JSON.parse(contents);
if (!info) return;
delete info.box;
const progressCallback = (progress) => { debug(`cleanupSnapshots: ${progress.message}`); };
for (const appId of Object.keys(info)) {
if (appId === 'box' || appId === 'mail') continue;
const app = await apps.get(appId);
if (app) continue; // app is still installed
await new Promise((resolve) => {
async function done(/* ignoredError */) {
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));
if (info[appId].format === 'tgz') {
await safe(storage.api(backupConfig.provider).remove(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`)), { debug });
} else {
await safe(storage.api(backupConfig.provider).removeDir(backupConfig, backupFormat.api(info[appId].format).getBackupFilePath(backupConfig, `snapshot/app_${appId}`), progressCallback), { debug });
}
await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache`));
safe.fs.unlinkSync(path.join(paths.BACKUP_INFO_DIR, `${appId}.sync.cache.new`));
resolve();
}
if (info[appId].format ==='tgz') {
storage.api(backupConfig.provider).remove(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format), done);
} else {
const events = storage.api(backupConfig.provider).removeDir(backupConfig, storage.getBackupFilePath(backupConfig, `snapshot/app_${appId}`, info[appId].format));
events.on('progress', function (detail) { debug(`cleanupSnapshots: ${detail}`); });
events.on('done', done);
}
});
await safe(backups.setSnapshotInfo(appId, null /* info */), { debug });
debug(`cleanupSnapshots: cleaned up snapshot of app ${appId}`);
}
debug('cleanupSnapshots: done');
@@ -282,25 +271,29 @@ async function run(progressCallback) {
const backupConfig = await settings.getBackupConfig();
const status = await storage.api(backupConfig.provider).getBackupProviderStatus(backupConfig);
debug(`clean: mount point status is ${JSON.stringify(status)}`);
if (status.state !== 'active') throw new BoxError(BoxError.MOUNT_ERROR, `Backup endpoint is not mounted: ${status.message}`);
if (backupConfig.retentionPolicy.keepWithinSecs < 0) {
debug('cleanup: keeping all backups');
return {};
}
await progressCallback({ percent: 10, message: 'Cleaning box backups' });
const { removedBoxBackupIds, referencedAppBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback);
const { removedBoxBackupPaths, referencedBackupIds } = await cleanupBoxBackups(backupConfig, progressCallback); // references is app or mail backup ids
await progressCallback({ percent: 20, message: 'Cleaning mail backups' });
const removedMailBackupIds = await cleanupMailBackups(backupConfig, referencedAppBackupIds, progressCallback);
const removedMailBackupPaths = await cleanupMailBackups(backupConfig, referencedBackupIds, progressCallback);
await progressCallback({ percent: 40, message: 'Cleaning app backups' });
const removedAppBackupIds = await cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback);
const removedAppBackupPaths = await cleanupAppBackups(backupConfig, referencedBackupIds, progressCallback);
await progressCallback({ percent: 70, message: 'Cleaning missing backups' });
const missingBackupIds = await cleanupMissingBackups(backupConfig, progressCallback);
await progressCallback({ percent: 70, message: 'Checking storage backend and removing stale entries in database' });
const missingBackupPaths = await cleanupMissingBackups(backupConfig, progressCallback);
await progressCallback({ percent: 90, message: 'Cleaning snapshots' });
await cleanupSnapshots(backupConfig);
return { removedBoxBackupIds, removedMailBackupIds, removedAppBackupIds, missingBackupIds };
return { removedBoxBackupPaths, removedMailBackupPaths, removedAppBackupPaths, missingBackupPaths };
}
+12
@@ -0,0 +1,12 @@
'use strict';
exports = module.exports = {
api
};
function api(format) {
switch (format) {
case 'tgz': return require('./backupformat/tgz.js');
case 'rsync': return require('./backupformat/rsync.js');
}
}
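Call sites elsewhere in this diff use the dispatcher like so; a small sketch (storage, backupConfig and progressCallback as in the surrounding code):
async function removeBackupFiles(backupConfig, backup, progressCallback) {
    // Resolve the remote location in a format-agnostic way ...
    const filePath = backupFormat.api(backup.format).getBackupFilePath(backupConfig, backup.remotePath);

    // ... then remove: tgz backups are single files, rsync backups are trees.
    if (backup.format === 'tgz') await storage.api(backupConfig.provider).remove(backupConfig, filePath);
    else await storage.api(backupConfig.provider).removeDir(backupConfig, filePath, progressCallback);
}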
+245
@@ -0,0 +1,245 @@
'use strict';
exports = module.exports = {
getBackupFilePath,
download,
upload,
_saveFsMetadata: saveFsMetadata,
_restoreFsMetadata: restoreFsMetadata
};
const assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
debug = require('debug')('box:backupformat/rsync'),
fs = require('fs'),
hush = require('../hush.js'),
once = require('../once.js'),
path = require('path'),
safe = require('safetydance'),
storage = require('../storage.js'),
syncer = require('../syncer.js'),
util = require('util');
function getBackupFilePath(backupConfig, remotePath) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
const rootPath = storage.api(backupConfig.provider).getBackupRootPath(backupConfig);
return path.join(rootPath, remotePath);
}
function sync(backupConfig, remotePath, dataLayout, progressCallback, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
// the concurrency here has to take into account the s3.upload partSize (which is 10MB), so 20 in-flight uploads can buffer up to 200MB
const concurrency = backupConfig.syncConcurrency || (backupConfig.provider === 's3' ? 20 : 10);
const removeDir = util.callbackify(storage.api(backupConfig.provider).removeDir);
const remove = util.callbackify(storage.api(backupConfig.provider).remove);
syncer.sync(dataLayout, function processTask(task, iteratorCallback) {
debug('sync: processing task: %j', task);
// an empty task.path is special: it signifies the directory itself
const destPath = task.path && backupConfig.encryptedFilenames ? hush.encryptFilePath(task.path, backupConfig.encryption) : task.path;
const backupFilePath = path.join(getBackupFilePath(backupConfig, remotePath), destPath);
if (task.operation === 'removedir') {
debug(`Removing directory ${backupFilePath}`);
return removeDir(backupConfig, backupFilePath, progressCallback, iteratorCallback);
} else if (task.operation === 'remove') {
debug(`Removing ${backupFilePath}`);
return remove(backupConfig, backupFilePath, iteratorCallback);
}
let retryCount = 0;
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
retryCallback = once(retryCallback); // protect against upload() erroring much later after a read stream error
++retryCount;
if (task.operation === 'add') {
progressCallback({ message: `Adding ${task.path}` + (retryCount > 1 ? ` (Try ${retryCount})` : '') });
debug(`Adding ${task.path} position ${task.position} try ${retryCount}`);
const stream = hush.createReadStream(dataLayout.toLocalPath('./' + task.path), backupConfig.encryption);
stream.on('error', (error) => retryCallback(error.message.includes('ENOENT') ? null : error)); // ignore error if file disappears
stream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: `Uploading ${task.path}` }); // 0M@0MBps looks wrong
progressCallback({ message: `Uploading ${task.path}: ${transferred}M@${speed}MBps` });
});
// only create the destination path when we have confirmation that the source is available. otherwise, we end up with
// files owned as 'root' and the cp later will fail
stream.on('open', function () {
storage.api(backupConfig.provider).upload(backupConfig, backupFilePath, stream, function (error) {
debug(error ? `Error uploading ${task.path} try ${retryCount}: ${error.message}` : `Uploaded ${task.path}`);
retryCallback(error);
});
});
}
}, iteratorCallback);
}, concurrency, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
callback();
});
}
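The shape of the upload path — bounded concurrency with per-file retries — can be shown with the same async library, stream plumbing omitted:
const async = require('async');

// Run at most `concurrency` tasks in flight; retry each one up to 5 times
// with a 20 second pause, mirroring the syncer above.
function uploadAll(tasks, uploadOne, concurrency, callback) {
    async.eachLimit(tasks, concurrency, function (task, iteratorCallback) {
        async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
            uploadOne(task, retryCallback);
        }, iteratorCallback);
    }, callback);
}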
// this is not part of 'snapshotting' because we need root access to traverse
async function saveFsMetadata(dataLayout, metadataFile) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof metadataFile, 'string');
// contains paths prefixed with './'
const metadata = {
emptyDirs: [],
execFiles: [],
symlinks: []
};
// we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
for (let lp of dataLayout.localPaths()) {
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (emptyDirs === null) throw new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`);
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (execFiles === null) throw new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`);
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (symlinks === null) throw new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`);
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
const target = safe.fs.readlinkSync(sl);
return { path: dataLayout.toRemotePath(sl), target };
}));
}
if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) throw new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`);
}
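What saveFsMetadata writes is a small JSON document recording the filesystem facts a plain file sync would lose; an illustrative shape (paths invented):
// Shape of the metadata file (all paths prefixed with './'):
const exampleMetadata = {
    emptyDirs: [ './data/cache' ],                                   // recreated with fs.mkdir on restore
    execFiles: [ './app/run.sh' ],                                   // chmod 0755 on restore
    symlinks: [ { path: './data/current', target: 'releases/42' } ]  // re-linked on restore
};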
async function restoreFsMetadata(dataLayout, metadataFile) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof metadataFile, 'string');
debug(`Recreating empty directories in ${dataLayout.toString()}`);
const metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
if (metadataJson === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error loading fsmetadata.json: ' + safe.error.message);
const metadata = safe.JSON.parse(metadataJson);
if (metadata === null) throw new BoxError(BoxError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json: ' + safe.error.message);
for (const emptyDir of metadata.emptyDirs) {
const [mkdirError] = await safe(fs.promises.mkdir(dataLayout.toLocalPath(emptyDir), { recursive: true }));
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to create path: ${mkdirError.message}`);
}
for (const execFile of metadata.execFiles) {
const [chmodError] = await safe(fs.promises.chmod(dataLayout.toLocalPath(execFile), parseInt('0755', 8)));
if (chmodError) throw new BoxError(BoxError.FS_ERROR, `unable to chmod: ${chmodError.message}`);
}
for (const symlink of (metadata.symlinks || [])) {
if (!symlink.target) continue;
// the path may not exist if we had a directory full of symlinks
const [mkdirError] = await safe(fs.promises.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }));
if (mkdirError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink (mkdir): ${mkdirError.message}`);
const [symlinkError] = await safe(fs.promises.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file'));
if (symlinkError) throw new BoxError(BoxError.FS_ERROR, `unable to symlink: ${symlinkError.message}`);
}
}
function downloadDir(backupConfig, backupFilePath, dataLayout, progressCallback, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof backupFilePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
debug(`downloadDir: ${backupFilePath} to ${dataLayout.toString()}`);
function downloadFile(entry, done) {
let relativePath = path.relative(backupFilePath, entry.fullPath);
if (backupConfig.encryptedFilenames) {
const { error, result } = hush.decryptFilePath(relativePath, backupConfig.encryption);
if (error) return done(new BoxError(BoxError.CRYPTO_ERROR, 'Unable to decrypt file'));
relativePath = result;
}
const destFilePath = dataLayout.toLocalPath('./' + relativePath);
fs.mkdir(path.dirname(destFilePath), { recursive: true }, function (error) {
if (error) return done(new BoxError(BoxError.FS_ERROR, error.message));
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
storage.api(backupConfig.provider).download(backupConfig, entry.fullPath, function (error, sourceStream) {
if (error) {
progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
return retryCallback(error);
}
let destStream = hush.createWriteStream(destFilePath, backupConfig.encryption);
// protect against multiple errors. must destroy the write stream so that a previous retry does not write
let closeAndRetry = once((error) => {
if (error) progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} errored: ${error.message}` });
else progressCallback({ message: `Download ${entry.fullPath} to ${destFilePath} finished` });
sourceStream.destroy();
destStream.destroy();
retryCallback(error);
});
destStream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: `Downloading ${entry.fullPath}` }); // 0M@0MBps looks wrong
progressCallback({ message: `Downloading ${entry.fullPath}: ${transferred}M@${speed}MBps` });
});
destStream.on('error', closeAndRetry);
sourceStream.on('error', closeAndRetry);
progressCallback({ message: `Downloading ${entry.fullPath} to ${destFilePath}` });
sourceStream.pipe(destStream, { end: true }).on('done', closeAndRetry);
});
}, done);
});
}
storage.api(backupConfig.provider).listDir(backupConfig, backupFilePath, 1000, function (entries, iteratorDone) {
// https://www.digitalocean.com/community/questions/rate-limiting-on-spaces?answer=40441
const concurrency = backupConfig.downloadConcurrency || (backupConfig.provider === 's3' ? 30 : 10);
async.eachLimit(entries, concurrency, downloadFile, iteratorDone);
}, callback);
}
async function download(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);
const backupFilePath = getBackupFilePath(backupConfig, remotePath);
const downloadDirAsync = util.promisify(downloadDir);
await downloadDirAsync(backupConfig, backupFilePath, dataLayout, progressCallback);
await restoreFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
}
async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof dataLayout, 'object');
assert.strictEqual(typeof progressCallback, 'function');
const syncAsync = util.promisify(sync);
await saveFsMetadata(dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`);
await syncAsync(backupConfig, remotePath, dataLayout, progressCallback);
}
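// Usage sketch (hypothetical caller, not from the source): saveFsMetadata() runs
// before sync() so fsmetadata.json travels with the backup, and restoreFsMetadata()
// runs after downloadDir() to recreate what plain file copies cannot express
// (empty dirs, exec bits, symlinks). For example:
//   await upload(backupConfig, 'snapshot/app_xyz', dataLayout, progressCallback);
//   await download(backupConfig, 'snapshot/app_xyz', dataLayout, progressCallback);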
+197
View File
@@ -0,0 +1,197 @@
'use strict';
exports = module.exports = {
getBackupFilePath,
download,
upload
};
const assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
DataLayout = require('../datalayout.js'),
debug = require('debug')('box:backupformat/tgz'),
{ DecryptStream, EncryptStream } = require('../hush.js'),
once = require('../once.js'),
path = require('path'),
ProgressStream = require('../progress-stream.js'),
storage = require('../storage.js'),
tar = require('tar-fs'),
zlib = require('zlib');
function getBackupFilePath(backupConfig, remotePath) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
const rootPath = storage.api(backupConfig.provider).getBackupRootPath(backupConfig);
const fileType = backupConfig.encryption ? '.tar.gz.enc' : '.tar.gz';
return path.join(rootPath, remotePath + fileType);
}
function tarPack(dataLayout, encryption) {
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof encryption, 'object');
const pack = tar.pack('/', {
dereference: false, // pack the symlink and not what it points to
entries: dataLayout.localPaths(),
ignoreStatError: (path, err) => {
debug(`tarPack: error stat'ing ${path} - ${err.code}`);
return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
},
map: function(header) {
header.name = dataLayout.toRemotePath(header.name);
// the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
// https://www.systutorials.com/docs/linux/man/5-star/
if (header.size > 8589934590 || header.name.length > 99) header.pax = { size: header.size };
return header;
},
strict: false // do not error for unknown types (skip fifo, char/block devices)
});
const gzip = zlib.createGzip({});
const ps = new ProgressStream({ interval: 10000 }); // emit 'progress' every 10 seconds
pack.on('error', function (error) {
debug('tarPack: tar stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
gzip.on('error', function (error) {
debug('tarPack: gzip stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
if (encryption) {
const encryptStream = new EncryptStream(encryption);
encryptStream.on('error', function (error) {
debug('tarPack: encrypt stream error.', error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
pack.pipe(gzip).pipe(encryptStream).pipe(ps);
} else {
pack.pipe(gzip).pipe(ps);
}
return ps;
}
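// Illustration only: the pax escape hatch above matters because a classic ustar
// header stores the size in a 12 byte octal field (max ~8GB) and the name in 100
// bytes. Assuming a hypothetical 10GB database dump, map() would set
// header.pax = { size: 10737418240 } and a pax extended header gets emitted
// instead of overflowing the ustar fields.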
function tarExtract(inStream, dataLayout, encryption) {
assert.strictEqual(typeof inStream, 'object');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof encryption, 'object');
const gunzip = zlib.createGunzip({});
const ps = new ProgressStream({ interval: 10000 }); // display a progress every 10 seconds
const extract = tar.extract('/', {
map: function (header) {
header.name = dataLayout.toLocalPath(header.name);
return header;
},
dmode: 500 // ensure directory is writable
});
const emitError = once((error) => {
inStream.destroy();
ps.emit('error', error);
});
inStream.on('error', function (error) {
debug('tarExtract: input stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
gunzip.on('error', function (error) {
debug('tarExtract: gunzip stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
extract.on('error', function (error) {
debug('tarExtract: extract stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
});
extract.on('finish', function () {
debug('tarExtract: done.');
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not extract
ps.emit('done');
});
if (encryption) {
const decrypt = new DecryptStream(encryption);
decrypt.on('error', function (error) {
debug('tarExtract: decrypt stream error.', error);
emitError(new BoxError(BoxError.EXTERNAL_ERROR, `Failed to decrypt: ${error.message}`));
});
inStream.pipe(ps).pipe(decrypt).pipe(gunzip).pipe(extract);
} else {
inStream.pipe(ps).pipe(gunzip).pipe(extract);
}
return ps;
}
async function download(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
assert.strictEqual(typeof progressCallback, 'function');
debug(`download: Downloading ${remotePath} to ${dataLayout.toString()}`);
const backupFilePath = getBackupFilePath(backupConfig, remotePath);
return new Promise((resolve, reject) => {
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
progressCallback({ message: `Downloading backup ${remotePath}` });
storage.api(backupConfig.provider).download(backupConfig, backupFilePath, function (error, sourceStream) {
if (error) return retryCallback(error);
const ps = tarExtract(sourceStream, dataLayout, backupConfig.encryption);
ps.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: 'Downloading backup' }); // 0M@0MBps looks wrong
progressCallback({ message: `Downloading ${transferred}M@${speed}MBps` });
});
ps.on('error', retryCallback);
ps.on('done', retryCallback);
});
}, (error) => {
if (error) return reject(error);
resolve();
});
});
}
async function upload(backupConfig, remotePath, dataLayout, progressCallback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof remotePath, 'string');
assert.strictEqual(typeof dataLayout, 'object');
assert.strictEqual(typeof progressCallback, 'function');
debug(`upload: Uploading ${dataLayout.toString()} to ${remotePath}`);
return new Promise((resolve, reject) => {
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
retryCallback = once(retryCallback); // protect against upload() erroring much later after a tar stream error
const tarStream = tarPack(dataLayout, backupConfig.encryption);
tarStream.on('progress', function (progress) {
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0MBps looks wrong
progressCallback({ message: `Uploading backup ${transferred}M@${speed}MBps` });
});
tarStream.on('error', retryCallback); // already returns BoxError
storage.api(backupConfig.provider).upload(backupConfig, getBackupFilePath(backupConfig, remotePath), tarStream, retryCallback);
}, (error) => {
if (error) return reject(error);
resolve();
});
});
}
+68 -50
View File
@@ -6,6 +6,7 @@ exports = module.exports = {
getByTypePaged,
add,
update,
setState,
list,
del,
@@ -17,8 +18,6 @@ exports = module.exports = {
injectPrivateFields,
removePrivateFields,
configureCollectd,
generateEncryptionKeysSync,
getSnapshotInfo,
@@ -43,38 +42,28 @@ exports = module.exports = {
const assert = require('assert'),
BoxError = require('./boxerror.js'),
collectd = require('./collectd.js'),
constants = require('./constants.js'),
CronJob = require('cron').CronJob,
crypto = require('crypto'),
database = require('./database.js'),
debug = require('debug')('box:backups'),
ejs = require('ejs'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
hat = require('./hat.js'),
locker = require('./locker.js'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
storage = require('./storage.js'),
tasks = require('./tasks.js'),
util = require('util');
tasks = require('./tasks.js');
const COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd/cloudron-backup.ejs', { encoding: 'utf8' });
const BACKUPS_FIELDS = [ 'id', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
// helper until all storage providers have been ported
function maybePromisify(func) {
if (util.types.isAsyncFunction(func)) return func;
return util.promisify(func);
}
const BACKUPS_FIELDS = [ 'id', 'remotePath', 'label', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOnJson', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
function postProcess(result) {
assert.strictEqual(typeof result, 'object');
result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
result.dependsOn = result.dependsOnJson ? safe.JSON.parse(result.dependsOnJson) : [];
delete result.dependsOnJson;
result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
delete result.manifestJson;
@@ -116,9 +105,9 @@ function generateEncryptionKeysSync(password) {
};
}
async function add(id, data) {
async function add(data) {
assert(data && typeof data === 'object');
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof data.remotePath, 'string');
assert(data.encryptionVersion === null || typeof data.encryptionVersion === 'number');
assert.strictEqual(typeof data.packageVersion, 'string');
assert.strictEqual(typeof data.type, 'string');
@@ -127,15 +116,20 @@ async function add(id, data) {
assert(Array.isArray(data.dependsOn));
assert.strictEqual(typeof data.manifest, 'object');
assert.strictEqual(typeof data.format, 'string');
assert.strictEqual(typeof data.preserveSecs, 'number');
const creationTime = data.creationTime || new Date(); // allow tests to set the time
const manifestJson = JSON.stringify(data.manifest);
const prefixId = data.type === exports.BACKUP_TYPE_APP ? `${data.type}_${data.identifier}` : data.type; // type and identifier are the same for other types
const id = `${prefixId}_v${data.packageVersion}_${hat(256)}`; // id is used by the UI to derive dependent packages. making this a UUID will require a lot of db querying
const [error] = await safe(database.query('INSERT INTO backups (id, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[ id, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, data.dependsOn.join(','), manifestJson, data.format ]));
const [error] = await safe(database.query('INSERT INTO backups (id, remotePath, identifier, encryptionVersion, packageVersion, type, creationTime, state, dependsOnJson, manifestJson, format, preserveSecs) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
[ id, data.remotePath, data.identifier, data.encryptionVersion, data.packageVersion, data.type, creationTime, data.state, JSON.stringify(data.dependsOn), manifestJson, data.format, data.preserveSecs ]));
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'Backup already exists');
if (error) throw error;
return id;
}
async function getByIdentifierAndStatePaged(identifier, state, page, perPage) {
@@ -172,19 +166,55 @@ async function getByTypePaged(type, page, perPage) {
return results;
}
async function update(id, backup) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof backup, 'object');
function validateLabel(label) {
assert.strictEqual(typeof label, 'string');
let fields = [ ], values = [ ];
for (const p in backup) {
fields.push(p + ' = ?');
values.push(backup[p]);
if (label.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'label too long');
if (/[^a-zA-Z0-9._() -]/.test(label)) return new BoxError(BoxError.BAD_FIELD, 'label can only contain alphanumerics, space, dot, hyphen, brackets or underscore');
return null;
}
// this is called by REST API
async function update(id, data) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof data, 'object');
let error;
if ('label' in data) {
error = validateLabel(data.label);
if (error) throw error;
}
const fields = [], values = [];
for (const p in data) {
if (p === 'label' || p === 'preserveSecs') {
fields.push(p + ' = ?');
values.push(data[p]);
}
}
values.push(id);
const backup = await get(id);
if (backup === null) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
const result = await database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
if ('preserveSecs' in data) {
// update the dependencies
for (const depId of backup.dependsOn) {
await database.query('UPDATE backups SET preserveSecs=? WHERE id = ?', [ data.preserveSecs, depId ]);
}
}
}
async function setState(id, state) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof state, 'string');
const result = await database.query('UPDATE backups SET state = ? WHERE id = ?', [state, id]);
if (result.affectedRows !== 1) throw new BoxError(BoxError.NOT_FOUND, 'Backup not found');
}
async function startBackupTask(auditSource) {
@@ -205,7 +235,8 @@ async function startBackupTask(auditSource) {
const errorMessage = error ? error.message : '';
const timedOut = error ? error.code === tasks.ETIMEOUT : false;
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }), { debug });
const backup = backupId ? await get(backupId) : null;
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId, remotePath: backup?.remotePath }), { debug });
});
return taskId;
@@ -268,31 +299,20 @@ async function startCleanupTask(auditSource) {
const taskId = await tasks.add(tasks.TASK_CLEAN_BACKUPS, []);
tasks.startTask(taskId, {}, async (error, result) => { // result is { removedBoxBackupIds, removedAppBackupIds, removedMailBackupIds, missingBackupIds }
tasks.startTask(taskId, {}, async (error, result) => { // result is { removedBoxBackupPaths, removedAppBackupPaths, removedMailBackupPaths, missingBackupPaths }
await safe(eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
taskId,
errorMessage: error ? error.message : null,
removedBoxBackupIds: result ? result.removedBoxBackupIds : [],
removedMailBackupIds: result ? result.removedMailBackupIds : [],
removedAppBackupIds: result ? result.removedAppBackupIds : [],
missingBackupIds: result ? result.missingBackupIds : []
removedBoxBackupPaths: result ? result.removedBoxBackupPaths : [],
removedMailBackupPaths: result ? result.removedMailBackupPaths : [],
removedAppBackupPaths: result ? result.removedAppBackupPaths : [],
missingBackupPaths: result ? result.missingBackupPaths : []
}), { debug });
});
return taskId;
}
async function configureCollectd(backupConfig) {
assert.strictEqual(typeof backupConfig, 'object');
if (backupConfig.provider === 'filesystem') {
const collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { backupDir: backupConfig.backupFolder });
await collectd.addProfile('cloudron-backup', collectdConf);
} else {
await collectd.removeProfile('cloudron-backup');
}
}
async function testConfig(backupConfig) {
assert.strictEqual(typeof backupConfig, 'object');
@@ -318,8 +338,7 @@ async function testConfig(backupConfig) {
if ('keepMonthly' in policy && typeof policy.keepMonthly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepMonthly must be a number');
if ('keepYearly' in policy && typeof policy.keepYearly !== 'number') return new BoxError(BoxError.BAD_FIELD, 'keepYearly must be a number');
const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
return error;
await storage.api(backupConfig.provider).testConfig(backupConfig);
}
// this skips password check since that policy is only at creation time
@@ -329,8 +348,7 @@ async function testProviderConfig(backupConfig) {
const func = storage.api(backupConfig.provider);
if (!func) return new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');
const [error] = await safe(util.promisify(storage.api(backupConfig.provider).testConfig)(backupConfig));
return error;
await storage.api(backupConfig.provider).testConfig(backupConfig);
}
async function remount(auditSource) {
@@ -341,5 +359,5 @@ async function remount(auditSource) {
const func = storage.api(backupConfig.provider);
if (!func) throw new BoxError(BoxError.BAD_FIELD, 'unknown storage provider');
await maybePromisify(storage.api(backupConfig.provider).remount)(backupConfig);
await storage.api(backupConfig.provider).remount(backupConfig);
}
+122 -655
View File
File diff suppressed because it is too large
+8
View File
@@ -9,6 +9,8 @@ exports = module.exports = {
setString,
del,
listCertIds,
ACME_ACCOUNT_KEY: 'acme_account_key',
ADDON_TURN_SECRET: 'addon_turn_secret',
SFTP_PUBLIC_KEY: 'sftp_public_key',
@@ -16,6 +18,7 @@ exports = module.exports = {
PROXY_AUTH_TOKEN_SECRET: 'proxy_auth_token_secret',
CERT_PREFIX: 'cert',
CERT_SUFFIX: 'cert',
_clear: clear
};
@@ -62,3 +65,8 @@ async function del(id) {
async function clear() {
await database.query('DELETE FROM blobs');
}
async function listCertIds() {
const result = await database.query('SELECT id FROM blobs WHERE id LIKE ?', [ `${exports.CERT_PREFIX}-%.${exports.CERT_SUFFIX}` ]);
return result.map(r => r.id);
}
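// Illustration only: with CERT_PREFIX and CERT_SUFFIX both set to 'cert', the LIKE
// pattern expands to 'cert-%.cert', matching blob ids such as 'cert-my.example.com.cert'.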
+1 -1
View File
@@ -52,7 +52,7 @@ BoxError.INACTIVE = 'Inactive'; // service/volume/mount
BoxError.INTERNAL_ERROR = 'Internal Error';
BoxError.INVALID_CREDENTIALS = 'Invalid Credentials';
BoxError.IPTABLES_ERROR = 'IPTables Error';
BoxError.LICENSE_ERROR = 'License Error';
BoxError.LICENSE_ERROR = 'License Error'; // billing or subscription expired
BoxError.LOGROTATE_ERROR = 'Logrotate Error';
BoxError.MAIL_ERROR = 'Mail Error';
BoxError.MOUNT_ERROR = 'Mount Error';
+2 -2
View File
@@ -1,6 +1,6 @@
'use strict';
let assert = require('assert'),
const assert = require('assert'),
fs = require('fs'),
path = require('path');
@@ -11,7 +11,7 @@ exports = module.exports = {
function getChanges(version) {
assert.strictEqual(typeof version, 'string');
let changelog = [ ];
const changelog = [];
const lines = fs.readFileSync(path.join(__dirname, '../CHANGES'), 'utf8').split('\n');
version = version.replace(/[+-].*/, ''); // strip prerelease
+34 -50
View File
@@ -19,6 +19,8 @@ exports = module.exports = {
renewCerts,
syncDnsRecords,
updateDiskUsage,
runSystemChecks
};
@@ -26,18 +28,17 @@ const apps = require('./apps.js'),
appstore = require('./appstore.js'),
assert = require('assert'),
AuditSource = require('./auditsource.js'),
backups = require('./backups.js'),
BoxError = require('./boxerror.js'),
branding = require('./branding.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
debug = require('debug')('box:cloudron'),
delay = require('delay'),
delay = require('./delay.js'),
dns = require('./dns.js'),
dockerProxy = require('./dockerproxy.js'),
domains = require('./domains.js'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
LogStream = require('./log-stream.js'),
mail = require('./mail.js'),
notifications = require('./notifications.js'),
path = require('path'),
@@ -49,7 +50,6 @@ const apps = require('./apps.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
spawn = require('child_process').spawn,
split = require('split'),
sysinfo = require('./sysinfo.js'),
tasks = require('./tasks.js'),
users = require('./users.js');
@@ -90,10 +90,13 @@ async function notifyUpdate() {
const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
if (version === constants.VERSION) return;
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
if (!version) {
await eventlog.add(eventlog.ACTION_INSTALL_FINISH, AuditSource.CRON, { version: constants.VERSION });
} else {
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, AuditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
const [error] = await safe(tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }));
if (error && error.reason !== BoxError.NOT_FOUND) throw error; // when hotfixing, task may not exist
}
safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
}
@@ -105,18 +108,11 @@ async function runStartupTasks() {
// stop all the systemd tasks
tasks.push(platform.stopAllTasks);
// this configures collectd to collect backup storage metrics if filesystem is used. This is also triggered when the settings change via the REST API
tasks.push(async function () {
const backupConfig = await settings.getBackupConfig();
await backups.configureCollectd(backupConfig);
});
// always generate webadmin config since we have no versioning mechanism for the ejs
tasks.push(async function () {
if (!settings.dashboardDomain()) return;
const domainObject = await domains.get(settings.dashboardDomain());
await reverseProxy.writeDashboardConfig(domainObject);
await reverseProxy.writeDashboardConfig(settings.dashboardDomain());
});
tasks.push(async function () {
@@ -137,7 +133,7 @@ async function runStartupTasks() {
// we used to run tasks in parallel but simultaneous nginx reloads were causing issues
for (let i = 0; i < tasks.length; i++) {
const [error] = await safe(tasks[i]());
if (error) debug(`Startup task at index ${i} failed: ${error.message}`);
if (error) debug(`Startup task at index ${i} failed: ${error.message} ${error.stack}`);
}
}
@@ -152,6 +148,7 @@ async function getConfig() {
return {
apiServerOrigin: settings.apiServerOrigin(),
webServerOrigin: settings.webServerOrigin(),
consoleServerOrigin: settings.consoleServerOrigin(),
adminDomain: settings.dashboardDomain(),
adminFqdn: settings.dashboardFqdn(),
mailFqdn: settings.mailFqdn(),
@@ -162,7 +159,7 @@ async function getConfig() {
footer: branding.renderFooter(allSettings[settings.FOOTER_KEY] || constants.FOOTER),
features: appstore.getFeatures(),
profileLocked: allSettings[settings.PROFILE_CONFIG_KEY].lockUserProfiles,
mandatory2FA: allSettings[settings.PROFILE_CONFIG_KEY].mandatory2FA
mandatory2FA: allSettings[settings.PROFILE_CONFIG_KEY].mandatory2FA,
};
}
@@ -215,7 +212,7 @@ async function getLogs(unit, options) {
assert.strictEqual(typeof options.format, 'string');
assert.strictEqual(typeof options.follow, 'boolean');
var lines = options.lines === -1 ? '+1' : options.lines,
const lines = options.lines === -1 ? '+1' : options.lines,
format = options.format || 'json',
follow = options.follow;
@@ -231,25 +228,12 @@ async function getLogs(unit, options) {
const cp = spawn('/usr/bin/tail', args);
const transformStream = split(function mapper(line) {
if (format !== 'json') return line + '\n';
const logStream = new LogStream({ format, source: unit });
logStream.close = cp.kill.bind(cp, 'SIGKILL'); // hook for caller. closing stream kills the child process
const data = line.split(' '); // logs are <ISOtimestamp> <msg>
let timestamp = (new Date(data[0])).getTime();
if (isNaN(timestamp)) timestamp = 0;
cp.stdout.pipe(logStream);
return JSON.stringify({
realtimeTimestamp: timestamp * 1000,
message: line.slice(data[0].length+1),
source: unit
}) + '\n';
});
transformStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process
cp.stdout.pipe(transformStream);
return transformStream;
return logStream;
}
async function prepareDashboardDomain(domain, auditSource) {
@@ -260,15 +244,12 @@ async function prepareDashboardDomain(domain, auditSource) {
if (settings.isDemo()) throw new BoxError(BoxError.CONFLICT, 'Not allowed in demo mode');
const domainObject = await domains.get(domain);
if (!domainObject) throw new BoxError(BoxError.NOT_FOUND, 'No such domain');
const fqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const fqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domain);
const result = await apps.list();
if (result.some(app => app.fqdn === fqdn)) throw new BoxError(BoxError.BAD_STATE, 'Dashboard location conflicts with an existing app');
const taskId = await tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_LOCATION, domain, auditSource ]);
const taskId = await tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_SUBDOMAIN, domain, auditSource ]);
tasks.startTask(taskId, {});
@@ -282,15 +263,12 @@ async function setDashboardDomain(domain, auditSource) {
debug(`setDashboardDomain: ${domain}`);
const domainObject = await domains.get(domain);
if (!domainObject) throw new BoxError(BoxError.NOT_FOUND, 'No such domain');
await reverseProxy.writeDashboardConfig(domainObject);
const fqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
await reverseProxy.writeDashboardConfig(domain);
const fqdn = dns.fqdn(constants.DASHBOARD_SUBDOMAIN, domain);
await settings.setDashboardLocation(domain, fqdn);
await safe(appstore.updateCloudron({ domain }));
await safe(appstore.updateCloudron({ domain }), { debug });
await eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain, fqdn });
}
@@ -324,8 +302,7 @@ async function setupDnsAndCert(subdomain, domain, auditSource, progressCallback)
assert.strictEqual(typeof auditSource, 'object');
assert.strictEqual(typeof progressCallback, 'function');
const domainObject = await domains.get(domain);
const dashboardFqdn = dns.fqdn(subdomain, domainObject);
const dashboardFqdn = dns.fqdn(subdomain, domain);
const ipv4 = await sysinfo.getServerIPv4();
const ipv6 = await sysinfo.getServerIPv6();
@@ -337,7 +314,8 @@ async function setupDnsAndCert(subdomain, domain, auditSource, progressCallback)
await dns.waitForDnsRecord(subdomain, domain, 'A', ipv4, { interval: 30000, times: 50000 });
if (ipv6) await dns.waitForDnsRecord(subdomain, domain, 'AAAA', ipv6, { interval: 30000, times: 50000 });
progressCallback({ percent: 60, message: `Getting certificate of ${dashboardFqdn}` });
await reverseProxy.ensureCertificate(dns.fqdn(subdomain, domainObject), domain, auditSource);
const location = { subdomain, domain, fqdn: dashboardFqdn, type: apps.LOCATION_TYPE_DASHBOARD, certificate: null };
await reverseProxy.ensureCertificate(location, {}, auditSource);
}
async function syncDnsRecords(options) {
@@ -347,3 +325,9 @@ async function syncDnsRecords(options) {
tasks.startTask(taskId, {});
return taskId;
}
async function updateDiskUsage() {
const taskId = await tasks.add(tasks.TASK_UPDATE_DISK_USAGE, []);
tasks.startTask(taskId, {});
return taskId;
}
-43
View File
@@ -1,43 +0,0 @@
'use strict';
exports = module.exports = {
addProfile,
removeProfile
};
const assert = require('assert'),
BoxError = require('./boxerror.js'),
debug = require('debug')('collectd'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
shell = require('./shell.js');
const CONFIGURE_COLLECTD_CMD = path.join(__dirname, 'scripts/configurecollectd.sh');
async function addProfile(name, profile) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof profile, 'string');
const configFilePath = path.join(paths.COLLECTD_APPCONFIG_DIR, `${name}.conf`);
// skip restarting collectd if the profile already exists with the same contents
const currentProfile = safe.fs.readFileSync(configFilePath, 'utf8') || '';
if (currentProfile === profile) return;
if (!safe.fs.writeFileSync(configFilePath, profile)) throw new BoxError(BoxError.FS_ERROR, `Error writing collectd config: ${safe.error.message}`);
const [error] = await safe(shell.promises.sudo('addCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'add', name ], {}));
if (error) throw new BoxError(BoxError.COLLECTD_ERROR, 'Could not add collectd config');
}
async function removeProfile(name) {
assert.strictEqual(typeof name, 'string');
if (!safe.fs.unlinkSync(path.join(paths.COLLECTD_APPCONFIG_DIR, `${name}.conf`))) {
if (safe.error.code !== 'ENOENT') debug('Error removing collectd profile', safe.error);
}
const [error] = await safe(shell.promises.sudo('removeCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'remove', name ], {}));
if (error) throw new BoxError(BoxError.COLLECTD_ERROR, 'Could not remove collectd config');
}
-42
View File
@@ -1,42 +0,0 @@
LoadPlugin "table"
<Plugin table>
<Table "/sys/fs/cgroup/memory/docker/<%= containerId %>/memory.stat">
Instance "<%= appId %>-memory"
Separator " \\n"
<Result>
Type gauge
InstancesFrom 0
ValuesFrom 1
</Result>
</Table>
<Table "/sys/fs/cgroup/memory/docker/<%= containerId %>/memory.max_usage_in_bytes">
Instance "<%= appId %>-memory"
Separator "\\n"
<Result>
Type gauge
InstancePrefix "max_usage_in_bytes"
ValuesFrom 0
</Result>
</Table>
<Table "/sys/fs/cgroup/cpuacct/docker/<%= containerId %>/cpuacct.stat">
Instance "<%= appId %>-cpu"
Separator " \\n"
<Result>
Type gauge
InstancesFrom 0
ValuesFrom 1
</Result>
</Table>
</Plugin>
<Plugin python>
<Module du>
<Path>
Instance "<%= appId %>"
Dir "<%= appDataDir %>"
</Path>
</Module>
</Plugin>
+11 -5
View File
@@ -1,14 +1,14 @@
'use strict';
let fs = require('fs'),
const fs = require('fs'),
path = require('path');
const CLOUDRON = process.env.BOX_ENV === 'cloudron',
TEST = process.env.BOX_ENV === 'test';
exports = module.exports = {
SMTP_LOCATION: 'smtp',
IMAP_LOCATION: 'imap',
SMTP_SUBDOMAIN: 'smtp',
IMAP_SUBDOMAIN: 'imap',
// These are combined into one array because users and groups become mailboxes
RESERVED_NAMES: [
@@ -22,7 +22,7 @@ exports = module.exports = {
'admins', 'users' // ldap code uses 'users' pseudo group
],
DASHBOARD_LOCATION: 'my',
DASHBOARD_SUBDOMAIN: 'my',
PORT: CLOUDRON ? 3000 : 5454,
INTERNAL_SMTP_PORT: 2525, // this value comes from the mail container
@@ -40,6 +40,7 @@ exports = module.exports = {
DEMO_USERNAME: 'cloudron',
DEMO_BLACKLISTED_APPS: [
'org.jupyter.cloudronapp',
'com.github.cloudtorrent',
'net.alltubedownload.cloudronapp',
'com.adguard.home.cloudronapp',
@@ -49,6 +50,8 @@ exports = module.exports = {
],
DEMO_APP_LIMIT: 20,
PROXY_APP_APPSTORE_ID: 'io.cloudron.builtin.appproxy',
AUTOUPDATE_PATTERN_NEVER: 'never',
// the db field is a blob so we make this explicit
@@ -63,12 +66,15 @@ exports = module.exports = {
PORT25_CHECK_SERVER: 'port25check.cloudron.io',
FORUM_URL: 'https://forum.cloudron.io',
SUPPORT_USERNAME: 'cloudron-support',
SUPPORT_EMAIL: 'support@cloudron.io',
USER_DIRECTORY_LDAP_DN: 'cn=admin,ou=system,dc=cloudron',
FOOTER: '&copy; %YEAR% &nbsp; [Cloudron](https://cloudron.io) &nbsp; &nbsp; &nbsp; [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)',
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '7.0.0-test'
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '7.3.0-test'
};
+41 -13
View File
@@ -28,13 +28,13 @@ const appHealthMonitor = require('./apphealthmonitor.js'),
dyndns = require('./dyndns.js'),
eventlog = require('./eventlog.js'),
janitor = require('./janitor.js'),
paths = require('./paths.js'),
safe = require('safetydance'),
scheduler = require('./scheduler.js'),
settings = require('./settings.js'),
system = require('./system.js'),
updater = require('./updater.js'),
updateChecker = require('./updatechecker.js'),
userdirectory = require('./userdirectory.js'),
_ = require('underscore');
const gJobs = {
@@ -50,7 +50,8 @@ const gJobs = {
dockerVolumeCleaner: null,
dynamicDns: null,
schedulerSync: null,
appHealthMonitor: null
appHealthMonitor: null,
diskUsage: null
};
// cron format
@@ -61,16 +62,46 @@ const gJobs = {
// Months: 0-11
// Day of Week: 0-6
async function startJobs() {
debug('startJobs: starting cron jobs');
function getCronSeed() {
let hour = null;
let minute = null;
const seedData = safe.fs.readFileSync(paths.CRON_SEED_FILE, 'utf8') || '';
const parts = seedData.split(':');
if (parts.length === 2) {
hour = parseInt(parts[0]) || null;
minute = parseInt(parts[1]) || null;
}
if ((hour == null || hour < 0 || hour > 23) || (minute == null || minute < 0 || minute > 59)) {
hour = Math.floor(24 * Math.random());
minute = Math.floor(60 * Math.random());
debug(`getCronSeed: writing new cron seed file with ${hour}:${minute} to ${paths.CRON_SEED_FILE}`);
safe.fs.writeFileSync(paths.CRON_SEED_FILE, `${hour}:${minute}`);
}
return { hour, minute };
}
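// Illustration only: the seed pins the randomized schedules across restarts.
// Assuming the seed file contains '14:37', the jobs below fire at stable times:
// systemChecks daily at 02:37:00, diskUsage at 03:37:00 and certificateRenew at
// 14:10:00; a missing or invalid seed file is re-randomized and rewritten.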
async function startJobs() {
const { hour, minute } = getCronSeed();
debug(`startJobs: starting cron jobs with hour ${hour} and minute ${minute}`);
const randomTick = Math.floor(60*Math.random());
gJobs.systemChecks = new CronJob({
cronTime: `${randomTick} ${randomTick} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
cronTime: `00 ${minute} 2 * * *`, // once a day. if you change this interval, change the notification messages with correct duration
onTick: async () => await safe(cloudron.runSystemChecks(), { debug }),
start: true
});
gJobs.diskUsage = new CronJob({
cronTime: `00 ${minute} 3 * * *`, // once a day
onTick: async () => await safe(cloudron.updateDiskUsage(), { debug }),
start: true
});
gJobs.diskSpaceChecker = new CronJob({
cronTime: '00 30 * * * *', // every 30 minutes. if you change this interval, change the notification messages with correct duration
onTick: async () => await safe(system.checkDiskSpace(), { debug }),
@@ -79,7 +110,7 @@ async function startJobs() {
// this is run separately from the update itself so that the user can disable automatic updates but can still get a notification
gJobs.updateCheckerJob = new CronJob({
cronTime: `${randomTick} ${randomTick} 1,5,9,13,17,21,23 * * *`,
cronTime: `00 ${minute} 1,5,9,13,17,21,23 * * *`,
onTick: async () => await safe(updateChecker.checkForUpdates({ automatic: true }), { debug }),
start: true
});
@@ -98,7 +129,7 @@ async function startJobs() {
gJobs.cleanupEventlog = new CronJob({
cronTime: '00 */30 * * * *', // every 30 minutes
onTick: async () => await safe(eventlog.cleanup({ creationTime: new Date(Date.now() - 60 * 60 * 24 * 10 * 1000) }), { debug }), // 10 days ago
onTick: async () => await safe(eventlog.cleanup({ creationTime: new Date(Date.now() - 60 * 60 * 24 * 60 * 1000) }), { debug }), // 60 days ago
start: true
});
@@ -114,8 +145,9 @@ async function startJobs() {
start: true
});
// randomized per Cloudron based on the cron seed
gJobs.certificateRenew = new CronJob({
cronTime: '00 00 */12 * * *', // every 12 hours
cronTime: `00 10 ${hour} * * *`,
onTick: async () => await safe(cloudron.renewCerts({}, AuditSource.CRON), { debug }),
start: true
});
@@ -148,10 +180,6 @@ async function handleSettingsChanged(key, value) {
await stopJobs();
await startJobs();
break;
case settings.USER_DIRECTORY_KEY:
if (value.enabled) await userdirectory.start();
else await userdirectory.stop();
break;
default:
break;
}
+7 -4
View File
@@ -61,8 +61,9 @@ async function initialize() {
// note the pool also has an 'acquire' event but that is called whenever we do a getConnection()
connection.on('error', (error) => debug(`Connection ${connection.threadId} error: ${error.message} ${error.code}`));
connection.query('USE ' + gDatabase.name);
connection.query(`USE ${gDatabase.name}`);
connection.query('SET SESSION sql_mode = \'strict_all_tables\'');
connection.query('SET SESSION group_concat_max_len = 65536'); // GROUP_CONCAT has only 1024 default
});
}
@@ -143,9 +144,11 @@ async function importFromFile(file) {
async function exportToFile(file) {
assert.strictEqual(typeof file, 'string');
// latest mysqldump enables column stats by default which is not present in MySQL 5.7 server
// this option must not be set in production cloudrons which still use the old mysqldump
const colStats = (!constants.TEST && require('fs').readFileSync('/etc/lsb-release', 'utf-8').includes('20.04')) ? '--column-statistics=0' : '';
// latest mysqldump enables column stats by default, an option which the 5.7 mysqldump utility does not have
const mysqlDumpHelp = safe.child_process.execSync('/usr/bin/mysqldump --help', { encoding: 'utf8' });
if (!mysqlDumpHelp) throw new BoxError(BoxError.DATABASE_ERROR, safe.error.message);
const hasColStats = mysqlDumpHelp.includes('column-statistics');
const colStats = hasColStats ? '--column-statistics=0' : '';
const cmd = `/usr/bin/mysqldump -h "${gDatabase.hostname}" -u root -p${gDatabase.password} ${colStats} --single-transaction --routines --triggers ${gDatabase.name} > "${file}"`;
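// Illustration only: on a mysqldump build that supports column statistics, colStats
// becomes '--column-statistics=0' and the final command looks like (hostname,
// password and database name here are made up):
//   /usr/bin/mysqldump -h "localhost" -u root -psecret --column-statistics=0 --single-transaction --routines --triggers box > "/tmp/box.sql"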
+13
View File
@@ -0,0 +1,13 @@
'use strict';
exports = module.exports = delay;
const assert = require('assert');
function delay(msecs) {
assert.strictEqual(typeof msecs, 'number');
return new Promise(function (resolve) {
setTimeout(resolve, msecs);
});
}
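// usage sketch (not part of the file): await delay(5000); // pause ~5 seconds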
+46
View File
@@ -0,0 +1,46 @@
'use strict';
exports = module.exports = {
disks,
file
};
const assert = require('assert'),
BoxError = require('./boxerror.js'),
safe = require('safetydance');
function parseLine(line) {
const parts = line.split(/\s+/); // split fully and re-join the tail below so the mountpoint can have spaces in it (String.split's limit argument discards the remainder)
return {
filesystem: parts[0],
type: parts[1],
size: Number.parseInt(parts[2], 10),
used: Number.parseInt(parts[3], 10),
available: Number.parseInt(parts[4], 10),
capacity: Number.parseInt(parts[5], 10) / 100, // note: df prints this with a trailing %
mountpoint: parts.slice(6).join(' ')
};
}
async function disks() {
const output = safe.child_process.execSync('df -B1 --output=source,fstype,size,used,avail,pcent,target', { encoding: 'utf8' });
if (!output) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${safe.error.message}`);
const lines = output.trim().split('\n').slice(1); // discard header
const result = [];
for (const line of lines) {
result.push(parseLine(line));
}
return result;
}
async function file(filename) {
assert.strictEqual(typeof filename, 'string');
const output = safe.child_process.execSync(`df -B1 --output=source,fstype,size,used,avail,pcent,target "${filename}"`, { encoding: 'utf8' });
if (!output) throw new BoxError(BoxError.FS_ERROR, `Error running df: ${safe.error.message}`);
const lines = output.trim().split('\n').slice(1); // discard header
return parseLine(lines[0]);
}
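// Illustration only: for a df output line like
//   /dev/sda1 ext4 41092214784 22096367616 18979069952 54% /
// parseLine() returns { filesystem: '/dev/sda1', type: 'ext4', size: 41092214784,
// used: 22096367616, available: 18979069952, capacity: 0.54, mountpoint: '/' }.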
+2 -3
View File
@@ -34,8 +34,7 @@ async function resolve(hostname, rrtype, options) {
if (error && error.code === 'ECANCELLED') error.code = 'TIMEOUT';
if (error) throw error;
// result is an empty array if there was no error but there is no record. when you query a random
// domain, it errors with ENOTFOUND. But if you query an existing domain (A record) but with different
// type (CNAME) it is not an error and empty array. for TXT records, result is 2d array of strings
// when you query a random record, it errors with ENOTFOUND. But, if you query a record which has a different type
// we sometimes get empty array and sometimes ENODATA. for TXT records, result is 2d array of strings
return result;
}
+81 -69
View File
@@ -4,6 +4,8 @@ exports = module.exports = {
start,
stop,
checkCertificate,
validateConfig,
applyConfig
};
@@ -11,29 +13,25 @@ exports = module.exports = {
const assert = require('assert'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
debug = require('debug')('box:userdirectory'),
dns = require('./dns.js'),
domains = require('./domains.js'),
debug = require('debug')('box:directoryserver'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
groups = require('./groups.js'),
ldap = require('ldapjs'),
path = require('path'),
paths = require('./paths.js'),
reverseproxy = require('./reverseproxy.js'),
reverseProxy = require('./reverseproxy.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
speakeasy = require('speakeasy'),
shell = require('./shell.js'),
users = require('./users.js'),
util = require('util'),
validator = require('validator');
var gServer = null;
let gServer = null, gCertificate = null;
const NOOP = function () {};
const GROUP_USERS_DN = 'cn=users,ou=groups,dc=cloudron';
const GROUP_ADMINS_DN = 'cn=admins,ou=groups,dc=cloudron';
const SET_LDAP_ALLOWLIST_CMD = path.join(__dirname, 'scripts/setldapallowlist.sh');
async function validateConfig(config) {
@@ -68,6 +66,8 @@ async function applyConfig(config) {
const [error] = await safe(shell.promises.sudo('setLdapAllowlist', [ SET_LDAP_ALLOWLIST_CMD ], {}));
if (error) throw new BoxError(BoxError.IPTABLES_ERROR, `Error setting ldap allowlist: ${error.message}`);
if (config.enabled) await start(); else await stop();
}
// helper function to deal with pagination
@@ -99,13 +99,13 @@ function finalSend(results, req, res, next) {
if (cookie && Buffer.isBuffer(cookie)) {
// we have pagination
var first = min;
let first = min;
if (cookie.length !== 0) {
first = parseInt(cookie.toString(), 10);
}
var last = sendPagedResults(first, first + pageSize);
const last = sendPagedResults(first, first + pageSize);
var resultCookie;
let resultCookie;
if (last < max) {
resultCookie = Buffer.from(last.toString());
} else {
@@ -142,23 +142,43 @@ async function authorize(req, res, next) {
return next();
}
// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base
// ldapjs seems to call this handler for everything when search === ''
async function maybeRootDSE(req, res, next) {
debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn.toString()}`);
if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, a rootDSE search requires base scope
if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError());
res.send({
dn: '',
attributes: {
objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ],
supportedLDAPVersion: '3',
vendorName: 'Cloudron LDAP',
vendorVersion: '1.0.0'
}
});
res.end();
}
async function userSearch(req, res, next) {
debug('user search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
const [error, result] = await safe(users.list());
const [error, allUsers] = await safe(users.list());
if (error) return next(new ldap.OperationsError(error.toString()));
const [groupsError, allGroups] = await safe(groups.listWithMembers());
if (groupsError) return next(new ldap.OperationsError(error.toString()));
let results = [];
// send user objects
result.forEach(function (user) {
for (const user of allUsers) {
// skip entries with empty username. Some apps like owncloud can't deal with this
if (!user.username) return;
if (!user.username) continue;
const dn = ldap.parseDN('cn=' + user.id + ',ou=users,dc=cloudron');
const memberof = [ GROUP_USERS_DN ];
if (users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) memberof.push(GROUP_ADMINS_DN);
const dn = ldap.parseDN(`cn=${user.id},ou=users,dc=cloudron`);
const displayName = user.displayName || user.username || ''; // displayName can be empty and username can be null
const nameParts = displayName.split(' ');
@@ -179,10 +199,12 @@ async function userSearch(req, res, next) {
givenName: firstName,
username: user.username,
samaccountname: user.username, // to support ActiveDirectory clients
memberof: memberof
memberof: allGroups.filter(function (g) { return g.userIds.indexOf(user.id) !== -1; }).map(function (g) { return `cn=${g.name},ou=groups,dc=cloudron`; })
}
};
if (user.twoFactorAuthenticationEnabled) obj.attributes.twoFactorAuthenticationEnabled = true;
// http://www.zytrax.com/books/ldap/ape/core-schema.html#sn has 'name' as SUP which is a DirectoryString
// which is required to have at least one character if present
if (lastName.length !== 0) obj.attributes.sn = lastName;
@@ -194,7 +216,7 @@ async function userSearch(req, res, next) {
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
results.push(obj);
}
});
}
finalSend(results, req, res, next);
}
@@ -202,54 +224,24 @@ async function userSearch(req, res, next) {
async function groupSearch(req, res, next) {
debug('group search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
const [error, result] = await safe(users.list());
const [error, allUsers] = await safe(users.list());
if (error) return next(new ldap.OperationsError(error.toString()));
const results = [];
// those are the old virtual groups for backwards compat
const virtualGroups = [{
name: 'users',
admin: false
}, {
name: 'admins',
admin: true
}];
virtualGroups.forEach(function (group) {
const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
const members = group.admin ? result.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : result;
const obj = {
dn: dn.toString(),
attributes: {
objectclass: ['group'],
cn: group.name,
memberuid: members.map(function(entry) { return entry.id; }).sort()
}
};
// ensure all filter values are also lowercase
const lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
results.push(obj);
}
});
let [errorGroups, resultGroups] = await safe(groups.listWithMembers());
let [errorGroups, allGroups] = await safe(groups.listWithMembers());
if (errorGroups) return next(new ldap.OperationsError(errorGroups.toString()));
resultGroups.forEach(function (group) {
const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
const members = group.userIds.filter(function (uid) { return result.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
for (const group of allGroups) {
const dn = ldap.parseDN(`cn=${group.name},ou=groups,dc=cloudron`);
const members = group.userIds.filter(function (uid) { return allUsers.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
const obj = {
dn: dn.toString(),
attributes: {
objectclass: ['group'],
cn: group.name,
gidnumber: group.id,
memberuid: members
}
};
@@ -261,7 +253,7 @@ async function groupSearch(req, res, next) {
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
results.push(obj);
}
});
}
finalSend(results, req, res, next);
}
@@ -269,12 +261,15 @@ async function groupSearch(req, res, next) {
// Will attach req.user if successful
async function userAuth(req, res, next) {
// extract the common name which might have different attribute names
const attributeName = Object.keys(req.dn.rdns[0].attrs)[0];
const commonName = req.dn.rdns[0].attrs[attributeName].value;
const cnAttributeName = Object.keys(req.dn.rdns[0].attrs)[0];
const commonName = req.dn.rdns[0].attrs[cnAttributeName].value;
if (!commonName) return next(new ldap.NoSuchObjectError(req.dn.toString()));
const TOTPTOKEN_ATTRIBUTE_NAME = 'totptoken'; // This has to be in sync with externalldap.js
const totpToken = req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME] ? req.dn.rdns[0].attrs[TOTPTOKEN_ATTRIBUTE_NAME].value : null;
let verifyFunc;
if (attributeName === 'mail') {
if (cnAttributeName === 'mail') {
verifyFunc = users.verifyWithEmail;
} else if (commonName.indexOf('@') !== -1) { // if mail is specified, enforce mail check
verifyFunc = users.verifyWithEmail;
@@ -289,12 +284,17 @@ async function userAuth(req, res, next) {
if (error && error.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
if (error) return next(new ldap.OperationsError(error.message));
// currently this is only optional if totpToken is provided and user has 2fa enabled
if (totpToken && user.twoFactorAuthenticationEnabled) {
const verified = speakeasy.totp.verify({ secret: user.twoFactorAuthenticationSecret, encoding: 'base32', token: totpToken, window: 2 });
if (!verified) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
}
req.user = user;
next();
}
// FIXME this needs to be restarted if settings changes or dashboard cert got renewed
async function start() {
if (gServer) return; // already running
@@ -307,13 +307,11 @@ async function start() {
fatal: debug
};
const domainObject = await domains.get(settings.dashboardDomain());
const dashboardFqdn = dns.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const bundle = await reverseproxy.getCertificatePath(dashboardFqdn, domainObject.domain);
gCertificate = await reverseProxy.getDirectoryServerCertificate();
gServer = ldap.createServer({
certificate: fs.readFileSync(bundle.certFilePath, 'utf8'),
key: fs.readFileSync(bundle.keyFilePath, 'utf8'),
certificate: gCertificate.cert,
key: gCertificate.key,
log: logger
});
@@ -324,12 +322,12 @@ async function start() {
gServer.bind('ou=system,dc=cloudron', async function(req, res, next) {
debug('system bind: %s (from %s)', req.dn.toString(), req.connection.ldap.id);
const tmp = await settings.getUserDirectoryConfig();
const tmp = await settings.getDirectoryServerConfig();
if (!req.dn.equals(constants.USER_DIRECTORY_LDAP_DN)) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
if (req.credentials !== tmp.secret) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
req.user = { user: 'userDirectoryAdmin' };
req.user = { user: 'directoryServerAdmin' };
res.end();
@@ -342,11 +340,13 @@ async function start() {
gServer.bind('ou=users,dc=cloudron', userAuth, async function (req, res) {
assert.strictEqual(typeof req.user, 'object');
await eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'userdirectory', id: req.connection.ldap.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
await eventlog.upsertLoginEvent(req.user.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'directoryserver', id: req.connection.ldap.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
res.end();
});
gServer.search('', maybeRootDSE); // when '', it seems the callback is called for everything else
// just log that an attempt was made to unknown route, this helps a lot during app packaging
gServer.use(function(req, res, next) {
debug('not handled: dn %s, scope %s, filter %s (from %s)', req.dn ? req.dn.toString() : '-', req.scope, req.filter ? req.filter.toString() : '-', req.connection.ldap.id);
@@ -365,3 +365,15 @@ async function stop() {
gServer.close();
gServer = null;
}
async function checkCertificate() {
const certificate = await reverseProxy.getDirectoryServerCertificate();
if (certificate.cert === gCertificate.cert) {
debug('checkCertificate: certificate has not changed');
return;
}
debug('checkCertificate: certificate changed. restarting');
await stop();
await start();
}
+32 -19
View File
@@ -51,6 +51,7 @@ function api(provider) {
case 'namecom': return require('./dns/namecom.js');
case 'namecheap': return require('./dns/namecheap.js');
case 'netcup': return require('./dns/netcup.js');
case 'hetzner': return require('./dns/hetzner.js');
case 'noop': return require('./dns/noop.js');
case 'manual': return require('./dns/manual.js');
case 'wildcard': return require('./dns/wildcard.js');
@@ -58,33 +59,33 @@ function api(provider) {
}
}
function fqdn(subdomain, domainObject) {
function fqdn(subdomain, domain) {
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof domain, 'string');
return subdomain + (subdomain ? '.' : '') + domainObject.domain;
return subdomain + (subdomain ? '.' : '') + domain;
}
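// Illustration only: fqdn('blog', 'example.com') === 'blog.example.com' while
// fqdn('', 'example.com') === 'example.com' (no stray leading dot for the bare domain).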
// Hostname validation comes from RFC 1123 (section 2.1)
// Domain name validation comes from RFC 2181 (Name syntax)
// https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
// We are validating the validity of the location-fqdn as host name (and not dns name)
function validateHostname(subdomain, domainObject) {
function validateHostname(subdomain, domain) {
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof domain, 'string');
const hostname = fqdn(subdomain, domainObject);
const hostname = fqdn(subdomain, domain);
const RESERVED_LOCATIONS = [
constants.SMTP_LOCATION,
constants.IMAP_LOCATION
const RESERVED_SUBDOMAINS = [
constants.SMTP_SUBDOMAIN,
constants.IMAP_SUBDOMAIN
];
if (RESERVED_LOCATIONS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
if (RESERVED_SUBDOMAINS.indexOf(subdomain) !== -1) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
if (hostname === settings.dashboardFqdn()) return new BoxError(BoxError.BAD_FIELD, `subdomain '${subdomain}' is reserved`);
// workaround https://github.com/oncletom/tld.js/issues/73
var tmp = hostname.replace('_', '-');
const tmp = hostname.replace('_', '-');
if (!tld.isValid(tmp)) return new BoxError(BoxError.BAD_FIELD, 'Hostname is not a valid domain name');
if (hostname.length > 253) return new BoxError(BoxError.BAD_FIELD, 'Hostname length exceeds 253 characters');
@@ -122,6 +123,9 @@ async function checkDnsRecords(subdomain, domain) {
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof domain, 'string');
const cnameRecords = await getDnsRecords(subdomain, domain, 'CNAME');
if (cnameRecords.length !== 0) return { needsOverwrite: true };
const ipv4Records = await getDnsRecords(subdomain, domain, 'A');
const ipv4 = await sysinfo.getServerIPv4();
@@ -158,7 +162,7 @@ async function removeDnsRecords(subdomain, domain, type, values) {
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
debug('removeDNSRecord: %s on %s type %s values', subdomain, domain, type, values);
debug('removeDNSRecords: %s on %s type %s values', subdomain, domain, type, values);
const domainObject = await domains.get(domain);
const [error] = await safe(api(domainObject.provider).del(domainObject, subdomain, type, values));
@@ -180,11 +184,11 @@ async function waitForDnsRecord(subdomain, domain, type, value, options) {
await api(domainObject.provider).wait(domainObject, subdomain, type, value, options);
}
function makeWildcard(vhost) {
assert.strictEqual(typeof vhost, 'string');
function makeWildcard(fqdn) {
assert.strictEqual(typeof fqdn, 'string');
// if the vhost is like *.example.com, this function will do nothing
let parts = vhost.split('.');
// if the fqdn is like *.example.com, this function will do nothing
const parts = fqdn.split('.');
parts[0] = '*';
return parts.join('.');
}
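A quick sketch of the renamed makeWildcard, using hypothetical hostnames:
makeWildcard('git.example.com'); // => '*.example.com'
makeWildcard('*.example.com');   // => '*.example.com' (already a wildcard, unchanged)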
@@ -195,7 +199,7 @@ async function registerLocation(location, options, recordType, recordValue) {
// get the current record before updating it
const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, recordType));
if (getError) {
const retryable = getError.reason !== BoxError.ACCESS_DENIED && getError.reason !== BoxError.NOT_FOUND;
const retryable = getError.reason !== BoxError.ACCESS_DENIED && getError.reason !== BoxError.NOT_FOUND; // NOT_FOUND is when zone is not found
debug(`registerLocation: Get error. retryable: ${retryable}. ${getError.message}`);
throw new BoxError(getError.reason, getError.message, { domain: location, retryable });
}
@@ -224,9 +228,18 @@ async function registerLocations(locations, options, progressCallback) {
const ipv6 = await sysinfo.getServerIPv6();
for (const location of locations) {
progressCallback({ message: `Registering location: ${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}` });
const fqdn = `${location.subdomain ? (location.subdomain + '.') : ''}${location.domain}`;
progressCallback({ message: `Registering location: ${fqdn}` });
await promiseRetry({ times: 200, interval: 5000, debug, retry: (error) => error.retryable }, async function () {
// cname records cannot co-exist with other records
const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, 'CNAME'));
if (!getError && values.length === 1) {
if (!options.overwriteDns) throw new BoxError(BoxError.ALREADY_EXISTS, 'DNS CNAME record already exists', { domain: location, retryable: false });
debug(`registerLocations: removing CNAME record of ${fqdn}`);
await removeDnsRecords(location.subdomain, location.domain, 'CNAME', values);
}
await registerLocation(location, options, 'A', ipv4);
if (ipv6) await registerLocation(location, options, 'AAAA', ipv6);
});
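The CNAME pre-check exists because a CNAME cannot coexist with A/AAAA records at the same name. The same logic, pulled out as a hypothetical helper for readability (the commit inlines it):
async function ensureNoCname(location, overwriteDns) { // assumed helper, mirrors the inline logic above
    const [getError, values] = await safe(getDnsRecords(location.subdomain, location.domain, 'CNAME'));
    if (getError || values.length !== 1) return; // no CNAME in the way
    if (!overwriteDns) throw new BoxError(BoxError.ALREADY_EXISTS, 'DNS CNAME record already exists', { domain: location, retryable: false });
    await removeDnsRecords(location.subdomain, location.domain, 'CNAME', values);
}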
@@ -282,7 +295,7 @@ async function syncDnsRecords(options, progressCallback) {
progress += Math.round(100/(1+allDomains.length));
let locations = [];
if (domain.domain === settings.dashboardDomain()) locations.push({ subdomain: constants.DASHBOARD_LOCATION, domain: settings.dashboardDomain() });
if (domain.domain === settings.dashboardDomain()) locations.push({ subdomain: constants.DASHBOARD_SUBDOMAIN, domain: settings.dashboardDomain() });
if (domain.domain === settings.mailDomain() && settings.mailFqdn() !== settings.dashboardFqdn()) locations.push({ subdomain: mailSubdomain, domain: settings.mailDomain() });
for (const app of allApps) {
+5 -5
View File
@@ -34,7 +34,7 @@ function injectPrivateFields(newConfig, currentConfig) {
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
}
async function translateRequestError(result) {
function translateRequestError(result) {
assert.strictEqual(typeof result, 'object');
if (result.statusCode === 404) return new BoxError(BoxError.NOT_FOUND, util.format('%s %j', result.statusCode, 'API does not exist'));
@@ -105,7 +105,7 @@ async function upsert(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
debug('upsert: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
@@ -166,7 +166,7 @@ async function get(domainObject, location, type) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(domainConfig, zoneName);
const result = await getDnsRecords(domainConfig, zone.id, fqdn, type);
@@ -182,7 +182,7 @@ async function del(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(domainConfig, zoneName);
@@ -212,7 +212,7 @@ async function wait(domainObject, subdomain, type, value, options) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(subdomain, domainObject);
fqdn = dns.fqdn(subdomain, domainObject.domain);
debug('wait: %s for zone %s of type %s', fqdn, zoneName, type);
+9 -3
View File
@@ -18,13 +18,12 @@ const assert = require('assert'),
dns = require('../dns.js'),
safe = require('safetydance'),
superagent = require('superagent'),
util = require('util'),
waitForDns = require('./waitfordns.js');
const DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
function formatError(response) {
return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
return `DigitalOcean DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
}
function removePrivateFields(domainObject) {
@@ -201,11 +200,17 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
// https://stackoverflow.com/questions/14313183/javascript-regex-how-do-i-check-if-the-string-is-ascii-only
function isASCII(str) {
// eslint-disable-next-line no-control-regex
return /^[\x00-\x7F]*$/.test(str);
}
async function verifyDomainConfig(domainObject) {
assert.strictEqual(typeof domainObject, 'object');
@@ -213,6 +218,7 @@ async function verifyDomainConfig(domainObject) {
zoneName = domainObject.zoneName;
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
if (!isASCII(domainConfig.token)) throw new BoxError(BoxError.BAD_FIELD, 'token contains invalid characters');
const ip = '127.0.0.1';
+1 -1
View File
@@ -119,7 +119,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+4 -4
View File
@@ -76,7 +76,7 @@ async function upsert(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
debug('add: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
@@ -105,7 +105,7 @@ async function get(domainObject, location, type) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(getDnsCredentials(domainConfig), zoneName);
@@ -130,7 +130,7 @@ async function del(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(getDnsCredentials(domainConfig), zoneName);
@@ -151,7 +151,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+15 -21
View File
@@ -24,12 +24,6 @@ const assert = require('assert'),
// const GODADDY_API_OTE = 'https://api.ote-godaddy.com/v1/domains';
const GODADDY_API = 'https://api.godaddy.com/v1/domains';
// this is a workaround for godaddy not having a delete API
// https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
const GODADDY_INVALID_IP = '0.0.0.0';
const GODADDY_INVALID_IPv6 = '0:0:0:0:0:0:0:0';
const GODADDY_INVALID_TXT = '""';
function formatError(response) {
return util.format(`GoDaddy DNS error [${response.statusCode}] ${response.body.message}`);
}
@@ -106,6 +100,12 @@ async function get(domainObject, location, type) {
const values = response.body.map(function (record) { return record.data; });
if (values.length === 1) {
// legacy: this was a workaround for godaddy not having a delete API
// https://stackoverflow.com/questions/39347464/delete-record-libcloud-godaddy-api
const GODADDY_INVALID_IP = '0.0.0.0';
const GODADDY_INVALID_IPv6 = '0:0:0:0:0:0:0:0';
const GODADDY_INVALID_TXT = '""';
if ((type === 'A' && values[0] === GODADDY_INVALID_IP)
|| (type === 'AAAA' && values[0] === GODADDY_INVALID_IPv6)
|| (type === 'TXT' && values[0] === GODADDY_INVALID_TXT)) return []; // pretend this record doesn't exist
@@ -124,30 +124,24 @@ async function del(domainObject, location, type, values) {
zoneName = domainObject.zoneName,
name = dns.getName(domainObject, location, type) || '@';
debug(`get: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
if (type !== 'A' && type !== 'AAAA' && type !== 'TXT') throw new BoxError(BoxError.EXTERNAL_ERROR, 'Record deletion is not supported by GoDaddy API');
const result = await get(domainObject, location, type);
if (result.length === 0) return;
// check if the record exists at all so that we don't insert the "Dead" record for no reason
const existingRecords = await get(domainObject, location, type);
if (existingRecords.length === 0) return;
const tmp = existingRecords.filter(r => !values.includes(r)); // keep records whose values are not being removed
// godaddy does not have a delete API. so fill it up with an invalid IP that we can ignore in future get()
const records = [{
ttl: 600,
data: type === 'A' ? GODADDY_INVALID_IP : (type === 'AAAA' ? GODADDY_INVALID_IPv6 : GODADDY_INVALID_TXT)
}];
if (tmp.length) return await upsert(domainObject, location, type, tmp); // only remove 'values'
const [error, response] = await safe(superagent.put(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
const [error, response] = await safe(superagent.del(`${GODADDY_API}/${zoneName}/records/${type}/${name}`)
.set('Authorization', `sso-key ${domainConfig.apiKey}:${domainConfig.apiSecret}`)
.send(records)
.timeout(30 * 1000)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 404) return;
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
}
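Since del now issues a real DELETE call, the expected status codes change (204 instead of 200). A compact sketch of that response handling, using an assumed helper name not present in the commit:
function assertDeleteResponse(response) { // hypothetical helper
    if (response.statusCode === 404) return; // record already gone: treat as success
    if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
    if (response.statusCode !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response)); // DELETE returns 204 No Content
}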
async function wait(domainObject, subdomain, type, value, options) {
@@ -157,7 +151,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
@@ -184,7 +178,7 @@ async function verifyDomainConfig(domainObject) {
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1; })) {
if (!nameservers.every(function (n) { return n.toLowerCase().indexOf('.domaincontrol.com') !== -1 || n.toLowerCase().indexOf('.secureserver.net') !== -1; })) {
debug('verifyDomainConfig: %j does not contain GoDaddy NS', nameservers);
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to GoDaddy');
}
+259
View File
@@ -0,0 +1,259 @@
'use strict';
exports = module.exports = {
removePrivateFields,
injectPrivateFields,
upsert,
get,
del,
wait,
verifyDomainConfig
};
const assert = require('assert'),
BoxError = require('../boxerror.js'),
constants = require('../constants.js'),
debug = require('debug')('box:dns/hetzner'),
dig = require('../dig.js'),
dns = require('../dns.js'),
safe = require('safetydance'),
superagent = require('superagent'),
waitForDns = require('./waitfordns.js');
const ENDPOINT = 'https://dns.hetzner.com/api/v1';
function formatError(response) {
return `Hetzner DNS error ${response.statusCode} ${JSON.stringify(response.body)}`;
}
function removePrivateFields(domainObject) {
domainObject.config.token = constants.SECRET_PLACEHOLDER;
return domainObject;
}
function injectPrivateFields(newConfig, currentConfig) {
if (newConfig.token === constants.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
}
async function getZone(domainConfig, zoneName) {
assert.strictEqual(typeof domainConfig, 'object');
assert.strictEqual(typeof zoneName, 'string');
const [error, response] = await safe(superagent.get(`${ENDPOINT}/zones`)
.set('Auth-API-Token', domainConfig.token)
.query({ search_name: zoneName })
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
if (!Array.isArray(response.body.zones)) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
const zone = response.body.zones.filter(z => z.name === zoneName);
if (zone.length === 0) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
return zone[0];
}
async function getZoneRecords(domainConfig, zone, name, type) {
assert.strictEqual(typeof domainConfig, 'object');
assert.strictEqual(typeof zone, 'object');
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof type, 'string');
let page = 1, matchingRecords = [];
debug(`getZoneRecords: getting dns records of ${zone.name} for name ${name} and type ${type}`);
const perPage = 50;
// eslint-disable-next-line no-constant-condition
while (true) {
const [error, response] = await safe(superagent.get(`${ENDPOINT}/records`)
.set('Auth-API-Token', domainConfig.token)
.query({ zone_id: zone.id, page, per_page: perPage })
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, formatError(response));
if (response.statusCode === 401 || response.statusCode === 403) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
matchingRecords = matchingRecords.concat(response.body.records.filter(function (record) {
return (record.type === type && record.name === name);
}));
if (response.body.records.length < perPage) break;
++page;
}
return matchingRecords;
}
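getZoneRecords pages through /records until a page comes back shorter than per_page, which signals the last page. A usage sketch with placeholder credentials:
const config = { token: 'hetzner-api-token' };      // placeholder token
const zone = await getZone(config, 'example.com');  // resolves the zone id
const records = await getZoneRecords(config, zone, 'www', 'A');
console.log(records.map(r => r.value));             // e.g. [ '203.0.113.10' ]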
async function upsert(domainObject, location, type, values) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
name = dns.getName(domainObject, location, type) || '@';
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
const zone = await getZone(domainConfig, zoneName);
const records = await getZoneRecords(domainConfig, zone, name, type);
// used to track available records to update instead of create
let i = 0;
for (let value of values) {
const data = {
type,
name,
value,
ttl: 60,
zone_id: zone.id
};
if (i >= records.length) {
const [error, response] = await safe(superagent.post(`${ENDPOINT}/records`)
.set('Auth-API-Token', domainConfig.token)
.send(data)
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
} else {
const [error, response] = await safe(superagent.put(`${ENDPOINT}/records/${records[i].id}`)
.set('Auth-API-Token', domainConfig.token)
.send(data)
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
++i;
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode === 422) throw new BoxError(BoxError.BAD_FIELD, response.body.message);
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
}
}
for (let j = values.length; j < records.length; j++) { // records 0..values.length-1 were updated above; delete the stale rest
const [error] = await safe(superagent.del(`${ENDPOINT}/records/${records[j].id}`)
.set('Auth-API-Token', domainConfig.token)
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
if (error) debug(`upsert: error removing record ${records[j].id}: ${error.message}`);
}
debug('upsert: completed');
}
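upsert reconciles the desired values against existing records: update in place while records remain, create past the end, then delete any leftovers. A tiny sketch of that plan (an assumed helper, not in the commit), which also shows why the cleanup loop starts at values.length:
function planUpsertCalls(recordCount, valueCount) { // hypothetical
    const calls = [];
    for (let i = 0; i < valueCount; i++) calls.push(i < recordCount ? 'PUT' : 'POST');
    for (let j = valueCount; j < recordCount; j++) calls.push('DELETE'); // stale extras
    return calls;
}
planUpsertCalls(3, 2); // => [ 'PUT', 'PUT', 'DELETE' ]
planUpsertCalls(1, 2); // => [ 'PUT', 'POST' ]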
async function get(domainObject, location, type) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
name = dns.getName(domainObject, location, type) || '@';
const zone = await getZone(domainConfig, zoneName);
const result = await getZoneRecords(domainConfig, zone, name, type);
return result.map(function (record) { return record.value; });
}
async function del(domainObject, location, type, values) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
name = dns.getName(domainObject, location, type) || '@';
const zone = await getZone(domainConfig, zoneName);
const records = await getZoneRecords(domainConfig, zone, name, type);
if (records.length === 0) return;
const matchingRecords = records.filter(function (record) { return values.some(function (value) { return value === record.value; }); });
if (matchingRecords.length === 0) return;
for (const r of matchingRecords) {
const [error, response] = await safe(superagent.del(`${ENDPOINT}/records/${r.id}`)
.set('Auth-API-Token', domainConfig.token)
.timeout(30 * 1000)
.retry(5)
.ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.statusCode === 404) return;
if (response.statusCode === 403 || response.statusCode === 401) throw new BoxError(BoxError.ACCESS_DENIED, formatError(response));
if (response.statusCode !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, formatError(response));
}
}
async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
async function verifyDomainConfig(domainObject) {
assert.strictEqual(typeof domainObject, 'object');
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName;
if (!domainConfig.token || typeof domainConfig.token !== 'string') throw new BoxError(BoxError.BAD_FIELD, 'token must be a non-empty string');
const ip = '127.0.0.1';
const credentials = {
token: domainConfig.token
};
if (process.env.BOX_ENV === 'test') return credentials; // this shouldn't be here
const [error, nameservers] = await safe(dig.resolve(zoneName, 'NS', { timeout: 5000 }));
if (error && error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
// https://docs.hetzner.com/dns-console/dns/general/dns-overview#the-hetzner-online-name-servers-are
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('oxygen.ns.hetzner.com') === -1) {
debug('verifyDomainConfig: %j does not contain Hetzner NS', nameservers);
throw new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Hetzner');
}
const location = 'cloudrontestdns';
await upsert(domainObject, location, 'A', [ ip ]);
debug('verifyDomainConfig: Test A record added');
await del(domainObject, location, 'A', [ ip ]);
debug('verifyDomainConfig: Test A record removed again');
return credentials;
}
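End-to-end, the new provider is exercised like the other dns backends. A usage sketch with placeholder values:
const domainObject = { domain: 'example.com', zoneName: 'example.com', config: { token: 'hetzner-api-token' } }; // placeholders
await verifyDomainConfig(domainObject);                    // NS check plus a test A record round-trip
await upsert(domainObject, 'www', 'A', [ '203.0.113.10' ]);
console.log(await get(domainObject, 'www', 'A'));          // => [ '203.0.113.10' ]
await del(domainObject, 'www', 'A', [ '203.0.113.10' ]);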
+1 -1
View File
@@ -225,7 +225,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+1 -1
View File
@@ -62,7 +62,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+2 -2
View File
@@ -90,7 +90,7 @@ async function setZone(domainConfig, zoneName, hosts) {
// Map to query params https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
hosts.forEach(function (host, i) {
var n = i+1; // api starts with 1 not 0
const n = i+1; // api starts with 1 not 0
query['TTL' + n] = '300'; // keep it low
query['HostName' + n] = host.HostName || host.Name;
query['RecordType' + n] = host.RecordType || host.Type;
@@ -237,7 +237,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+1 -1
View File
@@ -206,7 +206,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+1 -1
View File
@@ -217,7 +217,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+4 -4
View File
@@ -95,7 +95,7 @@ async function upsert(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
debug('add: %s for zone %s of type %s with values %j', fqdn, zoneName, type, values);
@@ -134,7 +134,7 @@ async function get(domainObject, location, type) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(domainConfig, zoneName);
@@ -165,7 +165,7 @@ async function del(domainObject, location, type, values) {
const domainConfig = domainObject.config,
zoneName = domainObject.zoneName,
fqdn = dns.fqdn(location, domainObject);
fqdn = dns.fqdn(location, domainObject.domain);
const zone = await getZoneByName(domainConfig, zoneName);
@@ -212,7 +212,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+2 -2
View File
@@ -99,7 +99,7 @@ async function upsert(domainObject, location, type, values) {
for (const value of values) {
const data = {
type,
ttl: 300 // lowest
ttl: 120 // lowest
};
if (type === 'MX') {
@@ -195,7 +195,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
+4 -3
View File
@@ -7,7 +7,8 @@ const assert = require('assert'),
debug = require('debug')('box:dns/waitfordns'),
dig = require('../dig.js'),
promiseRetry = require('../promise-retry.js'),
safe = require('safetydance');
safe = require('safetydance'),
_ = require('underscore');
async function resolveIp(hostname, type, options) {
assert.strictEqual(typeof hostname, 'string');
@@ -20,12 +21,12 @@ async function resolveIp(hostname, type, options) {
if (!error && results.length !== 0) return results;
// try CNAME record at authoritative server
debug(`resolveIp: Checking if ${hostname} has CNAME record at ${options.server}`);
debug(`resolveIp: No A record. Checking if ${hostname} has CNAME record at ${options.server}`);
const cnameResults = await dig.resolve(hostname, 'CNAME', options);
if (cnameResults.length === 0) return cnameResults;
// recurse lookup the CNAME record
debug(`resolveIp: Resolving ${hostname}'s CNAME record ${cnameResults[0]}`);
debug(`resolveIp: CNAME record found. Resolving ${hostname}'s CNAME record ${cnameResults[0]} using unbound`);
return await dig.resolve(cnameResults[0], type, options);
}
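The reworded debug lines trace the fallback: no A record at the authoritative server, then a CNAME lookup there, then a recursive resolve of the CNAME target via unbound. A usage sketch (server and timeout values are assumptions):
const ips = await resolveIp('git.example.com', 'A', { server: '127.0.0.1', timeout: 5000 });
// direct A records if present, otherwise the A records of the single CNAME hop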
+8 -6
View File
@@ -62,7 +62,7 @@ async function wait(domainObject, subdomain, type, value, options) {
assert.strictEqual(typeof value, 'string');
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
const fqdn = dns.fqdn(subdomain, domainObject);
const fqdn = dns.fqdn(subdomain, domainObject.domain);
await waitForDns(fqdn, domainObject.zoneName, type, value, options);
}
@@ -77,11 +77,12 @@ async function verifyDomainConfig(domainObject) {
if (error || !nameservers) throw new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers');
const location = 'cloudrontestdns';
const fqdn = dns.fqdn(location, domainObject);
const fqdn = dns.fqdn(location, domainObject.domain);
const [ipv4Error, ipv4Result] = await safe(dig.resolve(fqdn, 'A', { server: '127.0.0.1', timeout: 5000 }));
if (ipv4Error && ipv4Error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}. Please check if you have set up *.${domainObject.domain} to point to this server's IP`);
if (ipv4Error || !ipv4Result) throw new BoxError(BoxError.BAD_FIELD, ipv4Error ? ipv4Error.message : `Unable to resolve IPv4 of ${fqdn}`);
if (ipv4Error && (ipv4Error.code === 'ENOTFOUND' || ipv4Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}. Please check if you have set up *.${domainObject.domain} to point to this server's IP`);
if (ipv4Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}: ${ipv4Error.message}`);
if (!ipv4Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv4 of ${fqdn}`);
const ipv4 = await sysinfo.getServerIPv4();
if (ipv4Result.length !== 1 || ipv4 !== ipv4Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv4Result)} instead of IPv4 ${ipv4}`);
@@ -89,8 +90,9 @@ async function verifyDomainConfig(domainObject) {
const ipv6 = await sysinfo.getServerIPv6(); // both should be RFC 5952 format
if (ipv6) {
const [ipv6Error, ipv6Result] = await safe(dig.resolve(fqdn, 'AAAA', { server: '127.0.0.1', timeout: 5000 }));
if (ipv6Error && ipv6Error.code === 'ENOTFOUND') throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
if (ipv6Error || !ipv6Result) throw new BoxError(BoxError.BAD_FIELD, ipv6Error ? ipv6Error.message : `Unable to resolve IPv6 of ${fqdn}`);
if (ipv6Error && (ipv6Error.code === 'ENOTFOUND' || ipv6Error.code === 'ENODATA')) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
if (ipv6Error) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}: ${ipv6Error.message}`);
if (!ipv6Result) throw new BoxError(BoxError.BAD_FIELD, `Unable to resolve IPv6 of ${fqdn}`);
if (ipv6Result.length !== 1 || ipv6 !== ipv6Result[0]) throw new BoxError(BoxError.EXTERNAL_ERROR, `Domain resolves to ${JSON.stringify(ipv6Result)} instead of IPv6 ${ipv6}`);
}
+78 -91
View File
@@ -8,6 +8,7 @@ exports = module.exports = {
ping,
info,
df,
downloadImage,
createContainer,
startContainer,
@@ -20,13 +21,15 @@ exports = module.exports = {
createSubcontainer,
inspect,
getContainerIp,
execContainer,
getEvents,
memoryUsage,
createVolume,
removeVolume,
clearVolume,
update,
createExec,
startExec,
getExec,
resizeExec
};
const apps = require('./apps.js'),
@@ -34,10 +37,9 @@ const apps = require('./apps.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
debug = require('debug')('box:docker'),
delay = require('delay'),
delay = require('./delay.js'),
Docker = require('dockerode'),
path = require('path'),
reverseProxy = require('./reverseproxy.js'),
paths = require('./paths.js'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
@@ -46,9 +48,6 @@ const apps = require('./apps.js'),
volumes = require('./volumes.js'),
_ = require('underscore');
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh'),
MKDIRVOLUME_CMD = path.join(__dirname, 'scripts/mkdirvolume.sh');
const DOCKER_SOCKET_PATH = '/var/run/docker.sock';
const gConnection = new Docker({ socketPath: DOCKER_SOCKET_PATH });
@@ -117,7 +116,7 @@ async function pullImage(manifest) {
// https://github.com/dotcloud/docker/issues/1074 says each status message
// is emitted as a chunk
stream.on('data', function (chunk) {
var data = safe.JSON.parse(chunk) || { };
const data = safe.JSON.parse(chunk) || { };
debug('pullImage: %j', data);
// The data.status here is useless because this is per layer as opposed to per image
@@ -194,28 +193,23 @@ async function getAddonMounts(app) {
for (const addon of Object.keys(addons)) {
switch (addon) {
case 'localstorage':
case 'localstorage': {
const storageDir = await apps.getStorageDir(app);
mounts.push({
Target: '/app/data',
Source: `${app.id}-localstorage`,
Type: 'volume',
Source: storageDir,
Type: 'bind',
ReadOnly: false
});
break;
}
case 'tls': {
const bundle = await reverseProxy.getCertificatePath(app.fqdn, app.domain);
const certificateDir = `${paths.PLATFORM_DATA_DIR}/tls/${app.id}`;
mounts.push({
Target: '/etc/certs/tls_cert.pem',
Source: bundle.certFilePath,
Type: 'bind',
ReadOnly: true
});
mounts.push({
Target: '/etc/certs/tls_key.pem',
Source: bundle.keyFilePath,
Target: '/etc/certs',
Source: certificateDir,
Type: 'bind',
ReadOnly: true
});
@@ -314,6 +308,15 @@ async function createSubcontainer(app, name, cmd, options) {
const mounts = await getMounts(app);
const addonEnv = await services.getEnvironment(app);
const runtimeVolumes = {
'/tmp': {},
'/run': {},
'/home/cloudron/.cache': {},
'/root/.cache': {}
};
if (app.manifest.runtimeDirs) {
app.manifest.runtimeDirs.forEach(dir => runtimeVolumes[dir] = {});
}
let containerOptions = {
name: name, // for referencing containers
@@ -322,10 +325,7 @@ async function createSubcontainer(app, name, cmd, options) {
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv).concat(secondaryDomainsEnv),
ExposedPorts: isAppContainer ? exposedPorts : { },
Volumes: { // see also ReadonlyRootfs
'/tmp': {},
'/run': {}
},
Volumes: runtimeVolumes,
Labels: {
'fqdn': app.fqdn,
'appId': app.id,
@@ -342,7 +342,7 @@ async function createSubcontainer(app, name, cmd, options) {
'syslog-format': 'rfc5424'
}
},
Memory: system.getMemoryAllocation(memoryLimit),
Memory: await system.getMemoryAllocation(memoryLimit),
MemorySwap: memoryLimit, // Memory + Swap
PortBindings: isAppContainer ? dockerPortBindings : { },
PublishAllPorts: false,
@@ -355,7 +355,8 @@ async function createSubcontainer(app, name, cmd, options) {
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
SecurityOpt: [ 'apparmor=docker-cloudron-app' ],
CapAdd: [],
CapDrop: []
CapDrop: [],
Sysctls: {}
}
};
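The runtimeVolumes map built above means manifests can request extra writable paths on an otherwise read-only rootfs. A sketch of the merge with a hypothetical manifest:
const manifest = { runtimeDirs: [ '/var/tmp' ] };          // hypothetical manifest excerpt
const volumes = { '/tmp': {}, '/run': {}, '/home/cloudron/.cache': {}, '/root/.cache': {} };
(manifest.runtimeDirs || []).forEach(dir => volumes[dir] = {});
// volumes now also has '/var/tmp': {} and becomes containerOptions.Volumes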
@@ -388,7 +389,13 @@ async function createSubcontainer(app, name, cmd, options) {
const capabilities = manifest.capabilities || [];
// https://docs-stage.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
if (capabilities.includes('net_admin')) containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
if (capabilities.includes('net_admin')) {
containerOptions.HostConfig.CapAdd.push('NET_ADMIN', 'NET_RAW');
// ipv6 for new interfaces is disabled in the container by default, which prevents the openvpn tun device from having ipv6; re-enable it here
// See https://github.com/moby/moby/issues/20569 and https://github.com/moby/moby/issues/33099
containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.disable_ipv6'] = '0';
containerOptions.HostConfig.Sysctls['net.ipv6.conf.all.forwarding'] = '1';
}
if (capabilities.includes('mlock')) containerOptions.HostConfig.CapAdd.push('IPC_LOCK'); // mlock prevents swapping
if (!capabilities.includes('ping')) containerOptions.HostConfig.CapDrop.push('NET_RAW'); // NET_RAW is included by default by Docker
@@ -506,7 +513,7 @@ async function deleteImage(manifest) {
const dockerImage = manifest ? manifest.dockerImage : null;
if (!dockerImage) return;
if (dockerImage.includes('//')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module
if (dockerImage.includes('//') || dockerImage.startsWith('/')) return; // a common mistake is to paste a https:// as docker image. this results in a crash at runtime in dockerode module (https://github.com/apocas/dockerode/issues/548)
const removeOptions = {
force: false, // might be shared with another instance of this app
@@ -552,30 +559,51 @@ async function getContainerIp(containerId) {
return ip;
}
async function execContainer(containerId, options) {
async function createExec(containerId, options) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof options, 'object');
const container = gConnection.getContainer(containerId);
const [error, exec] = await safe(container.exec(options.execOptions));
const [error, exec] = await safe(container.exec(options));
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error && error.statusCode === 409) throw new BoxError(BoxError.BAD_STATE, error.message); // container restarting/not running
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
const [startError, stream] = await safe(exec.start(options.startOptions)); /* in hijacked mode, stream is a net.socket */
if (startError) throw new BoxError(BoxError.DOCKER_ERROR, startError);
return exec.id;
}
if (options.rows && options.columns) {
// there is a race where resizing too early results in a 404 "no such exec"
// https://git.cloudron.io/cloudron/box/issues/549
setTimeout(function () {
exec.resize({ h: options.rows, w: options.columns }, function (error) { if (error) debug('Error resizing console', error); });
}, 2000);
}
async function startExec(execId, options) {
assert.strictEqual(typeof execId, 'string');
assert.strictEqual(typeof options, 'object');
const exec = gConnection.getExec(execId);
const [error, stream] = await safe(exec.start(options)); /* in hijacked mode, stream is a net.socket */
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
return stream;
}
async function getExec(execId) {
assert.strictEqual(typeof execId, 'string');
const exec = gConnection.getExec(execId);
const [error, result] = await safe(exec.inspect());
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND, `Unable to find exec container ${execId}`);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
return { exitCode: result.ExitCode, running: result.Running };
}
async function resizeExec(execId, options) {
assert.strictEqual(typeof execId, 'string');
assert.strictEqual(typeof options, 'object');
const exec = gConnection.getExec(execId);
const [error] = await safe(exec.resize(options)); // { h, w }
if (error && error.statusCode === 404) throw new BoxError(BoxError.NOT_FOUND);
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
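The monolithic execContainer is split so callers can create, start, resize, and inspect an exec independently (the resize race workaround moves to the caller). A usage sketch; the option values are typical dockerode settings, not taken from this diff:
const execId = await createExec(containerId, { Cmd: [ '/bin/bash' ], AttachStdin: true, AttachStdout: true, AttachStderr: true, Tty: true });
const stream = await startExec(execId, { hijack: true, stdin: true, Tty: true }); // net.Socket in hijacked mode
await resizeExec(execId, { h: 40, w: 120 });
const { exitCode, running } = await getExec(execId);       // poll for completion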
async function getEvents(options) {
assert.strictEqual(typeof options, 'object');
@@ -596,59 +624,18 @@ async function memoryUsage(containerId) {
return result;
}
async function createVolume(name, volumeDataDir, labels) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof volumeDataDir, 'string');
assert.strictEqual(typeof labels, 'object');
const volumeOptions = {
Name: name,
Driver: 'local',
DriverOpts: { // https://github.com/moby/moby/issues/19990#issuecomment-248955005
type: 'none',
device: volumeDataDir,
o: 'bind'
},
Labels: labels
};
// requires sudo because the path can be outside appsdata
let [error] = await safe(shell.promises.sudo('createVolume', [ MKDIRVOLUME_CMD, volumeDataDir ], {}));
if (error) throw new BoxError(BoxError.FS_ERROR, `Error creating app data dir: ${error.message}`);
[error] = await safe(gConnection.createVolume(volumeOptions));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
}
async function clearVolume(name, options) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof options, 'object');
let volume = gConnection.getVolume(name);
let [error, v] = await safe(volume.inspect());
if (error && error.statusCode === 404) return;
if (error) throw new BoxError(BoxError.DOCKER_ERROR, error);
const volumeDataDir = v.Options.device;
[error] = await shell.promises.sudo('clearVolume', [ CLEARVOLUME_CMD, options.removeDirectory ? 'rmdir' : 'clear', volumeDataDir ], {});
if (error) throw new BoxError(BoxError.FS_ERROR, error);
}
// this only removes the volume and not the data
async function removeVolume(name) {
assert.strictEqual(typeof name, 'string');
let volume = gConnection.getVolume(name);
const [error] = await safe(volume.remove());
if (error && error.statusCode !== 404) throw new BoxError(BoxError.DOCKER_ERROR, `removeVolume: Error removing volume: ${error.message}`);
}
async function info() {
const [error, result] = await safe(gConnection.info());
if (error) throw new BoxError(BoxError.DOCKER_ERROR, 'Error connecting to docker');
return result;
}
async function df() {
const [error, result] = await safe(gConnection.df());
if (error) throw new BoxError(BoxError.DOCKER_ERROR, 'Error connecting to docker');
return result;
}
async function update(name, memory, memorySwap) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof memory, 'number');
+3 -3
View File
@@ -41,7 +41,7 @@ async function authorizeApp(req, res, next) {
}
function attachDockerRequest(req, res, next) {
var options = {
const options = {
socketPath: '/var/run/docker.sock',
method: req.method,
path: req.url,
@@ -143,8 +143,8 @@ async function start() {
// eslint-disable-next-line no-unused-vars
gHttpServer.on('upgrade', function (req, client, head) {
// Create a new tcp connection to the TCP server
var remote = net.connect('/var/run/docker.sock', function () {
var upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
const remote = net.connect('/var/run/docker.sock', function () {
let upgradeMessage = req.method + ' ' + req.url + ' HTTP/1.1\r\n' +
`Host: ${req.headers.host}\r\n` +
'Connection: Upgrade\r\n' +
'Upgrade: tcp\r\n';
+38 -33
View File
@@ -9,6 +9,8 @@ module.exports = exports = {
del,
clear,
getDomainObjectMap,
removePrivateFields,
removeRestrictedFields,
};
@@ -17,6 +19,7 @@ const assert = require('assert'),
BoxError = require('./boxerror.js'),
crypto = require('crypto'),
database = require('./database.js'),
debug = require('debug')('box:domains'),
eventlog = require('./eventlog.js'),
mail = require('./mail.js'),
reverseProxy = require('./reverseproxy.js'),
@@ -54,6 +57,7 @@ function api(provider) {
case 'digitalocean': return require('./dns/digitalocean.js');
case 'gandi': return require('./dns/gandi.js');
case 'godaddy': return require('./dns/godaddy.js');
case 'hetzner': return require('./dns/hetzner.js');
case 'linode': return require('./dns/linode.js');
case 'vultr': return require('./dns/vultr.js');
case 'namecom': return require('./dns/namecom.js');
@@ -76,13 +80,13 @@ async function verifyDomainConfig(domainConfig, domain, zoneName, provider) {
if (!backend) throw new BoxError(BoxError.BAD_FIELD, 'Invalid provider');
const domainObject = { config: domainConfig, domain: domain, zoneName: zoneName };
const [error, result] = await safe(api(provider).verifyDomainConfig(domainObject));
if (error && error.reason === BoxError.ACCESS_DENIED) return { error: new BoxError(BoxError.BAD_FIELD, `Access denied: ${error.message}`) };
if (error && error.reason === BoxError.NOT_FOUND) return { error: new BoxError(BoxError.BAD_FIELD, `Zone not found: ${error.message}`) };
if (error && error.reason === BoxError.EXTERNAL_ERROR) return { error: new BoxError(BoxError.BAD_FIELD, `Configuration error: ${error.message}`) };
if (error) return { error };
const [error, sanitizedConfig] = await safe(api(provider).verifyDomainConfig(domainObject));
if (error && error.reason === BoxError.ACCESS_DENIED) throw new BoxError(BoxError.BAD_FIELD, `Access denied: ${error.message}`);
if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.BAD_FIELD, `Zone not found: ${error.message}`);
if (error && error.reason === BoxError.EXTERNAL_ERROR) throw new BoxError(BoxError.BAD_FIELD, `Configuration error: ${error.message}`);
if (error) throw error;
return { error: null, sanitizedConfig: result };
return sanitizedConfig;
}
function validateTlsConfig(tlsConfig, dnsProvider) {
@@ -133,7 +137,7 @@ async function add(domain, data, auditSource) {
}
if (fallbackCertificate) {
let error = reverseProxy.validateCertificate('test', { domain, config }, fallbackCertificate);
let error = reverseProxy.validateCertificate('test', domain, fallbackCertificate);
if (error) throw error;
} else {
fallbackCertificate = await reverseProxy.generateFallbackCertificate(domain);
@@ -150,12 +154,11 @@ async function add(domain, data, auditSource) {
dkimSelector = `cloudron-${suffix}`;
}
const result = await verifyDomainConfig(config, domain, zoneName, provider);
if (result.error) throw result.error;
const sanitizedConfig = await verifyDomainConfig(config, domain, zoneName, provider);
let queries = [
const queries = [
{ query: 'INSERT INTO domains (domain, zoneName, provider, configJson, tlsConfigJson, fallbackCertificateJson) VALUES (?, ?, ?, ?, ?, ?)',
args: [ domain, zoneName, provider, JSON.stringify(result.sanitizedConfig), JSON.stringify(tlsConfig), JSON.stringify(fallbackCertificate) ] },
args: [ domain, zoneName, provider, JSON.stringify(sanitizedConfig), JSON.stringify(tlsConfig), JSON.stringify(fallbackCertificate) ] },
{ query: 'INSERT INTO mail (domain, dkimKeyJson, dkimSelector) VALUES (?, ?, ?)', args: [ domain, JSON.stringify(dkimKey), dkimSelector || 'cloudron' ] },
];
@@ -167,7 +170,7 @@ async function add(domain, data, auditSource) {
await eventlog.add(eventlog.ACTION_DOMAIN_ADD, auditSource, { domain, zoneName, provider });
safe(mail.onDomainAdded(domain)); // background
safe(mail.onDomainAdded(domain), { debug }); // background
}
async function get(domain) {
@@ -194,7 +197,6 @@ async function setConfig(domain, data, auditSource) {
assert.strictEqual(typeof auditSource, 'object');
let { zoneName, provider, config, fallbackCertificate, tlsConfig } = data;
let error;
if (settings.isDemo() && (domain === settings.dashboardDomain())) throw new BoxError(BoxError.CONFLICT, 'Not allowed in demo mode');
@@ -206,20 +208,19 @@ async function setConfig(domain, data, auditSource) {
}
if (fallbackCertificate) {
let error = reverseProxy.validateCertificate('test', domainObject, fallbackCertificate);
let error = reverseProxy.validateCertificate('test', domain, fallbackCertificate);
if (error) throw error;
}
error = validateTlsConfig(tlsConfig, provider);
if (error) throw error;
const tlsConfigError = validateTlsConfig(tlsConfig, provider);
if (tlsConfigError) throw tlsConfigError;
if (provider === domainObject.provider) api(provider).injectPrivateFields(config, domainObject.config);
const result = await verifyDomainConfig(config, domain, zoneName, provider);
if (result.error) throw result.error;
const sanitizedConfig = await verifyDomainConfig(config, domain, zoneName, provider);
const newData = {
config: result.sanitizedConfig,
config: sanitizedConfig,
zoneName,
provider,
tlsConfig,
@@ -227,7 +228,7 @@ async function setConfig(domain, data, auditSource) {
if (fallbackCertificate) newData.fallbackCertificate = fallbackCertificate;
let args = [ ], fields = [ ];
const args = [], fields = [];
for (const k in newData) {
if (k === 'config' || k === 'tlsConfig' || k === 'fallbackCertificate') { // json fields
fields.push(`${k}Json = ?`);
@@ -239,13 +240,11 @@ async function setConfig(domain, data, auditSource) {
}
args.push(domain);
[error] = await safe(database.query('UPDATE domains SET ' + fields.join(', ') + ' WHERE domain=?', args));
if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
const result = await database.query('UPDATE domains SET ' + fields.join(', ') + ' WHERE domain=?', args);
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
if (!fallbackCertificate) return;
await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);
if (fallbackCertificate) await reverseProxy.setFallbackCertificate(domain, fallbackCertificate);
if (!_.isEqual(domainObject.tlsConfig, tlsConfig.provider)) await reverseProxy.handleCertificateProviderChanged(domain);
await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, zoneName, provider });
}
@@ -255,12 +254,11 @@ async function setWellKnown(domain, wellKnown, auditSource) {
assert.strictEqual(typeof wellKnown, 'object');
assert.strictEqual(typeof auditSource, 'object');
let error = validateWellKnown(wellKnown);
if (error) throw error;
const wellKnownError = validateWellKnown(wellKnown);
if (wellKnownError) throw wellKnownError;
[error] = await safe(database.query('UPDATE domains SET wellKnownJson = ? WHERE domain=?', [ JSON.stringify(wellKnown), domain ]));
if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
if (error) throw new BoxError(BoxError.DATABASE_ERROR, error);
const result = await database.query('UPDATE domains SET wellKnownJson = ? WHERE domain=?', [ JSON.stringify(wellKnown), domain ]);
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Domain not found');
await eventlog.add(eventlog.ACTION_DOMAIN_UPDATE, auditSource, { domain, wellKnown });
}
@@ -298,15 +296,22 @@ async function clear() {
// removes all fields that are strictly private and should never be returned by API calls
function removePrivateFields(domain) {
var result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
const result = _.pick(domain, 'domain', 'zoneName', 'provider', 'config', 'tlsConfig', 'fallbackCertificate', 'wellKnown');
return api(result.provider).removePrivateFields(result);
}
// removes all fields that are not accessible by a normal user
function removeRestrictedFields(domain) {
var result = _.pick(domain, 'domain', 'zoneName', 'provider');
const result = _.pick(domain, 'domain', 'zoneName', 'provider');
result.config = {}; // always ensure config object
return result;
}
async function getDomainObjectMap() {
const domainObjects = await list();
let domainObjectMap = {};
for (let d of domainObjects) { domainObjectMap[d.domain] = d; }
return domainObjectMap;
}
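A quick usage sketch of the new helper, with a hypothetical domain:
const domainObjectMap = await getDomainObjectMap();
const domainObject = domainObjectMap['example.com'];       // full domain record, keyed by name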
+4 -4
View File
@@ -27,7 +27,7 @@ async function sync(auditSource) {
info.ipv4 = info.ip;
delete info.ip;
}
const ipv4Changed = info.ip !== ipv4;
const ipv4Changed = info.ipv4 !== ipv4;
const ipv6Changed = ipv6 && info.ipv6 !== ipv6; // both should be RFC 5952 format
if (!ipv4Changed && !ipv6Changed) {
@@ -35,9 +35,9 @@ async function sync(auditSource) {
return;
}
debug(`refreshDNS: updating IP from ${info.ip} to ipv4: ${ipv4} (changed: ${ipv4Changed}) ipv6: ${ipv6} (changed: ${ipv6Changed})`);
if (ipv4Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'A', [ ipv4 ]);
if (ipv6Changed) await dns.upsertDnsRecords(constants.DASHBOARD_LOCATION, settings.dashboardDomain(), 'AAAA', [ ipv6 ]);
debug(`refreshDNS: updating IP from ${info.ipv4} to ipv4: ${ipv4} (changed: ${ipv4Changed}) ipv6: ${ipv6} (changed: ${ipv6Changed})`);
if (ipv4Changed) await dns.upsertDnsRecords(constants.DASHBOARD_SUBDOMAIN, settings.dashboardDomain(), 'A', [ ipv4 ]);
if (ipv6Changed) await dns.upsertDnsRecords(constants.DASHBOARD_SUBDOMAIN, settings.dashboardDomain(), 'AAAA', [ ipv6 ]);
const result = await apps.list();
for (const app of result) {
+4 -1
View File
@@ -35,7 +35,7 @@ exports = module.exports = {
ACTION_BACKUP_CLEANUP_FINISH: 'backup.cleanup.finish',
ACTION_CERTIFICATE_NEW: 'certificate.new',
ACTION_CERTIFICATE_RENEWAL: 'certificate.renew',
ACTION_CERTIFICATE_RENEWAL: 'certificate.renew', // obsolete
ACTION_CERTIFICATE_CLEANUP: 'certificate.cleanup',
ACTION_DASHBOARD_DOMAIN_UPDATE: 'dashboard.domain.update',
@@ -44,6 +44,8 @@ exports = module.exports = {
ACTION_DOMAIN_UPDATE: 'domain.update',
ACTION_DOMAIN_REMOVE: 'domain.remove',
ACTION_INSTALL_FINISH: 'cloudron.install.finish',
ACTION_MAIL_LOCATION: 'mail.location',
ACTION_MAIL_ENABLED: 'mail.enabled',
ACTION_MAIL_DISABLED: 'mail.disabled',
@@ -67,6 +69,7 @@ exports = module.exports = {
ACTION_USER_ADD: 'user.add',
ACTION_USER_LOGIN: 'user.login',
ACTION_USER_LOGIN_GHOST: 'user.login.ghost',
ACTION_USER_LOGOUT: 'user.logout',
ACTION_USER_REMOVE: 'user.remove',
ACTION_USER_UPDATE: 'user.update',
+36 -5
View File
@@ -2,6 +2,7 @@
exports = module.exports = {
verifyPassword,
verifyPasswordAndTotpToken,
maybeCreateUser,
testConfig,
@@ -20,7 +21,7 @@ const assert = require('assert'),
debug = require('debug')('box:externalldap'),
groups = require('./groups.js'),
ldap = require('ldapjs'),
once = require('once'),
once = require('./once.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
tasks = require('./tasks.js'),
@@ -42,8 +43,9 @@ function translateUser(ldapConfig, ldapUser) {
// RFC: https://datatracker.ietf.org/doc/html/rfc2798
return {
username: ldapUser[ldapConfig.usernameField],
username: ldapUser[ldapConfig.usernameField].toLowerCase(),
email: ldapUser.mail || ldapUser.mailPrimaryAddress,
twoFactorAuthenticationEnabled: !!ldapUser.twoFactorAuthenticationEnabled,
displayName: ldapUser.displayName || ldapUser.cn // user.giveName + ' ' + user.sn
};
}
@@ -254,8 +256,11 @@ async function maybeCreateUser(identifier) {
throw error;
}
// fetch the full record
return await users.get(userId);
// fetch the full record and amend potential twoFA settings
const newUser = await users.get(userId);
if (user.twoFactorAuthenticationEnabled) newUser.twoFactorAuthenticationEnabled = true;
return newUser;
}
async function verifyPassword(user, password) {
@@ -279,6 +284,32 @@ async function verifyPassword(user, password) {
return translateUser(externalLdapConfig, ldapUsers[0]);
}
async function verifyPasswordAndTotpToken(user, password, totpToken) {
assert.strictEqual(typeof user, 'object');
assert.strictEqual(typeof password, 'string');
assert.strictEqual(typeof totpToken, 'string');
const externalLdapConfig = await settings.getExternalLdapConfig();
if (externalLdapConfig.provider === 'noop') throw new BoxError(BoxError.BAD_STATE, 'not enabled');
const ldapUsers = await ldapUserSearch(externalLdapConfig, { filter: `${externalLdapConfig.usernameField}=${user.username}` });
if (ldapUsers.length === 0) throw new BoxError(BoxError.NOT_FOUND);
if (ldapUsers.length > 1) throw new BoxError(BoxError.CONFLICT);
const client = await getClient(externalLdapConfig, { bind: false });
// inject the totp token into the first RDN of the user DN
const rdns = ldapUsers[0].dn.split(',');
const totpTokenDn = `${rdns[0]}+totptoken=${totpToken},` + rdns.slice(1).join(',');
const [error] = await safe(util.promisify(client.bind.bind(client))(totpTokenDn, password));
client.unbind();
if (error instanceof ldap.InvalidCredentialsError) throw new BoxError(BoxError.INVALID_CREDENTIALS);
if (error) throw new BoxError(BoxError.EXTERNAL_ERROR, error);
return translateUser(externalLdapConfig, ldapUsers[0]);
}
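The TOTP token rides along in a multi-valued RDN so the external LDAP server can verify both factors in one bind. The DN rewrite, as an assumed standalone helper:
function makeTotpBindDn(userDn, totpToken) { // hypothetical helper, mirrors the inline rdns logic
    const rdns = userDn.split(',');
    return `${rdns[0]}+totptoken=${totpToken},` + rdns.slice(1).join(',');
}
makeTotpBindDn('uid=jane,ou=users,dc=example,dc=com', '123456');
// => 'uid=jane+totptoken=123456,ou=users,dc=example,dc=com'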
async function startSyncer() {
const externalLdapConfig = await settings.getExternalLdapConfig();
if (externalLdapConfig.provider === 'noop') throw new BoxError(BoxError.BAD_STATE, 'not enabled');
@@ -432,7 +463,7 @@ async function syncGroupUsers(externalLdapConfig, progressCallback) {
debug(`syncGroupUsers: Found member object at ${memberDn} adding to group ${group.name}`);
const username = result[externalLdapConfig.usernameField];
const username = result[externalLdapConfig.usernameField].toLowerCase();
if (!username) continue;
const [getError, userObject] = await safe(users.getByUsername(username));
+125
View File
@@ -0,0 +1,125 @@
'use strict';
exports = module.exports = {
getSystem,
getContainerStats
};
const apps = require('./apps.js'),
assert = require('assert'),
BoxError = require('./boxerror.js'),
docker = require('./docker.js'),
os = require('os'),
safe = require('safetydance'),
services = require('./services.js'),
superagent = require('superagent');
// for testing locally: curl 'http://${graphite-ip}:8000/graphite-web/render?format=json&from=-1min&target=absolute(collectd.localhost.du-docker.capacity-usage)'
// the datapoint is (value, timestamp) https://graphite.readthedocs.io/en/latest/
async function getGraphiteUrl() {
const [error, result] = await safe(docker.inspect('graphite'));
if (error && error.reason === BoxError.NOT_FOUND) return { status: exports.SERVICE_STATUS_STOPPED };
if (error) throw error;
const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
if (!ip) throw new BoxError(BoxError.INACTIVE, 'Error getting IP of graphite service');
return `http://${ip}:8000/graphite-web/render`;
}
async function getContainerStats(name, fromMinutes, noNullPoints) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof fromMinutes, 'number');
assert.strictEqual(typeof noNullPoints, 'boolean');
const timeBucketSize = fromMinutes > (24 * 60) ? (6*60) : 5;
const graphiteUrl = await getGraphiteUrl();
// https://collectd.org/wiki/index.php/Data_source . a gauge is a point-in-time value; a counter is the change in value
const targets = [
`summarize(collectd.localhost.docker-stats-${name}.gauge-cpu-perc, "${timeBucketSize}min", "avg")`,
`summarize(collectd.localhost.docker-stats-${name}.gauge-mem-used, "${timeBucketSize}min", "avg")`,
// `summarize(collectd.localhost.docker-stats-${name}.gauge-mem-max, "${timeBucketSize}min", "avg")`,
`summarize(collectd.localhost.docker-stats-${name}.counter-blockio-read, "${timeBucketSize}min", "sum")`,
`summarize(collectd.localhost.docker-stats-${name}.counter-blockio-write, "${timeBucketSize}min", "sum")`,
`summarize(collectd.localhost.docker-stats-${name}.counter-network-read, "${timeBucketSize}min", "sum")`,
`summarize(collectd.localhost.docker-stats-${name}.counter-network-write, "${timeBucketSize}min", "sum")`,
`summarize(collectd.localhost.docker-stats-${name}.gauge-blockio-read, "${fromMinutes}min", "max")`,
`summarize(collectd.localhost.docker-stats-${name}.gauge-blockio-write, "${fromMinutes}min", "max")`,
`summarize(collectd.localhost.docker-stats-${name}.gauge-network-read, "${fromMinutes}min", "max")`,
`summarize(collectd.localhost.docker-stats-${name}.gauge-network-write, "${fromMinutes}min", "max")`,
];
const results = [];
for (const target of targets) {
const query = {
target: target,
format: 'json',
from: `-${fromMinutes}min`,
until: 'now',
noNullPoints: !!noNullPoints
};
const [error, response] = await safe(superagent.get(graphiteUrl).query(query).timeout(30 * 1000).ok(() => true));
if (error) throw new BoxError(BoxError.NETWORK_ERROR, error.message);
if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error with ${target}: ${response.status} ${response.text}`);
results.push(response.body[0] && response.body[0].datapoints ? response.body[0].datapoints : []);
}
// results are datapoints[[value, ts], [value, ts], ...];
return {
cpu: results[0],
memory: results[1],
blockRead: results[2],
blockWrite: results[3],
networkRead: results[4],
networkWrite: results[5],
blockReadTotal: results[6][0] && results[6][0][0] ? results[6][0][0] : 0,
blockWriteTotal: results[7][0] && results[7][0][0] ? results[7][0][0] : 0,
networkReadTotal: results[8][0] && results[8][0][0] ? results[8][0][0] : 0,
networkWriteTotal: results[9][0] && results[9][0][0] ? results[9][0][0] : 0,
cpuCount: os.cpus().length
};
}
async function getSystem(fromMinutes, noNullPoints) {
assert.strictEqual(typeof fromMinutes, 'number');
assert.strictEqual(typeof noNullPoints, 'boolean');
const timeBucketSize = fromMinutes > (24 * 60) ? (6*60) : 5;
const graphiteUrl = await getGraphiteUrl();
const cpuQuery = `summarize(sum(collectd.localhost.aggregation-cpu-sum.cpu-system, collectd.localhost.aggregation-cpu-sum.cpu-user), "${timeBucketSize}min", "avg")`;
const memoryQuery = `summarize(collectd.localhost.memory.memory-used, "${timeBucketSize}min", "avg")`;
const query = {
target: [ cpuQuery, memoryQuery ],
format: 'json',
from: `-${fromMinutes}min`,
until: 'now'
};
const [memCpuError, memCpuResponse] = await safe(superagent.get(graphiteUrl).query(query).timeout(30 * 1000).ok(() => true));
if (memCpuError) throw new BoxError(BoxError.NETWORK_ERROR, memCpuError.message);
if (memCpuResponse.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Unknown error: ${memCpuResponse.status} ${memCpuResponse.text}`);
const appResponses = {};
for (const app of await apps.list()) {
appResponses[app.id] = await getContainerStats(app.id, fromMinutes, noNullPoints);
}
const serviceResponses = {};
for (const serviceId of await services.listServices()) {
serviceResponses[serviceId] = await getContainerStats(serviceId, fromMinutes, noNullPoints);
}
return {
cpu: memCpuResponse.body[0] && memCpuResponse.body[0].datapoints ? memCpuResponse.body[0].datapoints : [],
memory: memCpuResponse.body[1] && memCpuResponse.body[1].datapoints ? memCpuResponse.body[1].datapoints : [],
apps: appResponses,
services: serviceResponses,
cpuCount: os.cpus().length
};
}
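A minimal usage sketch for the two exports, assuming this new file is saved as metrics.js, the graphite container is reachable, and 'mysql' is an existing container name:

const metrics = require('./metrics.js');

async function main() {
    // per-container stats over the last hour (5 min buckets), skipping null datapoints
    const stats = await metrics.getContainerStats('mysql', 60, true);
    console.log(stats.cpu); // [[value, timestamp], ...]
    console.log(stats.networkReadTotal, stats.networkWriteTotal);

    // system-wide view, including one entry per app and per service
    const system = await metrics.getSystem(60, true);
    console.log(system.cpuCount, Object.keys(system.apps));
}

main().catch(console.error);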
+225
@@ -0,0 +1,225 @@
'use strict';
const assert = require('assert'),
BoxError = require('./boxerror.js'),
crypto = require('crypto'),
debug = require('debug')('box:hush'),
fs = require('fs'),
ProgressStream = require('./progress-stream.js'),
TransformStream = require('stream').Transform;
class EncryptStream extends TransformStream {
constructor(encryption) {
super();
this._headerPushed = false;
this._iv = crypto.randomBytes(16);
this._cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.dataKey, 'hex'), this._iv);
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
}
pushHeaderIfNeeded() {
if (!this._headerPushed) {
const magic = Buffer.from('CBV2');
this.push(magic);
this._hmac.update(magic);
this.push(this._iv);
this._hmac.update(this._iv);
this._headerPushed = true;
}
}
_transform(chunk, ignoredEncoding, callback) {
this.pushHeaderIfNeeded();
try {
const crypt = this._cipher.update(chunk);
this._hmac.update(crypt);
callback(null, crypt);
} catch (error) {
callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when updating: ${error.message}`));
}
}
_flush(callback) {
try {
this.pushHeaderIfNeeded(); // for 0-length files
const crypt = this._cipher.final();
this.push(crypt);
this._hmac.update(crypt);
callback(null, this._hmac.digest()); // +32 bytes
} catch (error) {
callback(new BoxError(BoxError.CRYPTO_ERROR, `Encryption error when flushing: ${error.message}`));
}
}
}
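As pushHeaderIfNeeded() and _flush() imply, the encrypted output layout is: a 4-byte 'CBV2' magic, a 16-byte random IV, the AES-256-CBC ciphertext, then a trailing 32-byte HMAC-SHA256 computed over everything before it. A small sketch that splits a fully buffered payload into those parts:

// buf is a complete encrypted payload produced by EncryptStream
function splitEncrypted(buf) {
    return {
        magic: buf.slice(0, 4),         // should equal Buffer.from('CBV2')
        iv: buf.slice(4, 20),           // random per stream
        cipherText: buf.slice(20, -32), // AES-256-CBC blocks
        mac: buf.slice(-32)             // HMAC-SHA256 over magic + iv + ciphertext
    };
}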
class DecryptStream extends TransformStream {
constructor(encryption) {
super();
this._key = Buffer.from(encryption.dataKey, 'hex');
this._header = Buffer.alloc(0);
this._decipher = null;
this._hmac = crypto.createHmac('sha256', Buffer.from(encryption.dataHmacKey, 'hex'));
this._buffer = Buffer.alloc(0);
}
_transform(chunk, ignoredEncoding, callback) {
const needed = 20 - this._header.length; // 4 for magic, 16 for iv
if (this._header.length !== 20) { // not gotten header yet
this._header = Buffer.concat([this._header, chunk.slice(0, needed)]);
if (this._header.length !== 20) return callback();
if (!this._header.slice(0, 4).equals(Buffer.from('CBV2'))) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid magic in header'));
const iv = this._header.slice(4);
this._decipher = crypto.createDecipheriv('aes-256-cbc', this._key, iv);
this._hmac.update(this._header);
}
this._buffer = Buffer.concat([ this._buffer, chunk.slice(needed) ]);
if (this._buffer.length < 32) return callback(); // hmac trailer length is 32
try {
const cipherText = this._buffer.slice(0, -32);
this._hmac.update(cipherText);
const plainText = this._decipher.update(cipherText);
this._buffer = this._buffer.slice(-32);
callback(null, plainText);
} catch (error) {
callback(new BoxError(BoxError.CRYPTO_ERROR, `Decryption error: ${error.message}`));
}
}
_flush (callback) {
if (this._buffer.length !== 32) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (not enough data)'));
try {
if (!this._hmac.digest().equals(this._buffer)) return callback(new BoxError(BoxError.CRYPTO_ERROR, 'Invalid password or tampered file (mac mismatch)'));
const plainText = this._decipher.final();
callback(null, plainText);
} catch (error) {
callback(new BoxError(BoxError.CRYPTO_ERROR, `Invalid password or tampered file: ${error.message}`));
}
}
}
function encryptFilePath(filePath, encryption) {
assert.strictEqual(typeof filePath, 'string');
assert.strictEqual(typeof encryption, 'object');
const encryptedParts = filePath.split('/').map(function (part) {
let hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
const iv = hmac.update(part).digest().slice(0, 16); // iv has to be deterministic, for our sync (copy) logic to work
const cipher = crypto.createCipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
let crypt = cipher.update(part);
crypt = Buffer.concat([ iv, crypt, cipher.final() ]);
return crypt.toString('base64') // ensures path is valid
.replace(/\//g, '-') // replace '/' of base64 since it conflicts with path separator
.replace(/=/g,''); // strip trailing = padding. this is only needed if we concat base64 strings, which we don't
});
return encryptedParts.join('/');
}
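Because the IV is derived from an HMAC of each path component instead of being random, encryptFilePath() is deterministic: the same path and keys always yield the same encrypted name, which the sync (copy) logic depends on. A minimal sketch, assuming hypothetical 32-byte hex keys (crypto is already required at the top of this file):

const enc = {
    filenameKey: crypto.randomBytes(32).toString('hex'),
    filenameHmacKey: crypto.randomBytes(32).toString('hex')
};
const a = encryptFilePath('snapshot/app/data.tar.gz', enc);
const b = encryptFilePath('snapshot/app/data.tar.gz', enc);
console.log(a === b); // true; '/' separators are preserved, each component is encrypted on its own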
function decryptFilePath(filePath, encryption) {
assert.strictEqual(typeof filePath, 'string');
assert.strictEqual(typeof encryption, 'object');
const decryptedParts = [];
for (let part of filePath.split('/')) {
part = part + Array(part.length % 4).join('='); // add back = padding
part = part.replace(/-/g, '/'); // replace with '/'
try {
const buffer = Buffer.from(part, 'base64');
const iv = buffer.slice(0, 16);
let decrypt = crypto.createDecipheriv('aes-256-cbc', Buffer.from(encryption.filenameKey, 'hex'), iv);
const plainText = decrypt.update(buffer.slice(16));
const plainTextString = Buffer.concat([ plainText, decrypt.final() ]).toString('utf8');
const hmac = crypto.createHmac('sha256', Buffer.from(encryption.filenameHmacKey, 'hex'));
if (!hmac.update(plainTextString).digest().slice(0, 16).equals(iv)) return { error: new BoxError(BoxError.CRYPTO_ERROR, `mac error decrypting part ${part} of path ${filePath}`) };
decryptedParts.push(plainTextString);
} catch (error) {
debug(`Error decrypting part ${part} of path ${filePath}:`, error);
return { error: new BoxError(BoxError.CRYPTO_ERROR, `Error decrypting part ${part} of path ${filePath}: ${error.message}`) };
}
}
return { result: decryptedParts.join('/') };
}
function createReadStream(sourceFile, encryption) {
assert.strictEqual(typeof sourceFile, 'string');
assert.strictEqual(typeof encryption, 'object');
const stream = fs.createReadStream(sourceFile);
const ps = new ProgressStream({ interval: 10000 }); // display a progress every 10 seconds
stream.on('error', function (error) {
debug(`createReadStream: read stream error at ${sourceFile}`, error);
ps.emit('error', new BoxError(BoxError.FS_ERROR, `Error reading ${sourceFile}: ${error.message} ${error.code}`));
});
stream.on('open', () => ps.emit('open'));
if (encryption) {
let encryptStream = new EncryptStream(encryption);
encryptStream.on('error', function (error) {
debug(`createReadStream: encrypt stream error ${sourceFile}`, error);
ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Encryption error at ${sourceFile}: ${error.message}`));
});
return stream.pipe(encryptStream).pipe(ps);
} else {
return stream.pipe(ps);
}
}
function createWriteStream(destFile, encryption) {
assert.strictEqual(typeof destFile, 'string');
assert.strictEqual(typeof encryption, 'object');
const stream = fs.createWriteStream(destFile);
const ps = new ProgressStream({ interval: 10000 }); // display a progress every 10 seconds
stream.on('error', function (error) {
debug(`createWriteStream: write stream error ${destFile}`, error);
ps.emit('error', new BoxError(BoxError.FS_ERROR, `Write error ${destFile}: ${error.message}`));
});
stream.on('finish', function () {
debug('createWriteStream: done.');
// we use a separate event because ps is a through2 stream which emits 'finish' event indicating end of inStream and not write
ps.emit('done');
});
if (encryption) {
let decrypt = new DecryptStream(encryption);
decrypt.on('error', function (error) {
debug(`createWriteStream: decrypt stream error ${destFile}`, error);
ps.emit('error', new BoxError(BoxError.CRYPTO_ERROR, `Decryption error at ${destFile}: ${error.message}`));
});
ps.pipe(decrypt).pipe(stream);
} else {
ps.pipe(stream);
}
return ps;
}
exports = module.exports = {
EncryptStream,
DecryptStream,
encryptFilePath,
decryptFilePath,
createReadStream,
createWriteStream
};
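A minimal round-trip sketch for the two stream classes, assuming this new file is saved as hush.js and that dataKey/dataHmacKey are 32-byte hex strings:

const crypto = require('crypto');
const { EncryptStream, DecryptStream } = require('./hush.js');

const encryption = {
    dataKey: crypto.randomBytes(32).toString('hex'),
    dataHmacKey: crypto.randomBytes(32).toString('hex')
};

const enc = new EncryptStream(encryption);
const dec = new DecryptStream(encryption);
const chunks = [];
enc.pipe(dec);
dec.on('data', (d) => chunks.push(d));
dec.on('end', () => console.log(Buffer.concat(chunks).toString())); // 'hello backup'
enc.end('hello backup');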
+8 -8
@@ -6,7 +6,7 @@
exports = module.exports = {
// a version change recreates all containers with latest docker config
- 'version': '49.0.0',
+ 'version': '49.4.0',
'baseImages': [
{ repo: 'cloudron/base', tag: 'cloudron/base:3.2.0@sha256:ba1d566164a67c266782545ea9809dc611c4152e27686fd14060332dd88263ea' }
@@ -16,12 +16,12 @@ exports = module.exports = {
// docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
'images': {
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.4.0@sha256:45817f1631992391d585f171498d257487d872480fd5646723a2b956cc4ef15d' },
- 'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.2.1@sha256:75cef64ba4917ba9ec68bc0c9d9ba3a9eeae00a70173cd6d81cc6118038737d9' },
- 'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.3.0@sha256:9f0cfe83310a6f67885c21804c01e73c8d256606217f44dcefb3e05a5402b2c9' },
- 'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.2.0@sha256:c8ebdbe2663b26fcd58b1e6b97906b62565adbe4a06256ba0f86114f78b37e6b' },
- 'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.2.1@sha256:30a5550c41c3be3a01dbf457497ba2f3c05f3121c595a17ef1aacbef931b6114' },
- 'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.6.1@sha256:b8b93f007105080d4812a05648e6bc5e15c95c63f511c829cbc14a163d9ea029' },
- 'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.1.0@sha256:30ec3a01964a1e01396acf265183997c3e17fb07eac1a82b979292cc7719ff4b' },
- 'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.6.0@sha256:9c686b10c1a3ba344a743f399d08b4da5426e111f455114980f0ae0229c1ab23' }
+ 'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.2.2@sha256:8648ca5a16fcdec72799b919c5f62419fd19e922e3d98d02896b921ae6127ef4' },
+ 'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.3.5@sha256:bc8cb91cbd48ee9a2f5a609b6131cd21a0210c15aaf127ee77963d90a125530a' },
+ 'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.2.2@sha256:df928d7dce1ac6454fc584787fa863f6d5e7ee0abb775dde5916a555fc94c3c7' },
+ 'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.3.1@sha256:383e11a5c7a54d17eb6bbceb0ffa92f486167be6ea9978ec745c8c8e9b7dfb19' },
+ 'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.7.4@sha256:8ddbf13ee3fd479e18923c7bf1370d9d8aa5f12a94cbbda5afac8b5a4af72a28' },
+ 'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.2.0@sha256:182e5cae69fbddc703cb9f91be909452065c7ae159e9836cc88317c7a00f0e62' },
+ 'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.6.1@sha256:ba4b9a1fe274c0ef0a900e5d0deeb8f3da08e118798d1d90fbf995cc0cf6e3a3' }
}
};
+3 -1
@@ -28,7 +28,7 @@ async function cleanupTmpVolume(containerInfo) {
const cmd = 'find /tmp -type f -mtime +10 -exec rm -rf {} +'.split(' '); // 10 day old files
- debug('cleanupTmpVolume %j', containerInfo.Names);
+ debug(`cleanupTmpVolume ${JSON.stringify(containerInfo.Names)}`);
const [error, execContainer] = await safe(gConnection.getContainer(containerInfo.Id).exec({ Cmd: cmd, AttachStdout: true, AttachStderr: true, Tty: false }));
if (error) throw new BoxError(BoxError.DOCKER_ERROR, `Failed to exec container: ${error.message}`);
@@ -53,4 +53,6 @@ async function cleanupDockerVolumes() {
for (const container of containers) {
await safe(cleanupTmpVolume(container), { debug }); // intentionally ignore error
}
debug('Cleaned up docker volumes');
}
+69 -64
@@ -21,13 +21,10 @@ const addonConfigs = require('./addonconfigs.js'),
users = require('./users.js'),
util = require('util');
- var gServer = null;
+ let gServer = null;
const NOOP = function () {};
const GROUP_USERS_DN = 'cn=users,ou=groups,dc=cloudron';
const GROUP_ADMINS_DN = 'cn=admins,ou=groups,dc=cloudron';
// Will attach req.app if successful
async function authenticateApp(req, res, next) {
const sourceIp = req.connection.ldap.id.split(':')[0];
@@ -113,13 +110,13 @@ function finalSend(results, req, res, next) {
if (cookie && Buffer.isBuffer(cookie)) {
// we have pagination
- var first = min;
+ let first = min;
if (cookie.length !== 0) {
first = parseInt(cookie.toString(), 10);
}
- var last = sendPagedResults(first, first + pageSize);
+ const last = sendPagedResults(first, first + pageSize);
- var resultCookie;
+ let resultCookie;
if (last < max) {
resultCookie = Buffer.from(last.toString());
} else {
@@ -150,6 +147,9 @@ async function userSearch(req, res, next) {
const [error, result] = await safe(getUsersWithAccessToApp(req));
if (error) return next(new ldap.OperationsError(error.toString()));
+ const [groupsError, allGroups] = await safe(groups.listWithMembers());
+ if (groupsError) return next(new ldap.OperationsError(groupsError.toString()));
let results = [];
// send user objects
@@ -159,9 +159,6 @@ async function userSearch(req, res, next) {
const dn = ldap.parseDN('cn=' + user.id + ',ou=users,dc=cloudron');
const memberof = [ GROUP_USERS_DN ];
if (users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) memberof.push(GROUP_ADMINS_DN);
const displayName = user.displayName || user.username || ''; // displayName can be empty and username can be null
const nameParts = displayName.split(' ');
const firstName = nameParts[0];
@@ -172,7 +169,7 @@ async function userSearch(req, res, next) {
attributes: {
objectclass: ['user', 'inetorgperson', 'person', 'organizationalperson', 'top' ],
objectcategory: 'person',
- cn: user.id,
+ cn: displayName,
uid: user.id,
entryuuid: user.id, // to support OpenLDAP clients
mail: user.email,
@@ -181,7 +178,7 @@ async function userSearch(req, res, next) {
givenName: firstName,
username: user.username,
samaccountname: user.username, // to support ActiveDirectory clients
- memberof: memberof
+ memberof: allGroups.filter(function (g) { return g.userIds.indexOf(user.id) !== -1; }).map(function (g) { return `cn=${g.name},ou=groups,dc=cloudron`; })
}
};
@@ -204,42 +201,8 @@ async function userSearch(req, res, next) {
async function groupSearch(req, res, next) {
debug('group search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
const [error, usersWithAccess] = await safe(getUsersWithAccessToApp(req));
if (error) return next(new ldap.OperationsError(error.toString()));
const results = [];
// those are the old virtual groups for backwards compat
const virtualGroups = [{
name: 'users',
admin: false
}, {
name: 'admins',
admin: true
}];
virtualGroups.forEach(function (group) {
const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
const members = group.admin ? usersWithAccess.filter(function (user) { return users.compareRoles(user.role, users.ROLE_ADMIN) >= 0; }) : usersWithAccess;
const obj = {
dn: dn.toString(),
attributes: {
objectclass: ['group'],
cn: group.name,
memberuid: members.map(function(entry) { return entry.id; }).sort()
}
};
// ensure all filter values are also lowercase
const lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
results.push(obj);
}
});
let [errorGroups, resultGroups] = await safe(groups.listWithMembers());
if (errorGroups) return next(new ldap.OperationsError(errorGroups.toString()));
@@ -248,15 +211,15 @@ async function groupSearch(req, res, next) {
}
resultGroups.forEach(function (group) {
- const dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
- const members = group.userIds.filter(function (uid) { return usersWithAccess.map(function (u) { return u.id; }).indexOf(uid) !== -1; });
+ const dn = ldap.parseDN(`cn=${group.name},ou=groups,dc=cloudron`);
const obj = {
dn: dn.toString(),
attributes: {
objectclass: ['group'],
cn: group.name,
- memberuid: members
+ gidnumber: group.id,
+ memberuid: group.userIds
}
};
@@ -295,7 +258,7 @@ async function groupAdminsCompare(req, res, next) {
// we only support memberuid here, if we add new group attributes later add them here
if (req.attribute === 'memberuid') {
- var user = result.find(function (u) { return u.id === req.value; });
+ const user = result.find(function (u) { return u.id === req.value; });
if (user && users.compareRoles(user.role, users.ROLE_ADMIN) >= 0) return res.end(true);
}
@@ -305,25 +268,35 @@ async function groupAdminsCompare(req, res, next) {
async function mailboxSearch(req, res, next) {
debug('mailbox search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
- // if cn is set we only search for one mailbox specifically
+ // if cn is set OR filter is mail= we only search for one mailbox specifically
+ let email, dn;
if (req.dn.rdns[0].attrs.cn) {
- const email = req.dn.rdns[0].attrs.cn.value.toLowerCase();
+ email = req.dn.rdns[0].attrs.cn.value.toLowerCase();
+ dn = req.dn.toString();
+ } else if (req.filter instanceof ldap.EqualityFilter && req.filter.attribute === 'mail') {
+ email = req.filter.value.toLowerCase();
+ dn = `cn=${email},${req.dn.toString()}`;
+ }
if (email) {
const parts = email.split('@');
- if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
+ if (parts.length !== 2) return next(new ldap.NoSuchObjectError(dn.toString()));
const [error, mailbox] = await safe(mail.getMailbox(parts[0], parts[1]));
if (error) return next(new ldap.OperationsError(error.toString()));
- if (!mailbox) return next(new ldap.NoSuchObjectError(req.dn.toString()));
- if (!mailbox.active) return next(new ldap.NoSuchObjectError(req.dn.toString()));
+ if (!mailbox) return next(new ldap.NoSuchObjectError(dn.toString()));
+ if (!mailbox.active) return next(new ldap.NoSuchObjectError(dn.toString()));
const obj = {
- dn: req.dn.toString(),
+ dn: dn.toString(),
attributes: {
objectclass: ['mailbox'],
objectcategory: 'mailbox',
cn: `${mailbox.name}@${mailbox.domain}`,
uid: `${mailbox.name}@${mailbox.domain}`,
- mail: `${mailbox.name}@${mailbox.domain}`
+ mail: `${mailbox.name}@${mailbox.domain}`,
+ storagequota: mailbox.storageQuota,
+ messagesquota: mailbox.messagesQuota,
}
};
@@ -336,7 +309,7 @@ async function mailboxSearch(req, res, next) {
} else {
res.end();
}
- } else { // new sogo
+ } else { // new sogo and dovecot listing (doveadm -A)
// TODO figure out how proper pagination here could work
let [error, mailboxes] = await safe(mail.listAllMailboxes(1, 100000));
if (error) return next(new ldap.OperationsError(error.toString()));
@@ -360,7 +333,9 @@ async function mailboxSearch(req, res, next) {
displayname: mailbox.ownerType === mail.OWNERTYPE_USER ? ownerObject.displayName : ownerObject.name,
cn: `${mailbox.name}@${mailbox.domain}`,
uid: `${mailbox.name}@${mailbox.domain}`,
- mail: `${mailbox.name}@${mailbox.domain}`
+ mail: `${mailbox.name}@${mailbox.domain}`,
+ storagequota: mailbox.storageQuota,
+ messagesquota: mailbox.messagesQuota,
}
};
@@ -390,7 +365,7 @@ async function mailAliasSearch(req, res, next) {
const parts = email.split('@');
if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
- const [error, alias] = await safe(mail.getAlias(parts[0], parts[1]));
+ const [error, alias] = await safe(mail.searchAlias(parts[0], parts[1]));
if (error) return next(new ldap.OperationsError(error.toString()));
if (!alias) return next(new ldap.NoSuchObjectError(req.dn.toString()));
@@ -404,7 +379,7 @@ async function mailAliasSearch(req, res, next) {
attributes: {
objectclass: ['nisMailAlias'],
objectcategory: 'nisMailAlias',
- cn: `${alias.name}@${alias.domain}`,
+ cn: `${parts[0]}@${alias.domain}`, // alias.name can contain wildcard character
rfc822MailMember: `${alias.aliasName}@${alias.aliasDomain}`
}
};
@@ -480,7 +455,7 @@ async function authorizeUserForApp(req, res, next) {
// we return no such object, to avoid leakage of a users existence
if (!canAccess) return next(new ldap.NoSuchObjectError(req.dn.toString()));
- await eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', appId: req.app.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
+ await eventlog.upsertLoginEvent(req.user.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'ldap', appId: req.app.id }, { userId: req.user.id, user: users.removePrivateFields(req.user) });
res.end();
}
@@ -558,7 +533,7 @@ async function userSearchSftp(req, res, next) {
const obj = {
dn: ldap.parseDN(`cn=${username}@${appFqdn},ou=sftp,dc=cloudron`).toString(),
attributes: {
- homeDirectory: app.dataDir ? `/mnt/app-${app.id}` : `/mnt/appsdata/${app.id}/data`, // see also sftp.js
+ homeDirectory: app.storageVolumeId ? `/mnt/app-${app.id}` : `/mnt/appsdata/${app.id}/data`, // see also sftp.js
objectclass: ['user'],
objectcategory: 'person',
cn: user.id,
@@ -625,7 +600,7 @@ async function authenticateService(serviceId, dn, req, res, next) {
if (verifyError && verifyError.reason === BoxError.INVALID_CREDENTIALS) return next(new ldap.InvalidCredentialsError(dn.toString()));
if (verifyError) return next(new ldap.OperationsError(verifyError.message));
- eventlog.upsertLoginEvent(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: email }, { userId: result.id, user: users.removePrivateFields(result) });
+ eventlog.upsertLoginEvent(result.ghost ? eventlog.ACTION_USER_LOGIN_GHOST : eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: email }, { userId: result.id, user: users.removePrivateFields(result) });
res.end();
}
@@ -635,6 +610,26 @@ async function authenticateMail(req, res, next) {
await authenticateService(req.dn.rdns[1].attrs.ou.value.toLowerCase(), req.dn, req, res, next);
}
// https://ldapwiki.com/wiki/RootDSE / RFC 4512 - ldapsearch -x -h "${CLOUDRON_LDAP_SERVER}" -p "${CLOUDRON_LDAP_PORT}" -b "" -s base
// ldapjs seems to call this handler for everything when search === ''
async function maybeRootDSE(req, res, next) {
debug(`maybeRootDSE: requested with scope:${req.scope} dn:${req.dn.toString()}`);
if (req.scope !== 'base') return next(new ldap.NoSuchObjectError()); // per the spec, rootDSE search requires base scope
if (!req.dn || req.dn.toString() !== '') return next(new ldap.NoSuchObjectError());
res.send({
dn: '',
attributes: {
objectclass: [ 'RootDSE', 'top', 'OpenLDAProotDSE' ],
supportedLDAPVersion: '3',
vendorName: 'Cloudron LDAP',
vendorVersion: '1.0.0'
}
});
res.end();
}
async function start() {
const logger = {
trace: NOOP,
@@ -687,6 +682,16 @@ async function start() {
res.end();
});
// directus looks for the "DN" of the bind user
gServer.search('ou=apps,dc=cloudron', function(req, res, next) {
const obj = {
dn: req.dn.toString(),
};
finalSend([obj], req, res, next);
});
gServer.search('', maybeRootDSE); // when '', it seems the callback is called for everything else
// just log that an attempt was made to unknown route, this helps a lot during app packaging
gServer.use(function(req, res, next) {
debug('not handled: dn %s, scope %s, filter %s (from %s)', req.dn ? req.dn.toString() : '-', req.scope, req.filter ? req.filter.toString() : '-', req.connection.ldap.id);
+2 -3
@@ -1,6 +1,6 @@
'use strict';
- var assert = require('assert'),
+ const assert = require('assert'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:locker'),
EventEmitter = require('events').EventEmitter,
@@ -32,8 +32,7 @@ Locker.prototype.lock = function (operation) {
this._operation = operation;
++this._lockDepth;
this._timestamp = new Date();
- var that = this;
- this._watcherId = setInterval(function () { debug('Lock unreleased %s', that._operation); }, 1000 * 60 * 5);
+ this._watcherId = setInterval(() => { debug('Lock unreleased %s', this._operation); }, 1000 * 60 * 5);
debug('Acquired : %s', this._operation);
+52
@@ -0,0 +1,52 @@
'use strict';
const stream = require('stream'),
{ StringDecoder } = require('string_decoder'),
TransformStream = stream.Transform;
class LogStream extends TransformStream {
constructor(options) {
super();
this._options = Object.assign({ source: 'unknown', format: 'json' }, options);
this._decoder = new StringDecoder();
this._soFar = '';
}
_format(line) {
if (this._options.format !== 'json') return line + '\n';
const data = line.split(' '); // logs are <ISOtimestamp> <msg>
let timestamp = (new Date(data[0])).getTime();
if (isNaN(timestamp)) timestamp = 0;
const message = line.slice(data[0].length+1);
// ignore faulty empty logs
if (!timestamp && !message) return;
return JSON.stringify({
realtimeTimestamp: timestamp * 1000,
message: message,
source: this._options.source
}) + '\n';
}
_transform(chunk, encoding, callback) {
const data = this._soFar + this._decoder.write(chunk);
let start = this._soFar.length, end = -1;
while ((end = data.indexOf('\n', start)) !== -1) {
const line = data.slice(start, end); // does not include end
this.push(this._format(line));
start = end + 1;
}
this._soFar = data.slice(start);
callback(null);
}
_flush(callback) {
const line = this._soFar + this._decoder.end();
this.push(this._format(line));
callback(null);
}
}
exports = module.exports = LogStream;
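A minimal usage sketch, assuming this new file is saved as logstream.js; it reframes '<ISOtimestamp> <msg>' lines as the JSON objects built in _format():

const LogStream = require('./logstream.js');

const ls = new LogStream({ source: 'app' }); // format defaults to 'json'
ls.pipe(process.stdout);
ls.end('2023-01-31T17:59:30.000Z mysql is alive\n');
// prints {"realtimeTimestamp":1675187970000000,"message":"mysql is alive","source":"app"}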
+8 -5
@@ -1,11 +1,12 @@
# Generated by apptask
- # keep up to 7 rotated logs. rotation triggered daily or ahead of time if size is > 1M
+ # keep up to 5 rotated logs. rotation triggered weekly or ahead of time if size is > 10M
<%= volumePath %>/*.log <%= volumePath %>/*/*.log <%= volumePath %>/*/*/*.log {
- rotate 7
- daily
+ rotate 5
+ weekly
+ maxage 14
compress
- maxsize 1M
+ maxsize 10M
missingok
delaycompress
# this truncates the original log file and not the rotated one
@@ -15,7 +16,9 @@
/home/yellowtent/platformdata/logs/<%= appId %>/*.log {
# only keep one rotated file, we currently do not send that over the api
rotate 1
- size 10M
+ weekly
+ maxage 14
+ maxsize 10M
missingok
# we never compress so we can simply tail the files
nocompress
+104 -39
@@ -22,6 +22,7 @@ exports = module.exports = {
setDnsRecords,
validateName,
validateDisplayName,
setMailFromValidation,
setCatchAllAddress,
@@ -31,7 +32,7 @@ exports = module.exports = {
startMail,
restartMail,
- handleCertChanged,
+ checkCertificate,
getMailAuth,
sendTestMail,
@@ -47,6 +48,7 @@ exports = module.exports = {
getAlias,
getAliases,
setAliases,
searchAlias,
getLists,
getList,
@@ -65,7 +67,6 @@ exports = module.exports = {
TYPE_LIST: 'list',
TYPE_ALIAS: 'alias',
_validateName: validateName,
_delByDomain: delByDomain,
_updateDomain: updateDomain
};
@@ -96,13 +97,11 @@ const assert = require('assert'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
smtpTransport = require('nodemailer-smtp-transport'),
superagent = require('superagent'),
sysinfo = require('./sysinfo.js'),
system = require('./system.js'),
tasks = require('./tasks.js'),
users = require('./users.js'),
util = require('util'),
validator = require('validator'),
_ = require('underscore');
@@ -112,7 +111,7 @@ const REMOVE_MAILBOX_CMD = path.join(__dirname, 'scripts/rmmailbox.sh');
const OWNERTYPES = [ exports.OWNERTYPE_USER, exports.OWNERTYPE_GROUP, exports.OWNERTYPE_APP ];
// if you add a field here, listMailboxes has to be updated
- const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active', 'enablePop3' ].join(',');
+ const MAILBOX_FIELDS = [ 'name', 'type', 'ownerId', 'ownerType', 'aliasName', 'aliasDomain', 'creationTime', 'membersJson', 'membersOnly', 'domain', 'active', 'enablePop3', 'storageQuota', 'messagesQuota' ].join(',');
const MAILDB_FIELDS = [ 'domain', 'enabled', 'mailFromValidation', 'catchAllJson', 'relayJson', 'dkimKeyJson', 'dkimSelector', 'bannerJson' ].join(',');
function postProcessMailbox(data) {
@@ -169,6 +168,30 @@ function validateName(name) {
return null;
}
function validateAlias(name) {
assert.strictEqual(typeof name, 'string');
if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'mailbox name must be at least 1 char');
if (name.length >= 200) return new BoxError(BoxError.BAD_FIELD, 'mailbox name too long');
// also need to consider valid LDAP characters here (e.g. '+' is reserved). keep hyphen at the end so it doesn't become a range.
if (/[^a-zA-Z0-9._*-]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox name can only contain alphanumeric characters, dot, hyphen, asterisk or underscore');
return null;
}
function validateDisplayName(name) {
assert.strictEqual(typeof name, 'string');
if (name.length < 1) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name must be at least 1 char');
if (name.length >= 100) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name too long');
// technically it seems only ':' is disallowed (https://www.rfc-editor.org/rfc/rfc5322#section-2.2)
// in https://www.rfc-editor.org/rfc/rfc2822.html, display-name is a "phrase"
if (/["<>)(,;\\@:]/.test(name)) return new BoxError(BoxError.BAD_FIELD, 'mailbox display name is not valid');
return null;
}
async function checkOutboundPort25() {
const relay = {
value: 'OK',
@@ -214,7 +237,8 @@ async function checkSmtpRelay(relay) {
connectionTimeout: 5000,
greetingTimeout: 5000,
host: relay.host,
- port: relay.port
+ port: relay.port,
+ secure: false // haraka relay only supports STARTTLS
};
// only set auth if either username or password is provided, some relays auth based on IP (range)
@@ -227,9 +251,9 @@ async function checkSmtpRelay(relay) {
if (relay.acceptSelfSignedCerts) options.tls = { rejectUnauthorized: false };
- const transporter = nodemailer.createTransport(smtpTransport(options));
+ const transporter = nodemailer.createTransport(options);
- const [error] = await safe(util.promisify(transporter.verify)());
+ const [error] = await safe(transporter.verify());
result.status = !error;
if (error) {
result.value = result.errorMessage = error.message;
@@ -360,9 +384,9 @@ async function checkMx(domain, mailFqdn) {
}
function txtToDict(txt) {
- var dict = {};
+ const dict = {};
txt.split(';').forEach(function(v) {
- var p = v.trim().split('=');
+ const p = v.trim().split('=');
dict[p[0]]=p[1];
});
return dict;
@@ -494,7 +518,11 @@ const RBL_LIST = [
// this function currently only looks for black lists based on IP. TODO: also look up by domain
async function checkRblStatus(domain) {
- const ip = await sysinfo.getServerIPv4();
+ const [error, ip] = await safe(sysinfo.getServerIPv4());
+ if (error) {
+ debug(`checkRblStatus: unable to determine server IPv4: ${error.message}`);
+ return { status: false, ip: null, servers: [] };
+ }
const flippedIp = ip.split('.').reverse().join('.');
@@ -511,7 +539,7 @@ async function checkRblStatus(domain) {
const [error2, txtRecords] = await safe(dig.resolve(flippedIp + '.' + rblServer.dns, 'TXT', DNS_OPTIONS));
result.txtRecords = error2 || !txtRecords ? 'No txt record' : txtRecords.map(x => x.join(''));
- debug(`checkRblStatus: ${domain} (error: ${error2.message}) (txtRecords: ${JSON.stringify(txtRecords)})`);
+ debug(`checkRblStatus: ${domain} (error: ${error2?.message || null}) (txtRecords: ${JSON.stringify(txtRecords)})`);
blacklistedServers.push(result);
}
@@ -562,7 +590,7 @@ async function getStatus(domain) {
for (let i = 0; i < checks.length; i++) {
const response = responses[i], check = checks[i];
if (response.status !== 'fulfilled') {
- debug(`check ${check.what} was rejected. This is not expected`);
+ debug(`check ${check.what} was rejected. This is not expected. reason: ${response.reason}`);
continue;
}
@@ -632,7 +660,7 @@ async function createMailConfig(mailFqdn) {
// create sections for per-domain configuration
for (const domain of mailDomains) {
- const catchAll = domain.catchAll.map(function (c) { return `${c}@${domain.domain}`; }).join(',');
+ const catchAll = domain.catchAll.join(',');
const mailFromValidation = domain.mailFromValidation;
if (!safe.fs.appendFileSync(`${paths.MAIL_CONFIG_DIR}/mail.ini`,
@@ -656,7 +684,8 @@ async function createMailConfig(mailFqdn) {
const enableRelay = relay.provider !== 'cloudron-smtp' && relay.provider !== 'noop',
host = relay.host || '',
port = relay.port || 25,
- authType = relay.username ? 'plain' : '',
+ // office365 removed plain auth (https://support.microsoft.com/en-us/office/outlook-com-no-longer-supports-auth-plain-authentication-07f7d5e9-1697-465f-84d2-4513d4ff0145)
+ authType = relay.username ? (relay.provider === 'office365-legacy-smtp' ? 'login' : 'plain') : '',
username = relay.username || '',
password = relay.password || '',
forceFromAddress = relay.forceFromAddress ? 'true' : 'false';
@@ -685,18 +714,18 @@ async function configureMail(mailFqdn, mailDomain, serviceConfig) {
const tag = infra.images.mail.tag;
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
- const memory = system.getMemoryAllocation(memoryLimit);
+ const memory = await system.getMemoryAllocation(memoryLimit);
const cloudronToken = hat(8 * 128), relayToken = hat(8 * 128);
- const bundle = await reverseProxy.getCertificatePath(mailFqdn, mailDomain);
+ const certificate = await reverseProxy.getMailCertificate();
const dhparamsFilePath = `${paths.MAIL_CONFIG_DIR}/dhparams.pem`;
const mailCertFilePath = `${paths.MAIL_CONFIG_DIR}/tls_cert.pem`;
const mailKeyFilePath = `${paths.MAIL_CONFIG_DIR}/tls_key.pem`;
- if (!safe.child_process.execSync(`cp ${paths.DHPARAMS_FILE} ${dhparamsFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not copy dhparams:' + safe.error.message);
- if (!safe.child_process.execSync(`cp ${bundle.certFilePath} ${mailCertFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create cert file:' + safe.error.message);
- if (!safe.child_process.execSync(`cp ${bundle.keyFilePath} ${mailKeyFilePath}`)) throw new BoxError(BoxError.FS_ERROR, 'Could not create key file:' + safe.error.message);
+ if (!safe.child_process.execSync(`cp ${paths.DHPARAMS_FILE} ${dhparamsFilePath}`)) throw new BoxError(BoxError.FS_ERROR, `Could not copy dhparams: ${safe.error.message}`);
+ if (!safe.fs.writeFileSync(mailCertFilePath, certificate.cert)) throw new BoxError(BoxError.FS_ERROR, `Could not create cert file: ${safe.error.message}`);
+ if (!safe.fs.writeFileSync(mailKeyFilePath, certificate.key)) throw new BoxError(BoxError.FS_ERROR, `Could not create key file: ${safe.error.message}`);
// if the 'yellowtent' user of OS and the 'cloudron' user of mail container don't match, the keys become inaccessible by mail code
if (!safe.fs.chmodSync(mailKeyFilePath, 0o644)) throw new BoxError(BoxError.FS_ERROR, `Could not chmod key file: ${safe.error.message}`);
@@ -767,6 +796,7 @@ async function restartMail() {
async function startMail(existingInfra) {
assert.strictEqual(typeof existingInfra, 'object');
debug('startMail: starting');
await restartMail();
}
@@ -778,11 +808,19 @@ async function restartMailIfActivated() {
return; // not provisioned yet, do not restart container after dns setup
}
debug('restartMailIfActivated: restarting on activated');
await restartMail();
}
- async function handleCertChanged() {
- debug('handleCertChanged: will restart if activated');
+ async function checkCertificate() {
+ const certificate = await reverseProxy.getMailCertificate();
+ const cert = safe.fs.readFileSync(`${paths.MAIL_CONFIG_DIR}/tls_cert.pem`, { encoding: 'utf8' });
+ if (cert === certificate.cert) {
+ debug('checkCertificate: certificate has not changed');
+ return;
+ }
+ debug('checkCertificate: certificate has changed');
await restartMailIfActivated();
}
@@ -956,8 +994,7 @@ async function setLocation(subdomain, domain, auditSource) {
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof auditSource, 'object');
- const domainObject = await domains.get(domain);
- const fqdn = dns.fqdn(subdomain, domainObject);
+ const fqdn = dns.fqdn(subdomain, domain);
await settings.setMailLocation(domain, fqdn);
@@ -973,6 +1010,7 @@ async function onDomainAdded(domain) {
if (!settings.mailFqdn()) return; // mail domain is not set yet (when provisioning)
debug(`onDomainAdded: configuring mail for added domain ${domain}`);
await upsertDnsRecords(domain, settings.mailFqdn());
await restartMailIfActivated();
}
@@ -980,6 +1018,7 @@ async function onDomainAdded(domain) {
async function onDomainRemoved(domain) {
assert.strictEqual(typeof domain, 'string');
debug(`onDomainRemoved: configuring mail for removed domain ${domain}`);
await restartMail();
}
@@ -1019,6 +1058,10 @@ async function setCatchAllAddress(domain, addresses) {
assert.strictEqual(typeof domain, 'string');
assert(Array.isArray(addresses));
+ for (const address of addresses) {
+ if (!validator.isEmail(address)) throw new BoxError(BoxError.BAD_FIELD, `Invalid catch all address: ${address}`);
+ }
await updateDomain(domain, { catchAll: addresses });
safe(restartMail(), { debug }); // have to restart mail container since haraka cannot watch symlinked config files (mail.ini)
@@ -1079,7 +1122,7 @@ async function listMailboxes(domain, search, page, perPage) {
const escapedSearch = mysql.escape('%' + search + '%'); // this also quotes the string
const searchQuery = search ? ` HAVING (name LIKE ${escapedSearch} OR aliasNames LIKE ${escapedSearch} OR aliasDomains LIKE ${escapedSearch})` : ''; // HAVING instead of WHERE because aggregated columns are used
- const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3 '
+ const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3, m1.storageQuota AS storageQuota, m1.messagesQuota AS messagesQuota '
+ ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1`
+ ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2`
+ ' ON m1.name=m2.aliasName AND m1.domain=m2.aliasDomain AND m1.ownerId=m2.ownerId'
@@ -1100,7 +1143,7 @@ async function listAllMailboxes(page, perPage) {
assert.strictEqual(typeof page, 'number');
assert.strictEqual(typeof perPage, 'number');
- const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3 '
+ const query = 'SELECT m1.name AS name, m1.domain AS domain, m1.ownerId AS ownerId, m1.ownerType as ownerType, m1.active as active, JSON_ARRAYAGG(m2.name) AS aliasNames, JSON_ARRAYAGG(m2.domain) AS aliasDomains, m1.enablePop3 AS enablePop3, m1.storageQuota AS storageQuota, m1.messagesQuota AS messagesQuota '
+ ` FROM (SELECT * FROM mailboxes WHERE type='${exports.TYPE_MAILBOX}') AS m1`
+ ` LEFT JOIN (SELECT * FROM mailboxes WHERE type='${exports.TYPE_ALIAS}') AS m2`
+ ' ON m1.name=m2.aliasName AND m1.domain=m2.aliasDomain AND m1.ownerId=m2.ownerId'
@@ -1154,10 +1197,12 @@ async function addMailbox(name, domain, data, auditSource) {
assert.strictEqual(typeof data, 'object');
assert.strictEqual(typeof auditSource, 'object');
- const { ownerId, ownerType, active } = data;
+ const { ownerId, ownerType, active, storageQuota, messagesQuota } = data;
assert.strictEqual(typeof ownerId, 'string');
assert.strictEqual(typeof ownerType, 'string');
assert.strictEqual(typeof active, 'boolean');
+ assert(Number.isInteger(storageQuota) && storageQuota >= 0);
+ assert(Number.isInteger(messagesQuota) && messagesQuota >= 0);
name = name.toLowerCase();
@@ -1166,12 +1211,13 @@ async function addMailbox(name, domain, data, auditSource) {
if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
- [error] = await safe(database.query('INSERT INTO mailboxes (name, type, domain, ownerId, ownerType, active) VALUES (?, ?, ?, ?, ?, ?)', [ name, exports.TYPE_MAILBOX, domain, ownerId, ownerType, active ]));
+ [error] = await safe(database.query('INSERT INTO mailboxes (name, type, domain, ownerId, ownerType, active, storageQuota, messagesQuota) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
+ [ name, exports.TYPE_MAILBOX, domain, ownerId, ownerType, active, storageQuota, messagesQuota ]));
if (error && error.code === 'ER_DUP_ENTRY') throw new BoxError(BoxError.ALREADY_EXISTS, 'mailbox already exists');
if (error && error.code === 'ER_NO_REFERENCED_ROW_2' && error.sqlMessage.includes('mailboxes_domain_constraint')) throw new BoxError(BoxError.NOT_FOUND, `no such domain '${domain}'`);
if (error) throw error;
- await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active });
+ await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_ADD, auditSource, { name, domain, ownerId, ownerType, active, storageQuota, messageQuota: messagesQuota });
}
async function updateMailbox(name, domain, data, auditSource) {
@@ -1180,23 +1226,30 @@ async function updateMailbox(name, domain, data, auditSource) {
assert.strictEqual(typeof data, 'object');
assert.strictEqual(typeof auditSource, 'object');
- const { ownerId, ownerType, active, enablePop3 } = data;
- assert.strictEqual(typeof ownerId, 'string');
- assert.strictEqual(typeof ownerType, 'string');
- assert.strictEqual(typeof active, 'boolean');
- assert.strictEqual(typeof enablePop3, 'boolean');
+ const args = [];
+ const fields = [];
+ for (const k in data) {
+ if (k === 'enablePop3' || k === 'active') {
+ fields.push(k + ' = ?');
+ args.push(data[k] ? 1 : 0);
+ continue;
+ }
- name = name.toLowerCase();
+ if (k === 'ownerType' && !OWNERTYPES.includes(data[k])) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
- if (!OWNERTYPES.includes(ownerType)) throw new BoxError(BoxError.BAD_FIELD, 'bad owner type');
+ fields.push(k + ' = ?');
+ args.push(data[k]);
+ }
+ args.push(name.toLowerCase());
+ args.push(domain);
const mailbox = await getMailbox(name, domain);
if (!mailbox) throw new BoxError(BoxError.NOT_FOUND, 'No such mailbox');
- const result = await database.query('UPDATE mailboxes SET ownerId = ?, ownerType = ?, active = ?, enablePop3 = ? WHERE name = ? AND domain = ?', [ ownerId, ownerType, active, enablePop3, name, domain ]);
+ const result = await safe(database.query('UPDATE mailboxes SET ' + fields.join(', ') + ' WHERE name = ? AND domain = ?', args));
if (result.affectedRows === 0) throw new BoxError(BoxError.NOT_FOUND, 'Mailbox not found');
- await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, { name, domain, oldUserId: mailbox.userId, ownerId, ownerType, active });
+ await eventlog.add(eventlog.ACTION_MAIL_MAILBOX_UPDATE, auditSource, Object.assign(data, { name, domain, oldUserId: mailbox.userId }) );
}
async function removeSolrIndex(mailbox) {
@@ -1248,6 +1301,18 @@ async function getAlias(name, domain) {
return results[0];
}
async function searchAlias(name, domain) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof domain, 'string');
const results = await database.query(`SELECT ${MAILBOX_FIELDS} FROM mailboxes WHERE ? LIKE REPLACE(REPLACE(name, '*', '%'), '_', '\\_') AND type = ? AND domain = ?`, [ name, exports.TYPE_ALIAS, domain ]);
if (results.length === 0) return null;
results.forEach(function (result) { postProcessMailbox(result); });
return results[0];
}
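The double REPLACE in the query turns a stored alias name into a LIKE pattern: '*' becomes the '%' wildcard while a literal '_' is escaped so it is not treated as the single-character wildcard. A rough JavaScript equivalent of that matching, with hypothetical values:

// approximates the SQL: candidate LIKE REPLACE(REPLACE(name, '*', '%'), '_', '\\_')
function aliasMatches(storedName, candidate) {
    const pattern = storedName.split('*')
        .map((s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')) // escape regex metacharacters
        .join('.*'); // '*' behaves like SQL '%'
    return new RegExp(`^${pattern}$`).test(candidate);
}
console.log(aliasMatches('sales-*', 'sales-eu')); // true
console.log(aliasMatches('no_reply', 'noXreply')); // false, '_' stays literal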
async function getAliases(name, domain) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof domain, 'string');
@@ -1267,7 +1332,7 @@ async function setAliases(name, domain, aliases, auditSource) {
const name = aliases[i].name.toLowerCase();
const domain = aliases[i].domain.toLowerCase();
- const error = validateName(name);
+ const error = validateAlias(name);
if (error) throw error;
const mailDomain = await getDomain(domain);
+4 -3
@@ -6,10 +6,11 @@
<p>{{ passwordResetEmail.description }}</p>
- <p>
- <a href="<%= resetLink %>">{{ passwordResetEmail.resetAction }}</a>
- </p>
+ <br/>
+ <a style="border-radius: 2px; background-color: #2196f3; color: white; padding: 6px 12px; text-decoration: none;" href="<%= resetLink %>">{{ passwordResetEmail.resetAction }}</a>
+ <br/>
+ <br/>
{{ passwordResetEmail.expireNote }}
Some files were not shown because too many files have changed in this diff.