Compare commits
513 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| a50409bdca | |||
| 60a722e6cc | |||
| 4d6cafa589 | |||
| 63e557430b | |||
| 04acb4423d | |||
| ea813acf4c | |||
| b1198dfdbf | |||
| 4342de3747 | |||
| ef8bc7e7e9 | |||
| e18e401f6b | |||
| ab998c47e8 | |||
| 9fb830b2e1 | |||
| 415c3f90a1 | |||
| 60c8ff7fb1 | |||
| 037816313c | |||
| 3d285d1ac6 | |||
| 135338786f | |||
| 661f1fce31 | |||
| 03664ef784 | |||
| d2111ef2b6 | |||
| e0df19c888 | |||
| 6a523606ca | |||
| b6cd40e63c | |||
| b421866bf5 | |||
| fe06075816 | |||
| 2b73eb90ec | |||
| 5555321cf5 | |||
| f087ebbee0 | |||
| d04f64d3d4 | |||
| 777a5a0929 | |||
| 6c297f890e | |||
| 3c8d0b1b37 | |||
| 74f2cd156f | |||
| a9fdffa9af | |||
| e6f8e8eb94 | |||
| 1bd89ca055 | |||
| 0e226d0314 | |||
| e8d4e2c792 | |||
| 4cfbed8273 | |||
| 0410ac9780 | |||
| 82fcf6a770 | |||
| a1332865c0 | |||
| ae0e4de93e | |||
| 02a6525558 | |||
| 5afef14760 | |||
| 890d589a36 | |||
| 89a50c4b83 | |||
| da5cd2b62c | |||
| 57321624aa | |||
| 876ae822b2 | |||
| 1ceb75868b | |||
| 98ad16f943 | |||
| 9363746c1a | |||
| 7a1b9ab94c | |||
| 46d6b5b81f | |||
| 7e8757a78c | |||
| e508b25ecd | |||
| 3fdc10c523 | |||
| 717953c162 | |||
| daa34c3b4d | |||
| bf5c78d819 | |||
| 1763144278 | |||
| 2f598529fc | |||
| 8264e69e2f | |||
| b0638df94e | |||
| bb61eee557 | |||
| 39c39b861d | |||
| e3deda4ef3 | |||
| 7e44e7de82 | |||
| 9dd0518c00 | |||
| 81313d1c40 | |||
| 2ceccc4557 | |||
| 1c36918e92 | |||
| 8d93df23c1 | |||
| 0c06b34a2c | |||
| fe980eab7f | |||
| 979b903bf2 | |||
| 4b8ee0934a | |||
| 0439725790 | |||
| 4b3ef33989 | |||
| 9e99d51853 | |||
| 00a9fa8f34 | |||
| 84a35343d1 | |||
| 397bd17c55 | |||
| c8e377a9bd | |||
| 90e3138bae | |||
| 24b32a763b | |||
| 69a12d36ef | |||
| 1485718fa6 | |||
| 750f03d9de | |||
| b5ddf1d24d | |||
| 043a35111d | |||
| 676457b589 | |||
| e61f11be81 | |||
| 101a44affd | |||
| 7995c664ed | |||
| 6023c0e5dc | |||
| d49d76c1ee | |||
| 77ef212daa | |||
| 7aa80193c0 | |||
| 5632c74556 | |||
| 7a08745af1 | |||
| d9ba0858c7 | |||
| 617e51d294 | |||
| c07d322fff | |||
| 9b8fa8a772 | |||
| c351242af7 | |||
| 55245557f5 | |||
| ee1cef3ee8 | |||
| 5d51a7178f | |||
| 9d52397bcc | |||
| 5098fbe061 | |||
| 7062aa4ac7 | |||
| d6fec4f2b9 | |||
| 86ef462c76 | |||
| c76e7a3f63 | |||
| 2516a08659 | |||
| 562fe30333 | |||
| 4e0eed4bb2 | |||
| b604caec72 | |||
| 6b409e9089 | |||
| 015d434358 | |||
| c8e448cb84 | |||
| 03924be491 | |||
| 2729cecf4a | |||
| 32e2377828 | |||
| fdb8139b03 | |||
| 4b25c8a5ad | |||
| ae930a7fe8 | |||
| 3b9144ba4d | |||
| be6ea3d4c1 | |||
| a2983e58b5 | |||
| a99e86a5df | |||
| 906ad80069 | |||
| ac65f765e5 | |||
| c5bfe82315 | |||
| 7035b3c18a | |||
| 2108c61d97 | |||
| 2bdbb47286 | |||
| 333b8970b8 | |||
| 5673cfe2be | |||
| 4429239dbc | |||
| b6ab9aa9f5 | |||
| 84bde6327f | |||
| d6f49eb54f | |||
| 3c8c5e158b | |||
| b3045b796f | |||
| c0febacc30 | |||
| f8ada91dc5 | |||
| d0e2ce9a9e | |||
| e157608992 | |||
| 8dbe0ddaf3 | |||
| b0cb18539c | |||
| 97b6d76694 | |||
| 9de6c8ee2b | |||
| cd28b1106b | |||
| b3a5dafee0 | |||
| eb4ab8defd | |||
| 639744e9cb | |||
| 6a942ab27a | |||
| 278f1d6d24 | |||
| 563eeca1a9 | |||
| 7a9c954646 | |||
| d768c36afb | |||
| 36ae3b267d | |||
| cd60f394d3 | |||
| 9aba90a6f7 | |||
| 68a8155f49 | |||
| 16695fd4ec | |||
| 9b6c6dc709 | |||
| 7923ed4f0d | |||
| 0b3d1c855c | |||
| d8273719d2 | |||
| c6d2c39ff7 | |||
| 6960afdf0b | |||
| 3a5000ab1d | |||
| 98951bab9e | |||
| 96fc3b8612 | |||
| 2b345b6c2d | |||
| 504662b466 | |||
| f56e6edbe4 | |||
| 191b84d389 | |||
| 8a4350d22e | |||
| cc6dae0f9e | |||
| 58528450e2 | |||
| ebf3559e60 | |||
| 57d20b2b32 | |||
| fd27240b26 | |||
| cad69d335c | |||
| 1f08cca355 | |||
| 7f4f525551 | |||
| b0037b6141 | |||
| 7956c8f58d | |||
| 330c9054b4 | |||
| d444d8552e | |||
| 595bf583c7 | |||
| 3386b99a29 | |||
| 5fd667cdaf | |||
| 4217db9e18 | |||
| b4717e2edb | |||
| 1d5465f21e | |||
| 2f1998fa67 | |||
| a7e998c030 | |||
| 8cc15726ec | |||
| 62e59868b4 | |||
| a64027f4af | |||
| f5a02930ec | |||
| 530ca20ee2 | |||
| f3b84ece3d | |||
| ca2d5957e4 | |||
| 7837214276 | |||
| 994202ca94 | |||
| ff7ceb1442 | |||
| 56545b7f41 | |||
| 586e78dfea | |||
| 92ede4c242 | |||
| 5ca2c2d564 | |||
| 9692aa3c08 | |||
| 10ad1028ae | |||
| 7155856b08 | |||
| 69aa771d44 | |||
| d164b5ae3a | |||
| b34d09f547 | |||
| 9e2850ffad | |||
| 480cface63 | |||
| 85aba589b8 | |||
| e890140aa9 | |||
| 53d56ef3a0 | |||
| b91674799b | |||
| 4bb864e2ac | |||
| 7db091525e | |||
| 695923ed75 | |||
| 1b43ccca6f | |||
| 96a0bad149 | |||
| 243ade15e1 | |||
| 9d3cf990d1 | |||
| 02bcff2223 | |||
| 8f388c86a6 | |||
| 8dc929f0ff | |||
| 509bd7e79b | |||
| 19c665d747 | |||
| cb09086ae8 | |||
| fa915d0b23 | |||
| a383f01406 | |||
| 1a46e80403 | |||
| e8cd230c12 | |||
| 0711dc2c5a | |||
| 486e72457d | |||
| 450e017bdb | |||
| c6d9cfc0d7 | |||
| a0b073d881 | |||
| 4dde16f987 | |||
| f7d2e262f4 | |||
| 34fedb5835 | |||
| ff491be976 | |||
| 7635482191 | |||
| b23001e43f | |||
| 06c8e8f0cb | |||
| ce2cd00fbf | |||
| 651af185c8 | |||
| 6951383ae0 | |||
| 37596e89b4 | |||
| 711fe37dad | |||
| 7fee3d0da0 | |||
| 45a61e9541 | |||
| bd0be2affc | |||
| 7812c0e5c2 | |||
| 7efb6d60bc | |||
| cd31e12bec | |||
| 87755c6097 | |||
| 73f56efe2c | |||
| 20eaa60a97 | |||
| b80f0082e9 | |||
| 1ff800a842 | |||
| 5b0abb1b17 | |||
| 178aa4794a | |||
| 76583cb2fa | |||
| aa484dc5b4 | |||
| 19a098d34b | |||
| db452d9bc0 | |||
| 90efb96635 | |||
| 0cee6de476 | |||
| 854d29330c | |||
| 34a3dd6d46 | |||
| 4787ee3301 | |||
| 7b547e7ae9 | |||
| fe5e31e528 | |||
| 841a838910 | |||
| 4f27fe4f1e | |||
| 96eab86341 | |||
| 95d7a991dc | |||
| dc309afbbd | |||
| 16d65d3665 | |||
| ccb340cf80 | |||
| 56b0f57e11 | |||
| 7c1e056152 | |||
| 08ffa99c78 | |||
| cdede5a009 | |||
| 4cadffa6ea | |||
| 04e13eac55 | |||
| 2b3ae69f63 | |||
| 8f4813f691 | |||
| 5b05baeced | |||
| 3d60e36c98 | |||
| 40c7bd114a | |||
| e0033b31f2 | |||
| 2d3bdda1c8 | |||
| fd40940ef5 | |||
| 6d58f65a1a | |||
| 44775e1791 | |||
| 4be1f4dd73 | |||
| 93bab552c9 | |||
| 023c03ddcd | |||
| a5bffad556 | |||
| 836348cbc0 | |||
| 1ac7570cfb | |||
| 0dceba8a1c | |||
| 599b070779 | |||
| c581e0ad09 | |||
| e14b59af5d | |||
| eff9de3ded | |||
| 4f128c6503 | |||
| 8dc9d4c083 | |||
| 21e3300396 | |||
| d136895598 | |||
| dac3eef57c | |||
| 2fac7dd736 | |||
| 74e2415308 | |||
| 41fae04b69 | |||
| 32a88a342c | |||
| b5bcde5093 | |||
| 68c36e8a18 | |||
| f6a9e1f4d8 | |||
| 2abd42096e | |||
| 922e214c52 | |||
| 6ce8899231 | |||
| cbfad632c2 | |||
| 7804aed5d7 | |||
| b90b1dbbbe | |||
| 020ec54264 | |||
| 0568093a2a | |||
| c9281bf863 | |||
| de451b2fe8 | |||
| ddf5c51737 | |||
| a33ccb32d2 | |||
| 0b03018a7b | |||
| 1b688410e7 | |||
| 6d031af012 | |||
| 67a5151070 | |||
| a4b299bf6e | |||
| 383d1eb406 | |||
| 3901144eae | |||
| 317c6db1d5 | |||
| 1e14f8e2b9 | |||
| 88fc7ca915 | |||
| b983e205d2 | |||
| 9cdbc6ba36 | |||
| 895f5f7398 | |||
| f41b08d573 | |||
| 3e21b6cad3 | |||
| 1a32482f66 | |||
| ee1e083f32 | |||
| ebd3a15140 | |||
| d93edc6375 | |||
| 3ed17f3a2a | |||
| 8d9cfbd3de | |||
| f142d34f83 | |||
| 357ca55dec | |||
| d7a8731027 | |||
| 9117c7d141 | |||
| 472020f90c | |||
| 2256a0dd3a | |||
| 458b5d1e32 | |||
| 1e6abed4aa | |||
| cdd4b426d5 | |||
| 75b60a2949 | |||
| 9ab34ee43a | |||
| 3c9d7706de | |||
| 8b5b954cbb | |||
| b2204925d3 | |||
| 63734155f2 | |||
| eb0ae3400a | |||
| db8db430b9 | |||
| c0b2b1c26d | |||
| 7da20e95e3 | |||
| f30f90e6be | |||
| 7f05b48bd7 | |||
| ea257b95d9 | |||
| e7c399c36a | |||
| d84666fb43 | |||
| 1eb33099af | |||
| e35dbd522f | |||
| db6474ef2a | |||
| e437671baf | |||
| f60d640c8e | |||
| 56c992e51b | |||
| 12ee7b9521 | |||
| c8de557ff7 | |||
| 90adaf29d7 | |||
| a71323f8b3 | |||
| 155995c7f3 | |||
| 319632e996 | |||
| 33d55318d8 | |||
| ec1abf8926 | |||
| 9a41f111b0 | |||
| 7ef6bd0d3f | |||
| 02f0bb3ea5 | |||
| e12b236617 | |||
| 6662a4d7d6 | |||
| 85315d8fc5 | |||
| 9f5a7e4c08 | |||
| ea0e61e6a4 | |||
| c301e9b088 | |||
| 70e861b106 | |||
| f5c6862627 | |||
| d845f1ae5b | |||
| 7c7d67c6c2 | |||
| c9fcbcc61c | |||
| 9ac06e7f85 | |||
| 6eafac2cad | |||
| 60cb0bdfb1 | |||
| 979956315c | |||
| 62ba031702 | |||
| 284cb7bee5 | |||
| 735c22bc98 | |||
| a2beed01a1 | |||
| 93fc6b06a2 | |||
| a327ce8a82 | |||
| f8374929ac | |||
| 5f93290fc7 | |||
| 4d139232bf | |||
| 804947f039 | |||
| 89fb2b57ff | |||
| 1262d11cb3 | |||
| 1ba72db4f8 | |||
| 7d2304e4a1 | |||
| ebf1dc1b08 | |||
| ce31f56eb6 | |||
| 7dd52779dc | |||
| 2eb5cab74b | |||
| db50382b18 | |||
| 32b061c768 | |||
| 740e85d28c | |||
| 568a7f814d | |||
| b99438e550 | |||
| bcdf90a8d9 | |||
| 536c16929b | |||
| d392293b50 | |||
| 16371d4528 | |||
| cdd0b48023 | |||
| 15cac726c4 | |||
| 6dc69a4d5d | |||
| c52dfcf52f | |||
| eaac13b1c1 | |||
| 3e83f3d4ee | |||
| 3845a8f02b | |||
| c932be77f8 | |||
| d89324162f | |||
| a0ef86f287 | |||
| 7255a86b32 | |||
| 81862bf934 | |||
| 81b7e5645c | |||
| 801367b68d | |||
| f2e8f325d1 | |||
| 138743b55f | |||
| 7f8db644d1 | |||
| c7e410c41b | |||
| 08f3b0b612 | |||
| a2782ef7a6 | |||
| 34fac8eb05 | |||
| 56338beae1 | |||
| 17e9f3b41d | |||
| 2c06b9325f | |||
| 2dfb91dcc9 | |||
| 9f20dfb237 | |||
| da2aecc76a | |||
| 7c72cd4399 | |||
| 5647b0430a | |||
| 7c94543da8 | |||
| 2118952120 | |||
| d45927cdf4 | |||
| c8e99e351e | |||
| fb56237122 | |||
| 89152fabde | |||
| 726463d497 | |||
| 055e41ac90 | |||
| 878878e5e4 | |||
| 7742c8a58e | |||
| 04476999f7 | |||
| 5bff7ebaa1 | |||
| 44742ea3ae | |||
| d6ea7fc3a0 | |||
| 2b49cde2c2 | |||
| 1008981306 | |||
| 146f3ad00e | |||
| 5219eff190 | |||
| abfd7b8aea | |||
| d98f64094e | |||
| a8d254738e | |||
| 1c9f2495e3 | |||
| aa4d95f352 | |||
| 558093eab1 | |||
| 865b041474 | |||
| 1888319313 | |||
| 0be7679619 | |||
| bbef6c2bc2 | |||
| be59267747 | |||
| b4477d26b7 | |||
| ce0afb3d80 | |||
| 0b5cd304ea | |||
| e54ad97fa7 | |||
| 66960ea785 | |||
| 72dd3026ca |
@@ -705,3 +705,103 @@
|
||||
* Send certificate renewal errors, OOM errors to cloudron admins
|
||||
* Email bounce alerts are sent to the Cloudron owner
|
||||
|
||||
[0.94.1]
|
||||
* Suppress upgrade emails
|
||||
* Enable unattended upgrades
|
||||
* Standardize on using devicemapper for docker storage backend
|
||||
* Show detailed backup progress
|
||||
* Fix DNSBL issue in mail container
|
||||
* Fix issue where bounce emails were not sent to aliases
|
||||
* Remove tutorial
|
||||
* Restart mail container on certificate change
|
||||
|
||||
[0.97.0]
|
||||
* Fix missing app icon issue
|
||||
* Fix issue where box sends out crash reports incessantly
|
||||
* (API) Allow memory limit to be set to -1 (unlimited)
|
||||
* (API) Move developmentMode flag from manifest to apps route
|
||||
|
||||
[0.98.0]
|
||||
* Send stat on whether email is enabled
|
||||
* Fix bug where heartbeat was sent for self-hosted Cloudrons
|
||||
* Make Cloudron function even when disk is full
|
||||
* Fix thunderbird connection issue
|
||||
* Send more detailed logs for backup failures
|
||||
* Restart nginx if it crashed automatically
|
||||
* Support all DNS providers for managed Cloudrons
|
||||
* Add granular configuration for auto-updates
|
||||
|
||||
[0.99.0]
|
||||
* Fix bug where ports <= 1023 were not reserved
|
||||
* Cleanup graphs UI
|
||||
* Polish webadmin UI
|
||||
* Fix bug where hard disk size was detected incorrectly
|
||||
|
||||
[0.99.1]
|
||||
* Fix bug with duplicate nginx configs
|
||||
|
||||
[0.100.0]
|
||||
* Improve DNS notifications for email
|
||||
* Do not enable HSTS for subdomains
|
||||
|
||||
[0.100.1]
|
||||
* Fix crash when fetching mail records
|
||||
* Fix crash in LDAP server when username and displayName are empty
|
||||
|
||||
[0.101.0]
|
||||
* New base image 0.10.0
|
||||
* Better error handling of unpurchase errors
|
||||
* Validate that cloudron domain name is a subdomain of public suffic list
|
||||
* Add canada and london to S3 backup regions
|
||||
* Bundle Font Awesome as part of webadmin
|
||||
* Fix crash in custom certiicate validation
|
||||
* Get A+ rating in SSL Check
|
||||
* More robust detection and injection of SPF record
|
||||
* Add azure, lightsail, linode, ovh, vultr to provider list
|
||||
|
||||
[0.102.0]
|
||||
* Fix issue where SPF record check was only done 5 times (updated 'async')
|
||||
* Make auto-generated self-signed cert load quickly on Firefox
|
||||
* Ensure we download docker images and have an app data volume on app re-configure
|
||||
* Improve certificate renewal erorr message
|
||||
* Fix disk usage graph
|
||||
* Show Repair UI for errored apps
|
||||
|
||||
[0.102.1]
|
||||
* Add terms link when signing up for Cloudron.io account
|
||||
* Fix issue where Cloudrons with many apps (> 35) were unable to backup
|
||||
* Improve wording of DNS Setup
|
||||
|
||||
[0.103.0]
|
||||
* Do not send crash logs and other notifications to support@cloudron.io for self-hosted instances
|
||||
* Make auto-generated self-signed cert load quickly on Firefox (take 2)
|
||||
|
||||
[0.104.0]
|
||||
* (mail) Fix crash when sending mails to groups with just 1 user
|
||||
* (ldap) Add isadmin attribute to better map users in apps
|
||||
* (ldap) Hide users which have not yet set a username in ldap searches
|
||||
* (core) Add SSH authorized_keys management
|
||||
* (core) Add additional security related headers to the nginx reverse proxy
|
||||
* (ui) Add remote SSH support option
|
||||
* (ui) Fix eventlog display
|
||||
* (ui) Fix CNAME setup information
|
||||
|
||||
[0.105.0]
|
||||
* Always show email related checks
|
||||
* Show outbound SMTP port 25 status
|
||||
* Hide remote feature for normal users
|
||||
* Only list users via ldap searches who have access to the app
|
||||
* Fix installation issue on servers with a differente locale set
|
||||
|
||||
[0.105.1]
|
||||
* Fix crash when setupToken is not provided in activate API
|
||||
* Add inline Docker GPG key
|
||||
* Re-download icon when repairing app
|
||||
* Fix issue where pre-installed apps were not installed correctly
|
||||
* Fix issue where new cloudrons could not be activated
|
||||
|
||||
[0.106.0]
|
||||
* (mail) Fix email forwarding to external domains
|
||||
* (mail) Set maximum email size to 25MB
|
||||
* Remove SimpleAuth addon
|
||||
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 5.5 KiB After Width: | Height: | Size: 14 KiB |
Executable
+193
@@ -0,0 +1,193 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
assertNotEmpty() {
|
||||
: "${!1:? "$1 is not set."}"
|
||||
}
|
||||
|
||||
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
|
||||
export JSON="${SOURCE_DIR}/node_modules/.bin/json"
|
||||
|
||||
INSTANCE_TYPE="t2.micro"
|
||||
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
|
||||
SSH_KEY_NAME="id_rsa_yellowtent"
|
||||
|
||||
revision=$(git rev-parse HEAD)
|
||||
ami_name=""
|
||||
server_id=""
|
||||
server_ip=""
|
||||
destroy_server="yes"
|
||||
deploy_env="prod"
|
||||
image_id=""
|
||||
|
||||
args=$(getopt -o "" -l "revision:,name:,no-destroy,env:,region:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--env) deploy_env="$2"; shift 2;;
|
||||
--revision) revision="$2"; shift 2;;
|
||||
--name) ami_name="$2"; shift 2;;
|
||||
--no-destroy) destroy_server="no"; shift 2;;
|
||||
--region)
|
||||
case "$2" in
|
||||
"us-east-1")
|
||||
image_id="ami-6edd3078"
|
||||
security_group="sg-a5e17fd9"
|
||||
subnet_id="subnet-b8fbc0f1"
|
||||
;;
|
||||
"eu-central-1")
|
||||
image_id="ami-5aee2235"
|
||||
security_group="sg-19f5a770" # everything open on eu-central-1
|
||||
subnet_id=""
|
||||
;;
|
||||
*)
|
||||
echo "Unknown aws region $2"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
export AWS_DEFAULT_REGION="$2" # used by the aws cli tool
|
||||
shift 2
|
||||
;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
# TODO fix this
|
||||
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
|
||||
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"
|
||||
|
||||
readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
|
||||
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
|
||||
if [[ ! -f "${ssh_keys}" ]]; then
|
||||
echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${image_id}" ]]; then
|
||||
echo "--region is required (us-east-1 or eu-central-1)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function get_pretty_revision() {
|
||||
local git_rev="$1"
|
||||
local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)
|
||||
|
||||
echo "${sha1}"
|
||||
}
|
||||
|
||||
function wait_for_ssh() {
|
||||
echo "=> Waiting for ssh connection"
|
||||
while true; do
|
||||
echo -n "."
|
||||
|
||||
if $SSH ubuntu@${server_ip} echo "hello"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
}
|
||||
|
||||
now=$(date "+%Y-%m-%d-%H%M%S")
|
||||
pretty_revision=$(get_pretty_revision "${revision}")
|
||||
|
||||
if [[ -z "${ami_name}" ]]; then
|
||||
ami_name="box-${deploy_env}-${pretty_revision}-${now}"
|
||||
fi
|
||||
|
||||
echo "=> Create EC2 instance"
|
||||
id=$(aws ec2 run-instances --image-id "${image_id}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${security_group}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}" --subnet-id "${subnet_id}" --associate-public-ip-address \
|
||||
| $JSON Instances \
|
||||
| $JSON 0.InstanceId)
|
||||
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "Instance created ID $id"
|
||||
|
||||
echo "=> Waiting for instance to get a public IP"
|
||||
while true; do
|
||||
server_ip=$(aws ec2 describe-instances --instance-ids ${id} \
|
||||
| $JSON Reservations.0.Instances \
|
||||
| $JSON 0.PublicIpAddress)
|
||||
|
||||
if [[ ! -z "${server_ip}" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "Got public IP ${server_ip}"
|
||||
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Fetching cloudron-setup"
|
||||
while true; do
|
||||
|
||||
if $SSH ubuntu@${server_ip} wget "https://cloudron.io/cloudron-setup" -O "cloudron-setup"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
|
||||
echo "=> Running cloudron-setup"
|
||||
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ami" --skip-reboot
|
||||
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Removing ssh key"
|
||||
$SSH ubuntu@${server_ip} sudo rm /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys
|
||||
|
||||
echo "=> Creating AMI"
|
||||
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "Creating AMI with Id ${image_id}"
|
||||
|
||||
echo "=> Waiting for AMI to be created"
|
||||
while true; do
|
||||
state=$(aws ec2 describe-images --image-ids ${image_id} \
|
||||
| $JSON Images \
|
||||
| $JSON 0.State)
|
||||
|
||||
if [[ "${state}" == "available" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
|
||||
if [[ "${destroy_server}" == "yes" ]]; then
|
||||
echo "=> Deleting EC2 instance"
|
||||
|
||||
while true; do
|
||||
state=$(aws ec2 terminate-instances --instance-id "${id}" \
|
||||
| $JSON TerminatingInstances \
|
||||
| $JSON 0.CurrentState.Name)
|
||||
|
||||
if [[ "${state}" == "shutting-down" ]]; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Done."
|
||||
echo ""
|
||||
echo "New AMI is: ${image_id}"
|
||||
echo ""
|
||||
@@ -31,7 +31,7 @@ function create_droplet() {
|
||||
|
||||
local image_region="sfo1"
|
||||
local ubuntu_image_slug="ubuntu-16-04-x64"
|
||||
local box_size="512mb"
|
||||
local box_size="1gb"
|
||||
|
||||
local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"
|
||||
|
||||
|
||||
@@ -17,26 +17,12 @@ export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get -o Dpkg::Options::="--force-confdef" update -y
|
||||
apt-get -o Dpkg::Options::="--force-confdef" dist-upgrade -y
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
|
||||
apt-get -y update
|
||||
apt-get -y install \
|
||||
aufs-tools \
|
||||
linux-image-extra-$(uname -r) \
|
||||
linux-image-extra-virtual \
|
||||
docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
|
||||
echo "==> Enable memory accounting"
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub
|
||||
|
||||
echo "==> Installing required packages"
|
||||
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
|
||||
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
|
||||
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
apt-get -y install \
|
||||
acl \
|
||||
awscli \
|
||||
@@ -44,6 +30,7 @@ apt-get -y install \
|
||||
build-essential \
|
||||
cron \
|
||||
curl \
|
||||
dmsetup \
|
||||
iptables \
|
||||
logrotate \
|
||||
mysql-server-5.7 \
|
||||
@@ -52,6 +39,7 @@ apt-get -y install \
|
||||
pwgen \
|
||||
rcconf \
|
||||
swaks \
|
||||
unattended-upgrades \
|
||||
unbound
|
||||
|
||||
echo "==> Installing node.js"
|
||||
@@ -62,6 +50,60 @@ ln -sf /usr/local/node-6.9.2/bin/npm /usr/bin/npm
|
||||
apt-get install -y python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
docker_key="-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
|
||||
ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
|
||||
mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
|
||||
TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
|
||||
dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
|
||||
X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
|
||||
HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
|
||||
NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
|
||||
hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
|
||||
65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
|
||||
zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
|
||||
tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
|
||||
Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
|
||||
AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
|
||||
Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
|
||||
1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
|
||||
uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
|
||||
0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
|
||||
L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
|
||||
YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
|
||||
7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
|
||||
jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
|
||||
HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
|
||||
MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
|
||||
TvBR8Q==
|
||||
=Fm3p
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
"
|
||||
echo "$docker_key" | apt-key add -
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
|
||||
apt-get -y update
|
||||
|
||||
# create systemd drop-in file
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
apt-mark hold docker-engine # do not update docker
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "devicemapper" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of devicemapper"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "==> Enable memory accounting"
|
||||
apt-get -y install grub2
|
||||
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
|
||||
update-grub
|
||||
|
||||
echo "==> Downloading docker images"
|
||||
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
|
||||
echo "No infra_versions.js found"
|
||||
|
||||
@@ -13,8 +13,7 @@ var appHealthMonitor = require('./src/apphealthmonitor.js'),
|
||||
async = require('async'),
|
||||
config = require('./src/config.js'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
server = require('./src/server.js'),
|
||||
simpleauth = require('./src/simpleauth.js');
|
||||
server = require('./src/server.js');
|
||||
|
||||
console.log();
|
||||
console.log('==========================================');
|
||||
@@ -33,7 +32,6 @@ console.log();
|
||||
async.series([
|
||||
server.start,
|
||||
ldap.start,
|
||||
simpleauth.start,
|
||||
appHealthMonitor.start,
|
||||
], function (error) {
|
||||
if (error) {
|
||||
@@ -48,13 +46,11 @@ var NOOP_CALLBACK = function () { };
|
||||
process.on('SIGINT', function () {
|
||||
server.stop(NOOP_CALLBACK);
|
||||
ldap.stop(NOOP_CALLBACK);
|
||||
simpleauth.stop(NOOP_CALLBACK);
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
process.on('SIGTERM', function () {
|
||||
server.stop(NOOP_CALLBACK);
|
||||
ldap.stop(NOOP_CALLBACK);
|
||||
simpleauth.stop(NOOP_CALLBACK);
|
||||
setTimeout(process.exit.bind(process), 3000);
|
||||
});
|
||||
|
||||
@@ -318,67 +318,3 @@ cloudron exec
|
||||
> swaks --server "${MAIL_SMTP_SERVER}" -p "${MAIL_SMTP_PORT}" --from "${MAIL_SMTP_USERNAME}@${MAIL_DOMAIN}" --body "Test mail from cloudron app at $(hostname -f)" --auth-user "${MAIL_SMTP_USERNAME}" --auth-password "${MAIL_SMTP_PASSWORD}"
|
||||
```
|
||||
|
||||
## simpleauth
|
||||
|
||||
Simple Auth can be used for authenticating users with a HTTP request. This method of authentication is targeted
|
||||
at applications, which for whatever reason can't use the ldap addon.
|
||||
The response contains an `accessToken` which can then be used to access the [Cloudron API](/references/api.html).
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
SIMPLE_AUTH_SERVER= # the simple auth HTTP server
|
||||
SIMPLE_AUTH_PORT= # the simple auth server port
|
||||
SIMPLE_AUTH_URL= # the simple auth server URL. same as "http://SIMPLE_AUTH_SERVER:SIMPLE_AUTH_PORT
|
||||
SIMPLE_AUTH_CLIENT_ID # a client id for identifying the request originator with the auth server
|
||||
```
|
||||
|
||||
This addons provides two REST APIs:
|
||||
|
||||
**POST /api/v1/login**
|
||||
|
||||
Request JSON body:
|
||||
```
|
||||
{
|
||||
"username": "<username> or <email>",
|
||||
"password": "<password>"
|
||||
}
|
||||
```
|
||||
|
||||
Response 200 with JSON body:
|
||||
```
|
||||
{
|
||||
"accessToken": "<accessToken>",
|
||||
"user": {
|
||||
"id": "<userId>",
|
||||
"username": "<username>",
|
||||
"email": "<email>",
|
||||
"admin": <admin boolean>,
|
||||
"displayName": "<display name>"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**GET /api/v1/logout**
|
||||
|
||||
Request params:
|
||||
```
|
||||
?access_token=<accessToken>
|
||||
```
|
||||
|
||||
Response 200 with JSON body:
|
||||
```
|
||||
{}
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `curl` tool within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> USERNAME=<enter username>
|
||||
|
||||
> PASSWORD=<enter password>
|
||||
|
||||
> PAYLOAD="{\"clientId\":\"${SIMPLE_AUTH_CLIENT_ID}\", \"username\":\"${USERNAME}\", \"password\":\"${PASSWORD}\"}"
|
||||
|
||||
> curl -H "Content-Type: application/json" -X POST -d "${PAYLOAD}" "${SIMPLE_AUTH_ORIGIN}/api/v1/login"
|
||||
```
|
||||
|
||||
+48
-26
@@ -62,7 +62,7 @@ curl -H "Content-Type: application/json" -H "Authorization: Bearer <token>" http
|
||||
## OAuth
|
||||
|
||||
OAuth authentication is meant to be used by apps. An app can get an OAuth token using the
|
||||
[oauth](addons.html#oauth) or [simpleauth](addons.html#simpleauth) addon.
|
||||
[oauth](addons.html#oauth) addon.
|
||||
|
||||
Tokens obtained via OAuth have a restricted scope wherein they can only access the user's profile.
|
||||
This restriction is so that apps cannot make undesired changes to the user's Cloudron.
|
||||
@@ -151,6 +151,8 @@ If `altDomain` is set, the app can be accessed from `https://<altDomain>`.
|
||||
* `SAMEORIGIN` - allows embedding from the same domain as the app. This is the default.
|
||||
* `ALLOW-FROM https://example.com/` - allows this app to be embedded from example.com
|
||||
|
||||
`memoryLimit` is the maximum memory this app can use (in bytes) including swap. If set to 0, the app uses the `memoryLimit` value set in the manifest. If set to -1, the app gets unlimited memory.
|
||||
|
||||
Read more about the options at [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).
|
||||
|
||||
Response (200):
|
||||
@@ -197,7 +199,8 @@ Response (200):
|
||||
health: <enum>, // health of the application
|
||||
location: <string>, // subdomain on which app is installed
|
||||
fqdn: <string>, // the FQDN of this app
|
||||
altDomain: <string> // alternate domain from which this app can be reached
|
||||
altDomain: <string>, // alternate domain from which this app can be reached
|
||||
cnameTarget: <string> || null, // If altDomain is set, this contains the CNAME location for the app
|
||||
accessRestriction: null || { // list of users and groups who can access this application
|
||||
users: [ ],
|
||||
groups: [ ]
|
||||
@@ -207,7 +210,8 @@ Response (200):
|
||||
portBindings: { // mapping from application ports to public ports
|
||||
},
|
||||
iconUrl: <url>, // a relative url providing the icon
|
||||
memoryLimit: <number> // memory constraint in bytes
|
||||
memoryLimit: <number>, // memory constraint in bytes
|
||||
sso: <boolean> // Enable single sign-on
|
||||
}
|
||||
```
|
||||
|
||||
@@ -255,6 +259,8 @@ is integrated with Cloudron Authentication.
|
||||
|
||||
`manifest` is the [application manifest](/references/manifest.html).
|
||||
|
||||
For apps that support optional single sign-on, the `sso` field can be used to disable Cloudron authentication. By default, single sign-on is enabled.
|
||||
|
||||
### List apps
|
||||
|
||||
GET `/api/v1/apps/:appId` <scope>admin</scope>
|
||||
@@ -276,7 +282,8 @@ Response (200):
|
||||
health: <enum>, // health of the application
|
||||
location: <string>, // subdomain on which app is installed
|
||||
fqdn: <string>, // the FQDN of this app
|
||||
altDomain: <string> // alternate domain from which this app can be reached
|
||||
altDomain: <string>, // alternate domain from which this app can be reached
|
||||
cnameTarget: <string> || null, // If altDomain is set, this contains the CNAME location for the app
|
||||
accessRestriction: null || { // list of users and groups who can access this application
|
||||
users: [ ],
|
||||
groups: [ ]
|
||||
@@ -447,7 +454,7 @@ POST `/api/v1/apps/:appId/configure` <scope>admin</scope>
|
||||
|
||||
Re-configures an existing app with id `appId`.
|
||||
|
||||
Configuring an app won't preserve existing data. Cloudron apps are written in a way to support reconfiguring
|
||||
Configuring an app preserves existing data. Cloudron apps are written in a way to support reconfiguring
|
||||
any of the parameters listed below without loss of data.
|
||||
|
||||
Request:
|
||||
@@ -652,6 +659,38 @@ curl -L <url> | openssl aes-256-cbc -d -pass "pass:$<backupKey>" | tar -zxf -
|
||||
|
||||
## Cloudron
|
||||
|
||||
### Activate the Cloudron
|
||||
|
||||
POST `/api/v1/cloudron/activate`
|
||||
|
||||
Activates the Cloudron with an admin username and password.
|
||||
|
||||
Request:
|
||||
```
|
||||
{
|
||||
username: <string>, // the admin username
|
||||
password: <string>, // the admin password
|
||||
email: <email> // the admin email
|
||||
}
|
||||
```
|
||||
|
||||
Response (201):
|
||||
```
|
||||
{
|
||||
"token": "771ee724a66aa557f95af06b4e6c27992f9230f6b1d65d5fbaa34cae9318d453",
|
||||
"expires": 1490224113353
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
The `token` parameter can be used to make further API calls.
|
||||
|
||||
Curl example to activate the cloudron:
|
||||
|
||||
```
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"username": "girish", "password":"MySecret123#", "email": "girish@cloudron.io" }' https://my.cloudron.info/api/v1/cloudron/activate
|
||||
```
|
||||
|
||||
### Update the Cloudron
|
||||
|
||||
POST `/api/v1/cloudron/update` <scope>admin</scope>
|
||||
@@ -680,8 +719,9 @@ Gets information about an in-progress Cloudron update or backup.
|
||||
|
||||
`update` or `backup` is `null` when there is no such activity in progress.
|
||||
|
||||
```
|
||||
Response (200):
|
||||
|
||||
```
|
||||
{
|
||||
update: null || { percent: <number>, message: <string> },
|
||||
backup: null || { percent: <number>, message: <string> }
|
||||
@@ -804,7 +844,7 @@ Response (200):
|
||||
* user.remove
|
||||
* user.update
|
||||
|
||||
`source` contains information on the originator of the action. For example, for user.login, this contains the IP address, the appId and the authType (ldap or simpleauth or oauth).
|
||||
`source` contains information on the originator of the action. For example, for user.login, this contains the IP address, the appId and the authType (ldap or oauth).
|
||||
|
||||
`data` contains information on the event itself. For example, for user.login, this contains the userId that logged in. For app.install, it contains the manifest and location of the app that was installed.
|
||||
|
||||
@@ -964,24 +1004,6 @@ Response (204):
|
||||
{}
|
||||
```
|
||||
|
||||
### Tutorial
|
||||
|
||||
POST `/api/v1/profile/tutorial` <scope>profile</scope>
|
||||
|
||||
Toggles display of the tutorial when the token owner logs in.
|
||||
|
||||
Request:
|
||||
```
|
||||
{
|
||||
showTutorial: <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
Response (204):
|
||||
```
|
||||
{}
|
||||
```
|
||||
|
||||
## Settings
|
||||
|
||||
### Get auto update pattern
|
||||
@@ -1132,7 +1154,7 @@ POST `/api/v1/settings/mail_config` <scope>admin</scope> <scope>internal</scope>
|
||||
|
||||
Sets the email configuration. The Cloudron has a built-in email server for users.
|
||||
This configuration can be used to enable or disable the email server. Note that
|
||||
the Cloudron will always be able to send email on behalf of apps, regardless of
|
||||
the Cloudron will always be able to send email on behalf of apps, regardless of
|
||||
this setting.
|
||||
|
||||
Request:
|
||||
|
||||
@@ -29,7 +29,6 @@ Cloudron provides multiple authentication strategies.
|
||||
|
||||
* OAuth 2.0 provided by the [OAuth addon](/references/addons.html#oauth)
|
||||
* LDAP provided by the [LDAP addon](/references/addons.html#ldap)
|
||||
* Simple Auth provided by [Simple Auth addon](/references/addons.html#simpleauth)
|
||||
|
||||
# Choosing a strategy
|
||||
|
||||
@@ -44,13 +43,6 @@ Applications can be broadly categorized based on their user management as follow
|
||||
|
||||
* No user
|
||||
* Such apps have no concept of logged-in user.
|
||||
* The Cloudron provides a `website visibility` setting that allows a Cloudron admin to optionally
|
||||
install an OAuth proxy in front of such applications. In such a case, a user visiting the website first
|
||||
authenticates with the OAuth proxy and once authenticated is allowed into the application.
|
||||
* When an OAuth proxy is installed, such applications can use the `X-Authenticated-User` header from the
|
||||
[ICAP Extensions](https://tools.ietf.org/html/draft-stecher-icap-subid-00#section-3.4) de facto standard.
|
||||
This value can be used for display purposes or creating meta data for a document.
|
||||
|
||||
|
||||
* Single user
|
||||
* Such apps only have a single user who is usually also the `admin`.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Overview
|
||||
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.9.0`.
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.10.0`.
|
||||
|
||||
The base image already contains most popular software packages including node, nginx, apache,
|
||||
ruby, PHP. Using the base image greatly reduces the size of app images.
|
||||
@@ -17,16 +17,16 @@ install it yourself.
|
||||
|
||||
* Apache 2.4.18
|
||||
* Composer 1.2.0
|
||||
* Go 1.5.4, 1.6.3
|
||||
* Go 1.6.4, 1.7.5 (install under `/usr/local/go-<version>`)
|
||||
* Gunicorn 19.4.5
|
||||
* Java 1.8
|
||||
* Maven 3.3.9
|
||||
* Mongo 2.6.10
|
||||
* MySQL Client 5.7.13
|
||||
* MySQL Client 5.7.17
|
||||
* nginx 1.10.0
|
||||
* Node 0.10.40, 0.12.7, 4.2.6, 4.4.7 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Node 0.10.48, 0.12.18, 4.7.3, 6.9.5 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Perl 5.22.1
|
||||
* PHP 7.0.8
|
||||
* PHP 7.0.13
|
||||
* Postgresql client 9.5.4
|
||||
* Python 2.7.12
|
||||
* Redis 3.0.6
|
||||
@@ -41,16 +41,16 @@ The base image can be inspected by installing [Docker](https://docs.docker.com/i
|
||||
|
||||
Once installed, pull down the base image locally using the following command:
|
||||
```
|
||||
docker pull cloudron/base:0.9.0
|
||||
docker pull cloudron/base:0.10.0
|
||||
```
|
||||
|
||||
To inspect the base image:
|
||||
```
|
||||
docker run -ti cloudron/base:0.9.0 /bin/bash
|
||||
docker run -ti cloudron/base:0.10.0 /bin/bash
|
||||
```
|
||||
|
||||
*Note:* Please use `docker 1.9.0` or above to pull the base image. Doing otherwise results in a base
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.9.0` is `d038af182821`.
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.10.0` is `5ec8ca8525be`.
|
||||
|
||||
# The `cloudron` user
|
||||
|
||||
|
||||
@@ -47,12 +47,14 @@ Type: object
|
||||
Required: no
|
||||
|
||||
Allowed keys
|
||||
* [email](addons.html#email)
|
||||
* [ldap](addons.html#ldap)
|
||||
* [localstorage](addons.html#localstorage)
|
||||
* [mongodb](addons.html#mongodb)
|
||||
* [mysql](addons.html#mysql)
|
||||
* [oauth](addons.html#oauth)
|
||||
* [postgresql](addons.html#postgresql)
|
||||
* [recvmail](addons.html#recvmail)
|
||||
* [redis](addons.html#redis)
|
||||
* [sendmail](addons.html#sendmail)
|
||||
|
||||
@@ -93,26 +95,6 @@ Example:
|
||||
"changelog": "* Add support for IE8 \n* New logo"
|
||||
```
|
||||
|
||||
## configurePath
|
||||
|
||||
Type: path string
|
||||
|
||||
Required: no
|
||||
|
||||
The `configurePath` can be used to specify the absolute path to the configuration / settings
|
||||
page of the app. When this path is present, an absolute URL is constructed from the app's
|
||||
install location and this path, and is presented to the user in the configuration dialog of the app.
|
||||
|
||||
This is useful for apps that have a main page which does not display a configuration / settings
|
||||
url (i.e) it's hidden for aesthetic reasons. For example, a blogging app like wordpress might
|
||||
keep the admin page URL hidden in the main page. Setting the `configurePath` makes the
|
||||
configuration url discoverable by the user.
|
||||
|
||||
Example:
|
||||
```
|
||||
"configurePath": "/wp-admin"
|
||||
```
|
||||
|
||||
## contactEmail
|
||||
|
||||
Type: email
|
||||
@@ -150,20 +132,6 @@ Example:
|
||||
"description:": "file://DESCRIPTION.md"
|
||||
```
|
||||
|
||||
## developmentMode
|
||||
|
||||
Type: boolean
|
||||
|
||||
Required: no
|
||||
|
||||
Setting `developmentMode` to true disables readonly rootfs and the default memory limit. In addition,
|
||||
the application *pauses* on start and can be started manually using `cloudron exec`. Note that you
|
||||
cannot submit an app to the store with this field turned on.
|
||||
|
||||
This mode can be used to identify the files being modified by your application - often required to
|
||||
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
|
||||
and use `find / -mmin -30` to find files that have been changed or created in the last 30 minutes.
|
||||
|
||||
## healthCheckPath
|
||||
|
||||
Type: url path
|
||||
@@ -312,6 +280,10 @@ The intended use of this field is to display some post installation steps that t
|
||||
complete the installation. For example, displaying the default admin credentials and informing the user to
|
||||
change it.
|
||||
|
||||
The message can have the following special tags:
|
||||
* `<sso> ... </sso>` - Content in `sso` blocks is shown when SSO is enabled.
|
||||
* `<nosso> ... </nosso>` - Content in `nosso` blocks is shown when SSO is disabled.
|
||||
|
||||
## optionalSso
|
||||
|
||||
Type: boolean
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
# Overview
|
||||
|
||||
The Cloudron platform can be installed on public cloud servers from EC2, Digital Ocean, Hetzner,
|
||||
Linode, OVH, Scaleway, Vultr etc. Running Cloudron on a home server or company intranet is work
|
||||
in progress.
|
||||
Linode, OVH, Scaleway, Vultr etc. Cloudron also runs well on a home server or company intranet.
|
||||
|
||||
If you run into any trouble following this guide, ask us at our [chat](https://chat.cloudron.io).
|
||||
|
||||
@@ -20,32 +19,9 @@ The Cloudron requires a domain name when it is installed. Apps are installed int
|
||||
The `my` subdomain is special and is the location of the Cloudron web interface. For this to
|
||||
work, the Cloudron requires a way to programmatically configure the DNS entries of the domain.
|
||||
Note that the Cloudron will never overwrite _existing_ DNS entries and will refuse to install
|
||||
apps on existing subdomains.
|
||||
apps on existing subdomains (so, it is safe to reuse an existing domain that runs other services).
|
||||
|
||||
# CLI Tool
|
||||
|
||||
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
|
||||
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be run on a Laptop or PC</b>
|
||||
|
||||
## Linux & OS X
|
||||
|
||||
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
|
||||
|
||||
```
|
||||
npm install -g cloudron
|
||||
```
|
||||
|
||||
Depending on your setup, you may need to run this as root.
|
||||
|
||||
On OS X, it is known to work with the `openssl` package from homebrew.
|
||||
|
||||
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
|
||||
|
||||
## Windows
|
||||
|
||||
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
|
||||
|
||||
# Provider
|
||||
# Cloud Server
|
||||
|
||||
DigitalOcean and EC2 (Amazon Web Services) are frequently tested by us.
|
||||
|
||||
@@ -58,32 +34,24 @@ In addition to those, the Cloudron community has successfully installed the plat
|
||||
* [hosttech](https://www.hosttech.ch/?promocode=53619290)
|
||||
* [Linode](https://www.linode.com/?r=f68d816692c49141e91dd4cef3305da457ac0f75)
|
||||
* [OVH](https://www.ovh.com/)
|
||||
* [Rosehosting](https://secure.rosehosting.com/clientarea/?affid=661)
|
||||
* [Scaleway](https://www.scaleway.com/)
|
||||
* [So you Start](https://www.soyoustart.com/)
|
||||
* [Vultr](http://www.vultr.com/?ref=7063201)
|
||||
* [Vultr](http://www.vultr.com/?ref=7110116-3B)
|
||||
|
||||
Please let us know if any of them requires tweaks or adjustments.
|
||||
|
||||
# Installing
|
||||
|
||||
## Choose Domain
|
||||
|
||||
A domain name is required when installing the Cloudron. Currently, only Second Level Domains
|
||||
are supported. For example, `example.com`, `example.co.uk` will work fine. Choosing a domain
|
||||
name at any other level like `cloudron.example.com` will not work.
|
||||
|
||||
The domain name must use one of the following name servers:
|
||||
* AWS Route 53
|
||||
* Digital Ocean
|
||||
* Wildcard - If your domain does not use any of the name servers above, you can manually add
|
||||
a wildcard (`*`) DNS entry.
|
||||
|
||||
You will have to provide the DNS API credentials after you complete the installation.
|
||||
|
||||
## Create server
|
||||
|
||||
Create an `Ubuntu 16.04 (Xenial)` server with at-least `1gb` RAM. Do not make any changes
|
||||
to vanilla ubuntu. Be sure to allocate a static IPv4 address for your server.
|
||||
Create an `Ubuntu 16.04 (Xenial)` server with at-least `1gb` RAM and 20GB disk space.
|
||||
Do not make any changes to vanilla ubuntu. Be sure to allocate a static IPv4 address
|
||||
for your server.
|
||||
|
||||
Cloudron has a built-in firewall and ports are opened and closed dynamically, as and when
|
||||
apps are installed, re-configured or removed. For this reason, be sure to open all TCP and
|
||||
UDP traffic to the server and leave the traffic management to the Cloudron.
|
||||
|
||||
### Linode
|
||||
|
||||
@@ -95,12 +63,6 @@ Since Linode does not manage SSH keys, be sure to add the public key to
|
||||
Use the [boot script](https://github.com/scaleway-community/scaleway-docker/issues/2) to
|
||||
enable memory accounting.
|
||||
|
||||
## Setup `my` subdomain
|
||||
|
||||
The Cloudron web interface is installed at the `my` subdomain of your domain.
|
||||
Add a `A` DNS record for the `my` subdomain with the IP of the server created
|
||||
above. Doing this will allow the Cloudron to start up with a valid TLS certificate.
|
||||
|
||||
## Run setup
|
||||
|
||||
SSH into your server and run the following commands:
|
||||
@@ -108,17 +70,12 @@ SSH into your server and run the following commands:
|
||||
```
|
||||
wget https://cloudron.io/cloudron-setup
|
||||
chmod +x cloudron-setup
|
||||
./cloudron-setup --domain <domain> --provider <digitalocean|ec2|generic|scaleway>
|
||||
./cloudron-setup --provider <azure|digitalocean|ec2|lightsail|linode|ovh|rosehosting|scaleway|vultr|generic>
|
||||
```
|
||||
|
||||
The setup will take around 10-15 minutes.
|
||||
|
||||
`cloudron-setup` takes the following arguments:
|
||||
|
||||
* `--domain` is the domain name in which apps are installed. Currently, only Second Level
|
||||
Domains are supported. For example, `example.com`, `example.co.uk`, `example.rocks` will
|
||||
work fine. Choosing a domain name at any other level like `cloudron.example.com` will not
|
||||
work.
|
||||
**cloudron-setup** takes the following arguments:
|
||||
|
||||
* `--provider` is the name of your VPS provider. If the name is not on the list, simply
|
||||
choose `generic`. In most cases, the `generic` provider will work fine.
|
||||
@@ -140,23 +97,17 @@ the latest version. You can set this to an older version when restoring a Cloudr
|
||||
|
||||
* `--restore-url` is a backup URL to restore from.
|
||||
|
||||
## Finish setup
|
||||
## Domain setup
|
||||
|
||||
Once the setup script completes, the server will reboot, then visit `https://my.<domain>` to complete the installation.
|
||||
Once the setup script completes, the server will reboot, then visit your server by its
|
||||
IP address (`https://ip`) to complete the installation.
|
||||
|
||||
Please note the following:
|
||||
The setup website will show a certificate warning. Accept the self-signed certificate
|
||||
and proceed to the domain setup.
|
||||
|
||||
1. The website should already have a valid TLS certificate. If you see any certificate warnings, it means your Cloudron was not created correctly.
|
||||
|
||||
2. If you see a login screen, instead of a setup screen, it means that someone else got to your Cloudron first and set it up
|
||||
already! In this unlikely case, simply delete the server and start over.
|
||||
|
||||
Once the setup is done, you can access the admin page in the future at `https://my.<domain>`.
|
||||
|
||||
## DNS
|
||||
|
||||
Cloudron has to be given the API credentials for configuring your domain under `Certs & Domains`
|
||||
in the web UI.
|
||||
Currently, only subdomains of the [Public Suffix List](https://publicsuffix.org/) are supported.
|
||||
For example, `example.com`, `example.co.uk` will work fine. Choosing other non-registrable
|
||||
domain names like `cloudron.example.com` will not work.
|
||||
|
||||
### Route 53
|
||||
|
||||
@@ -203,7 +154,11 @@ If your domain *does not* use Route 53 or Digital Ocean, setup a wildcard (`*`)
|
||||
IP of the server created above. If your DNS provider has an API, please open an
|
||||
[issue](https://git.cloudron.io/cloudron/box/issues) and we may be able to support it.
|
||||
|
||||
## Backups
|
||||
## Finish Setup
|
||||
|
||||
Once the domain setup is done, the Cloudron will configure the DNS and get a SSL certificate. It will automatically redirect to `https://my.<domain>`.
|
||||
|
||||
# Backups
|
||||
|
||||
The Cloudron creates encrypted backups once a day. Each app is backed up independently and these
|
||||
backups have the prefix `app_`. The platform state is backed up independently with the
|
||||
@@ -213,7 +168,7 @@ By default, backups reside in `/var/backups`. Please note that having backups re
|
||||
physical machine as the Cloudron server instance is dangerous and it must be changed to
|
||||
an external storage location like `S3` as soon as possible.
|
||||
|
||||
### Amazon S3
|
||||
## Amazon S3
|
||||
|
||||
Provide S3 backup credentials in the `Settings` page and leave the endpoint field empty.
|
||||
|
||||
@@ -245,7 +200,10 @@ for most use-cases.
|
||||
}
|
||||
```
|
||||
|
||||
### Minio S3
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
## Minio S3
|
||||
|
||||
[Minio](https://minio.io/) is a distributed object storage server, providing the same API as Amazon S3.
|
||||
Since Cloudron supports S3, any API compatible solution should be supported as well, if this is not the case, let us know.
|
||||
@@ -270,6 +228,8 @@ The information to be copied to the Cloudron's backup settings form may look sim
|
||||
|
||||
<img src="/docs/img/minio_backup_config.png" class="shadow"><br/>
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
# Email
|
||||
|
||||
@@ -286,22 +246,61 @@ reputation should be easy to get back.
|
||||
|
||||
## Checklist
|
||||
|
||||
* Once your Cloudron is ready, setup a Reverse DNS PTR record to be setup for the `my` subdomain.
|
||||
* If you are unable to receive mail, first thing to check is if your VPS provider lets you
|
||||
receive mail on port 25.
|
||||
|
||||
* AWS/EC2 - Fill the PTR [request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request.
|
||||
* Digital Ocean - New accounts frequently have port 25 blocked. Write to their support to
|
||||
unblock your server.
|
||||
|
||||
* EC2, Lightsail & Scaleway - Edit your security group to allow email.
|
||||
|
||||
* Set up a Reverse DNS PTR record for the `my` subdomain.
|
||||
**Note:** PTR records are a feature of your VPS provider and not your domain provider.
|
||||
|
||||
* You can verify the PTR record [here](https://mxtoolbox.com/ReverseLookup.aspx).
|
||||
|
||||
* AWS EC2 & Lightsail - Fill the [PTR request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request).
|
||||
|
||||
* Digital Ocean - Digital Ocean sets up a PTR record based on the droplet's name. So, simply rename
|
||||
your droplet to `my.<domain>`. Note that some new Digital Ocean accounts have [port 25 blocked](https://www.digitalocean.com/community/questions/port-25-smtp-external-access).
|
||||
|
||||
* Scaleway - Edit your security group to allow email. You can also set a PTR record on the interface with your
|
||||
`my.<domain>`.
|
||||
* Linode - Follow this [guide](https://www.linode.com/docs/networking/dns/setting-reverse-dns).
|
||||
|
||||
* Scaleway - Edit your security group to allow email and [reboot the server](https://community.online.net/t/security-group-not-working/2096) for the change to take effect. You can also set a PTR record on the interface with your `my.<domain>`.
|
||||
|
||||
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/). In most cases,
|
||||
you can apply for removal of your IP by filling out a form at the DNSBL manager site.
|
||||
|
||||
* When using wildcard or manual DNS backends, you have to setup the DMARC, MX records manually.
|
||||
|
||||
* Finally, check your spam score at [mail-tester.com](https://www.mail-tester.com/). The Cloudron
|
||||
should get 100%, if not please let us know.
|
||||
|
||||
# CLI Tool
|
||||
|
||||
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
|
||||
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be installed & run on a Laptop or PC</b>
|
||||
|
||||
Once installed, you can install, configure, list, backup and restore apps from the command line.
|
||||
|
||||
## Linux & OS X
|
||||
|
||||
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
|
||||
|
||||
```
|
||||
npm install -g cloudron
|
||||
```
|
||||
|
||||
Depending on your setup, you may need to run this as root.
|
||||
|
||||
On OS X, it is known to work with the `openssl` package from homebrew.
|
||||
|
||||
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
|
||||
|
||||
## Windows
|
||||
|
||||
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
|
||||
|
||||
# Updates
|
||||
|
||||
Apps installed from the Cloudron Store are automatically updated every night.
|
||||
@@ -323,9 +322,9 @@ with the latest code and restoring it from the last backup.
|
||||
|
||||
To upgrade follow these steps closely:
|
||||
|
||||
* Create a new backup - `cloudron machine backup create <domain>`
|
||||
* Create a new backup - `cloudron machine backup create`
|
||||
|
||||
* List the latest backup - `cloudron machine backup list <domain>`
|
||||
* List the latest backup - `cloudron machine backup list`
|
||||
|
||||
* Make the backup available for the new cloudron instance:
|
||||
|
||||
@@ -337,7 +336,7 @@ To upgrade follow these steps closely:
|
||||
|
||||
<img src="/docs/img/aws_backup_link.png" class="shadow haze"><br/>
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the machine before installation, mounting the external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download <domain>` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the machine before installation, mounting the external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `--encryption-key` and `--restore-url` flags.
|
||||
@@ -348,22 +347,28 @@ Similar to the initial installation, a Cloudron upgrade looks like:
|
||||
$ ssh root@newserverip
|
||||
> wget https://cloudron.io/cloudron-setup
|
||||
> chmod +x cloudron-setup
|
||||
> ./cloudron-setup --domain <domain> --provider <digitalocean|ec2|generic|scaleway> --encryption-key <key> --restore-url <publicS3Url>
|
||||
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --domain <example.com> --encryption-key <key> --restore-url <publicS3Url>
|
||||
```
|
||||
|
||||
Note: When upgrading an old version of Cloudron (<= 0.94.0), pass the `--version 0.94.1` flag and then continue updating
|
||||
from that.
|
||||
|
||||
* Finally, once you see the newest version being displayed in your Cloudron webinterface, you can safely delete the old server instance.
|
||||
|
||||
# Restore
|
||||
|
||||
To restore a Cloudron from a specific backup:
|
||||
|
||||
* Select the backup - `cloudron machine backup list <domain>`
|
||||
* Select the backup - `cloudron machine backup list`
|
||||
|
||||
* Make the box backup public (this can be done from the S3 console). Also, copy the URL of
|
||||
the backup for use as the `restore-url` below.
|
||||
* Make the backup public
|
||||
|
||||
* `S3` - Make the box backup publicly readable - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console. Once the box has restored, you can make it private again.
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the new machine before Cloudron installation OR mounting an external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `version`, `restore-key` and `restore-url` flags.
|
||||
When running the setup script, pass in the `version`, `encryption-key`, `domain` and `restore-url` flags.
|
||||
The `version` field is the version of the Cloudron that the backup corresponds to (it is embedded
|
||||
in the backup file name).
|
||||
|
||||
@@ -377,6 +382,14 @@ You can SSH into your Cloudron and collect logs:
|
||||
* `docker ps` will give you the list of containers. The addon containers are named as `mail`, `postgresql`,
|
||||
`mysql` etc. If you want to get a specific container's log output, `journalctl -a CONTAINER_ID=<container_id>`.
|
||||
|
||||
# Alerts
|
||||
|
||||
The Cloudron will notify the Cloudron administrator via email if apps go down, run out of memory, have updates
|
||||
available etc.
|
||||
|
||||
You will have to setup a 3rd party service like [Cloud Watch](https://aws.amazon.com/cloudwatch/) or [UptimeRobot](http://uptimerobot.com/) to monitor the Cloudron itself. You can use `https://my.<domain>/api/v1/cloudron/status`
|
||||
as the health check URL.
|
||||
|
||||
# Help
|
||||
|
||||
If you run into any problems, join us at our [chat](https://chat.cloudron.io) or [email us](mailto:support@cloudron.io).
|
||||
|
||||
@@ -75,7 +75,7 @@ A Dockerfile contains commands to assemble an image.
|
||||
Create a file named `tutorial/Dockerfile` with the following content:
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
@@ -171,7 +171,7 @@ Login successful.
|
||||
Build scheduled with id 76cebfdd-7822-4f3d-af17-b3eb393ae604
|
||||
Downloading source
|
||||
Building
|
||||
Step 0 : FROM cloudron/base:0.9.0
|
||||
Step 0 : FROM cloudron/base:0.10.0
|
||||
---> 97583855cc0c
|
||||
Step 1 : ADD server.js /app/code
|
||||
---> b09b97ecdfbc
|
||||
@@ -333,7 +333,7 @@ and modify our Dockerfile to look like this:
|
||||
File `tutorial/Dockerfile`
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
ADD package.json /app/code/package.json
|
||||
|
||||
+19
-13
@@ -5,8 +5,8 @@ This tutorial outlines how to package an existing web application for the Cloudr
|
||||
If you are aware of Docker and Heroku, you should feel at home packaging for the
|
||||
Cloudron. Roughly, the steps involved are:
|
||||
|
||||
* Create a Dockerfile for your application. If your application already has
|
||||
a Dockerfile, you should able to reuse most of it. By virtue of Docker, the Cloudron
|
||||
* Create a Dockerfile for your application. If your application already has a Dockerfile, it
|
||||
is a good starting point for packaging for the Cloudron. By virtue of Docker, the Cloudron
|
||||
is able to run apps written in any language/framework.
|
||||
|
||||
* Create a CloudronManifest.json that provides information like title, author, description
|
||||
@@ -79,27 +79,27 @@ console.log("Server running at port 8000");
|
||||
The Dockerfile contains instructions on how to create an image for your application.
|
||||
|
||||
```Dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-4.2.1/bin/node", "/app/code/server.js" ]
|
||||
CMD [ "/usr/local/node-4.4.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
All Cloudron apps **must** start from this base image. This approach conserves space on the Cloudron since
|
||||
Docker images tend to be quiet large.
|
||||
Docker images tend to be quite large and also helps us to do a security audit on apps more easily.
|
||||
|
||||
The `ADD` command copies the source code of the app into the directory `/app/code`. There is nothing special
|
||||
about the `/app/code` directory and it is merely a convention we use to store the application code.
|
||||
|
||||
The `CMD` command specifies how to run the server. The base image already contains many different versions of
|
||||
node.js. We use Node 4.2.1 here.
|
||||
node.js. We use Node 4.4.7 here.
|
||||
|
||||
This Dockerfile can be built and run locally as:
|
||||
```
|
||||
docker build -t tutorial .
|
||||
docker run -p 8000:8000 -ti tutorial
|
||||
docker run -p 8000:8000 -t tutorial
|
||||
```
|
||||
|
||||
## Manifest
|
||||
@@ -188,7 +188,7 @@ Build scheduled with id e7706847-f2e3-4ba2-9638-3f334a9453a5
|
||||
Waiting for build to begin, this may take a bit...
|
||||
Downloading source
|
||||
Building
|
||||
Step 1 : FROM cloudron/base:0.9.0
|
||||
Step 1 : FROM cloudron/base:0.10.0
|
||||
---> be9fc6312b2d
|
||||
Step 2 : ADD server.js /app/code/server.js
|
||||
---> 10513e428d7a
|
||||
@@ -271,14 +271,18 @@ You can also execute arbitrary commands:
|
||||
$ cloudron exec env # display the env variables that your app is running with
|
||||
```
|
||||
|
||||
### DevelopmentMode
|
||||
### Debugging
|
||||
|
||||
When debugging complex startup scripts, one can specify `"developmentMode": true,` in the CloudronManifest.json.
|
||||
This will ignore the `RUN` command, specified in the Dockerfile and allows the developer to interactively test
|
||||
the startup scripts using `cloudron exec`.
|
||||
An app can be placed in `debug` mode by passing `--debug` to `cloudron install` or `cloudron configure`.
|
||||
Doing so, runs the app in a non-readonly rootfs and unlimited memory. By default, this will also ignore
|
||||
the `RUN` command specified in the Dockerfile. The developer can then interactively test the app and
|
||||
startup scripts using `cloudron exec`.
|
||||
|
||||
**Note:** that an app running in this mode has full read/write access to the filesystem and all memory limits are lifted.
|
||||
This mode can be used to identify the files being modified by your application - often required to
|
||||
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
|
||||
and use `find / -mmin -30` to find file that have been changed or created in the last 30 minutes.
|
||||
|
||||
You can turn off debugging mode using `cloudron configure --no-debug`.
|
||||
|
||||
# Addons
|
||||
|
||||
@@ -385,6 +389,8 @@ field in the manifest.
|
||||
Design your application runtime for concurrent use by 50 users. The Cloudron is not designed for
|
||||
concurrent access by 100s or 1000s of users.
|
||||
|
||||
An app can determine it's memory limit by reading `/sys/fs/cgroup/memory/memory.limit_in_bytes`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
|
||||
+12
-3
@@ -40,12 +40,21 @@ gulp.task('3rdparty', function () {
|
||||
// JavaScript
|
||||
// --------------
|
||||
|
||||
if (argv.help || argv.h) {
|
||||
console.log('Supported arguments for "gulp develop":');
|
||||
console.log(' --client-id <clientId>');
|
||||
console.log(' --client-secret <clientSecret>');
|
||||
console.log(' --api-origin <cloudron api uri>');
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
gulp.task('js', ['js-index', 'js-setup', 'js-setupdns', 'js-update'], function () {});
|
||||
|
||||
var oauth = {
|
||||
clientId: argv.clientId || process.env.CLOUDRON_CLIENT_ID || 'cid-webadmin',
|
||||
clientSecret: argv.clientSecret || process.env.CLOUDRON_CLIENT_SECRET || 'unused',
|
||||
apiOrigin: argv.apiOrigin || process.env.CLOUDRON_API_ORIGIN || ''
|
||||
clientId: argv.clientId || 'cid-webadmin',
|
||||
clientSecret: argv.clientSecret || 'unused',
|
||||
apiOrigin: argv.apiOrigin || ''
|
||||
};
|
||||
|
||||
console.log();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
var dbm = require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var url = require('url');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var fs = require('fs'),
|
||||
async = require('async'),
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN resetToken VARCHAR(128) DEFAULT ""', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM tokens', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE authcodes ADD COLUMN expiresAt BIGINT NOT NULL', function (error) {
|
||||
@@ -13,4 +12,4 @@ exports.down = function(db, callback) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appPortBindings ADD COLUMN environmentVariable VARCHAR(128) NOT NULL', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appPortBindings DROP COLUMN containerPort', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM tokens', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN version', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN healthy, ADD COLUMN health VARCHAR(128)', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN lastBackupId VARCHAR(128)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN createdAt TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
// everyday at 1am
|
||||
@@ -8,5 +7,4 @@ exports.up = function(db, callback) {
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="autoupdate_pattern"', [ ], callback);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
var safe = require('safetydance');
|
||||
var type = dbm.dataType;
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var tz = safe.fs.readFileSync('/etc/timezone', 'utf8');
|
||||
@@ -12,4 +12,3 @@ exports.up = function(db, callback) {
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="time_zone"', [ ], callback);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN lastManifestJson VARCHAR(2048)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE lastManifestJson lastBackupConfigJson VARCHAR(2048)', [], function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN oldConfigJson VARCHAR(2048)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM settings', [ ], callback);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN oauthProxy BOOLEAN DEFAULT 0', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE accessRestriction accessRestrictionJson VARCHAR(2048)', [], function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY manifestJson TEXT', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN displayName VARCHAR(512) DEFAULT ""', function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN memoryLimit BIGINT DEFAULT 0', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE groups(" +
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE IF NOT EXISTS groupMembers(" +
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var async = require('async');
|
||||
|
||||
var ADMIN_GROUP_ID = 'admin'; // see groups.js
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE backups(" +
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN configJson TEXT', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN configJson', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups CHANGE filename id VARCHAR(128)', [], function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users MODIFY username VARCHAR(254) UNIQUE', [], function (error) {
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = dbm || require('db-migrate');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN altDomain VARCHAR(256)', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE eventlog(" +
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = 'CREATE TABLE mailboxes(' +
|
||||
'name VARCHAR(128) NOT NULL,' +
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// imports mailbox entries for existing users
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN lastBackupConfigJson', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY installationProgress TEXT', [], function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN xFrameOptions VARCHAR(512)', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT id FROM users', function (error, results) {
|
||||
@@ -14,4 +13,3 @@ exports.up = function(db, callback) {
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="mail_config"', [ ], callback);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = dbm || require('db-migrate');
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
@@ -71,4 +71,3 @@ exports.down = function(db, callback) {
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN sso BOOLEAN DEFAULT 1', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN oauthProxy', function (error) {
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN showTutorial', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN debugModeJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN debugModeJson ', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups MODIFY dependsOn TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups MODIFY dependsOn VARCHAR(4096)', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -19,7 +19,6 @@ CREATE TABLE IF NOT EXISTS users(
|
||||
modifiedAt VARCHAR(512) NOT NULL,
|
||||
admin INTEGER NOT NULL,
|
||||
displayName VARCHAR(512) DEFAULT '',
|
||||
showTutorial BOOLEAN DEFAULT 0,
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS groups(
|
||||
@@ -61,13 +60,14 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
manifestJson TEXT,
|
||||
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
|
||||
location VARCHAR(128) NOT NULL UNIQUE,
|
||||
dnsRecordId VARCHAR(512),
|
||||
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates (unused)
|
||||
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
|
||||
createdAt TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
memoryLimit BIGINT DEFAULT 0,
|
||||
altDomain VARCHAR(256),
|
||||
xFrameOptions VARCHAR(512),
|
||||
sso BOOLEAN DEFAULT 1, // whether user chose to enable SSO
|
||||
debugModeJson TEXT, // options for development mode
|
||||
|
||||
lastBackupId VARCHAR(128), // tracks last valid backup, can be removed
|
||||
|
||||
@@ -105,7 +105,7 @@ CREATE TABLE IF NOT EXISTS backups(
|
||||
creationTime TIMESTAMP,
|
||||
version VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
|
||||
dependsOn VARCHAR(4096), /* comma separate list of objects this backup depends on */
|
||||
dependsOn TEXT, /* comma separate list of objects this backup depends on */
|
||||
state VARCHAR(16) NOT NULL,
|
||||
|
||||
PRIMARY KEY (filename));
|
||||
|
||||
Generated
+3434
-1142
File diff suppressed because it is too large
Load Diff
+7
-6
@@ -13,11 +13,11 @@
|
||||
"node >=4.0.0 <=4.1.1"
|
||||
],
|
||||
"dependencies": {
|
||||
"async": "^1.2.1",
|
||||
"async": "^2.1.4",
|
||||
"aws-sdk": "^2.1.46",
|
||||
"body-parser": "^1.13.1",
|
||||
"checksum": "^0.1.1",
|
||||
"cloudron-manifestformat": "^2.5.1",
|
||||
"cloudron-manifestformat": "^2.8.0",
|
||||
"connect-ensure-login": "^0.1.1",
|
||||
"connect-lastmile": "^0.1.0",
|
||||
"connect-timeout": "^1.5.0",
|
||||
@@ -25,12 +25,14 @@
|
||||
"cookie-session": "^1.1.0",
|
||||
"cron": "^1.0.9",
|
||||
"csurf": "^1.6.6",
|
||||
"db-migrate": "^0.9.2",
|
||||
"db-migrate": "^0.10.0-beta.20",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^2.2.0",
|
||||
"dockerode": "^2.2.10",
|
||||
"ejs": "^2.2.4",
|
||||
"ejs-cli": "^1.2.0",
|
||||
"express": "^4.12.4",
|
||||
"express-rate-limit": "^2.6.0",
|
||||
"express-session": "^1.11.3",
|
||||
"gulp-sass": "^3.0.0",
|
||||
"hat": "0.0.3",
|
||||
@@ -58,15 +60,14 @@
|
||||
"proxy-middleware": "^0.13.0",
|
||||
"safetydance": "^0.1.1",
|
||||
"semver": "^4.3.6",
|
||||
"showdown": "^1.4.4",
|
||||
"showdown": "^1.6.0",
|
||||
"split": "^1.0.0",
|
||||
"superagent": "^1.8.3",
|
||||
"supererror": "^0.7.1",
|
||||
"tldjs": "^1.6.2",
|
||||
"underscore": "^1.7.0",
|
||||
"valid-url": "^1.0.9",
|
||||
"validator": "^4.9.0",
|
||||
"x509": "^0.2.4"
|
||||
"validator": "^4.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"bootstrap-sass": "^3.3.3",
|
||||
|
||||
+81
-33
@@ -7,15 +7,24 @@ if [[ ${EUID} -ne 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $(lsb_release -rs) != "16.04" ]]; then
|
||||
echo "Cloudron requires Ubuntu 16.04" > /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly MINIMUM_DISK_SIZE_GB="20" # this is the size of "/" and required to fit in docker images
|
||||
readonly MINIMUM_MEMORY="992" # this is mostly reported for 1GB main memory due to 1000 vs 1024
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
# copied from cloudron-resize-fs.sh
|
||||
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
readonly disk_size_gb=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf "%.0f", $3 }')
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }')
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024/1024))
|
||||
|
||||
# verify the system has minimum requirements met
|
||||
if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
|
||||
@@ -38,10 +47,13 @@ dnsProvider="manual"
|
||||
tlsProvider="le-prod"
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
requestedVersion="latest"
|
||||
apiServer="https://api.cloudron.io"
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
dataJson=""
|
||||
prerelease="false"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -54,10 +66,26 @@ while true; do
|
||||
--tls-provider) tlsProvider="$2"; shift 2;;
|
||||
--dns-provider) dnsProvider="$2"; shift 2;;
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
fi
|
||||
shift 2;;
|
||||
--versions-url) versionsUrl="$2"; shift 2;;
|
||||
--api-server) apiServer="$2"; shift 2;;
|
||||
--api-server) apiServerOrigin="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--data) dataJson="$2"; shift 2;;
|
||||
--prerelease) prerelease="true"; shift;;
|
||||
--source-url) sourceTarballUrl="$2"; version="0.0.1+custom"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -66,15 +94,27 @@ done
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (generic, scaleway, ec2, digitalocean)"
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, scaleway, vultr or generic)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "generic" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "ami" && \
|
||||
"${provider}" != "azure" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "digitalocean" \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "ovh" && \
|
||||
"${provider}" != "rosehosting" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: generic, scaleway, ec2, digitalocean"
|
||||
echo "--provider must be one of: azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${tlsProvider}" != "fallback" && "${tlsProvider}" != "le-prod" && "${tlsProvider}" != "le-staging" ]]; then
|
||||
echo "--tls-provider must be one of: le-prod, le-staging, fallback"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -99,7 +139,7 @@ echo " Join us at https://chat.cloudron.io for any questions."
|
||||
echo ""
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo "=> Updating apt and installing script dependancies"
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories"
|
||||
exit 1
|
||||
@@ -112,15 +152,18 @@ if [[ "${initBaseImage}" == "true" ]]; then
|
||||
fi
|
||||
|
||||
echo "=> Checking version"
|
||||
releaseJson=$(curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
version=$(echo "${releaseJson}" | python3 -c 'import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if "-pre" not in v)[-1];print(latest)')
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
exit 1
|
||||
if [[ "${sourceTarballUrl}" == "" ]]; then
|
||||
releaseJson=$($curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build data
|
||||
@@ -131,7 +174,7 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServer}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"tlsConfig": {
|
||||
"provider": "${tlsProvider}"
|
||||
},
|
||||
@@ -143,6 +186,9 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"backupFolder": "/var/backups",
|
||||
"key": "${encryptionKey}"
|
||||
},
|
||||
"updateConfig": {
|
||||
"prerelease": ${prerelease}
|
||||
},
|
||||
"version": "${version}"
|
||||
}
|
||||
EOF
|
||||
@@ -153,7 +199,7 @@ EOF
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServer}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"restore": {
|
||||
"url": "${restoreUrl}",
|
||||
"key": "${encryptionKey}"
|
||||
@@ -170,43 +216,45 @@ fi
|
||||
echo "=> Downloading version ${version} ..."
|
||||
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
|
||||
|
||||
if ! curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
echo "Could not download source tarball. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
|
||||
if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "${provider}" "../src" &>> "${LOG_FILE}"; then
|
||||
echo "Init script failed. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data "${data}" &>> "${LOG_FILE}"; then
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
rm "${DATA_FILE}"
|
||||
|
||||
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
|
||||
while true; do
|
||||
echo -n "."
|
||||
if status=$(curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
if status=$($curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
[[ -z "$domain" ]] && break # with no domain, we are up and running
|
||||
[[ "$status" == *"\"tls\": true"* ]] && break # with a domain, wait for the cert
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
|
||||
if [[ -n "${domain}" ]]; then
|
||||
echo -e "Visit https://my.${domain} to finish setup once the server has rebooted.\n"
|
||||
echo -e "\n\nVisit https://my.${domain} to finish setup once the server has rebooted.\n"
|
||||
else
|
||||
echo -e "Visit https://<IP> to finish setup once the server has rebooted.\n"
|
||||
echo -e "\n\nVisit https://<IP> to finish setup once the server has rebooted.\n"
|
||||
fi
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
if [[ "${rebootServer}" == "true" ]]; then
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
systemctl reboot
|
||||
fi
|
||||
|
||||
@@ -2,30 +2,23 @@
|
||||
|
||||
set -eu
|
||||
|
||||
assertNotEmpty() {
|
||||
: "${!1:? "$1 is not set."}"
|
||||
}
|
||||
|
||||
# Only GNU getopt supports long options. OS X comes bundled with the BSD getopt
|
||||
# brew install gnu-getopt to get the GNU getopt on OS X
|
||||
[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt"
|
||||
readonly GNU_GETOPT
|
||||
|
||||
args=$(${GNU_GETOPT} -o "" -l "revision:,output:,no-upload" -n "$0" -- "$@")
|
||||
args=$(${GNU_GETOPT} -o "" -l "revision:,output:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
delete_bundle="yes"
|
||||
commitish="HEAD"
|
||||
upload="yes"
|
||||
bundle_file=""
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--revision) commitish="$2"; shift 2;;
|
||||
--output) bundle_file="$2"; delete_bundle="no"; shift 2;;
|
||||
--no-upload) upload="no"; shift;;
|
||||
--output) bundle_file="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -90,21 +83,5 @@ echo "Create final tarball"
|
||||
echo "Cleaning up ${bundle_dir}"
|
||||
rm -rf "${bundle_dir}"
|
||||
|
||||
if [[ "${upload}" == "yes" ]]; then
|
||||
echo "Uploading bundle to S3"
|
||||
echo "Tarball saved at ${bundle_file}"
|
||||
|
||||
assertNotEmpty AWS_DEV_ACCESS_KEY
|
||||
assertNotEmpty AWS_DEV_SECRET_KEY
|
||||
|
||||
# That special header is needed to allow access with singed urls created with different aws credentials than the ones the file got uploaded
|
||||
s3cmd --multipart-chunk-size-mb=5 --ssl --acl-public --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz"
|
||||
|
||||
versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz"
|
||||
echo "The URL for the versions file is: ${versions_file_url}"
|
||||
fi
|
||||
|
||||
if [[ "${delete_bundle}" == "no" ]]; then
|
||||
echo "Tarball preserved at ${bundle_file}"
|
||||
else
|
||||
rm "${bundle_file}"
|
||||
fi
|
||||
|
||||
@@ -16,10 +16,6 @@ readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
|
||||
|
||||
readonly is_update=$([[ -f "${CLOUDRON_CONF}" ]] && echo "yes" || echo "no")
|
||||
|
||||
# create a provision file for testing. %q escapes args. %q is reused as much as necessary to satisfy $@
|
||||
(echo -e "#!/bin/bash\n"; printf "%q " "${script_dir}/installer.sh" "$@") > /root/provision.sh
|
||||
chmod +x /root/provision.sh
|
||||
|
||||
arg_data=""
|
||||
|
||||
args=$(getopt -o "" -l "data:,data-file:" -n "$0" -- "$@")
|
||||
@@ -56,7 +52,7 @@ fi
|
||||
|
||||
if [[ "${is_update}" == "yes" ]]; then
|
||||
echo "Setting up update splash screen"
|
||||
"${box_src_tmp_dir}/setup/splashpage.sh" --data "${arg_data}" # show splash from new code
|
||||
"${box_src_tmp_dir}/setup/splashpage.sh" --data "${arg_data}" || true # show splash from new code
|
||||
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
|
||||
fi
|
||||
|
||||
@@ -68,9 +64,5 @@ rm -rf "${BOX_SRC_DIR}"
|
||||
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
|
||||
chown -R "${USER}:${USER}" "${BOX_SRC_DIR}"
|
||||
|
||||
# create a start file for testing. %q escapes args
|
||||
(echo -e "#!/bin/bash\n"; printf "%q " "${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}") > /home/yellowtent/setup_start.sh
|
||||
chmod +x /home/yellowtent/setup_start.sh
|
||||
|
||||
echo "==> installer: calling box setup script"
|
||||
"${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}"
|
||||
|
||||
@@ -11,6 +11,11 @@ readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
|
||||
|
||||
echo "Setting up nginx update page"
|
||||
|
||||
if [[ ! -f "${DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
echo "No admin.conf found. This Cloudron has no domain yet. Skip splash setup"
|
||||
exit
|
||||
fi
|
||||
|
||||
source "${script_dir}/argparser.sh" "$@" # this injects the arg_* variables used below
|
||||
|
||||
# keep this is sync with config.js appFqdn()
|
||||
|
||||
+83
-53
@@ -6,11 +6,12 @@ echo "==> Cloudron Start"
|
||||
|
||||
readonly USER="yellowtent"
|
||||
readonly DATA_FILE="/root/user_data.img"
|
||||
readonly BOX_SRC_DIR="/home/${USER}/box"
|
||||
readonly DATA_DIR="/home/${USER}/data"
|
||||
readonly CONFIG_DIR="/home/${USER}/configs"
|
||||
readonly SETUP_PROGRESS_JSON="/home/yellowtent/setup/website/progress.json"
|
||||
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
|
||||
readonly HOME_DIR="/home/${USER}"
|
||||
readonly BOX_SRC_DIR="${HOME_DIR}/box"
|
||||
readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
|
||||
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
|
||||
readonly CONFIG_DIR="${HOME_DIR}/configs"
|
||||
readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
@@ -18,8 +19,6 @@ readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
source "${script_dir}/argparser.sh" "$@" # this injects the arg_* variables used below
|
||||
|
||||
readonly is_update=$([[ -f "${CONFIG_DIR}/cloudron.conf" ]] && echo "true" || echo "false")
|
||||
|
||||
set_progress() {
|
||||
local percent="$1"
|
||||
local message="$2"
|
||||
@@ -28,7 +27,7 @@ set_progress() {
|
||||
(echo "{ \"update\": { \"percent\": \"${percent}\", \"message\": \"${message}\" }, \"backup\": {} }" > "${SETUP_PROGRESS_JSON}") 2> /dev/null || true # as this will fail in non-update mode
|
||||
}
|
||||
|
||||
set_progress "10" "Configuring host"
|
||||
set_progress "20" "Configuring host"
|
||||
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
|
||||
timedatectl set-ntp 1
|
||||
timedatectl set-timezone UTC
|
||||
@@ -69,14 +68,20 @@ cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-clo
|
||||
systemctl enable apparmor
|
||||
systemctl restart apparmor
|
||||
|
||||
usermod yellowtent -a -G docker
|
||||
usermod ${USER} -a -G docker
|
||||
temp_file=$(mktemp)
|
||||
sed -e 's,^ExecStart=.*$,ExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs,' /lib/systemd/system/docker.service > "${temp_file}"
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
if ! diff -q /lib/systemd/system/docker.service "${temp_file}" >/dev/null; then
|
||||
mv "${temp_file}" /lib/systemd/system/docker.service
|
||||
# restart docker if options changed
|
||||
if [[ ! -f /etc/systemd/system/docker.service.d/cloudron.conf ]] || ! diff -q /etc/systemd/system/docker.service.d/cloudron.conf "${temp_file}" >/dev/null; then
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
mv "${temp_file}" /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
systemctl daemon-reload
|
||||
systemctl restart docker
|
||||
fi
|
||||
docker network create --subnet=172.18.0.0/16 cloudron || true
|
||||
|
||||
# caas has ssh on port 202 and we disable password login
|
||||
if [[ "${arg_provider}" == "caas" ]]; then
|
||||
@@ -92,12 +97,6 @@ if [[ "${arg_provider}" == "caas" ]]; then
|
||||
fi
|
||||
|
||||
echo "==> Setup btrfs data"
|
||||
if ! grep -q loop.ko /lib/modules/`uname -r`/modules.builtin; then
|
||||
# on scaleway loop is not built-in
|
||||
echo "loop" >> /etc/modules
|
||||
modprobe loop
|
||||
fi
|
||||
|
||||
if [[ ! -d "${DATA_DIR}" ]]; then
|
||||
echo "==> Mounting loopback btrfs"
|
||||
truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service)
|
||||
@@ -108,24 +107,34 @@ fi
|
||||
|
||||
# keep these in sync with paths.js
|
||||
echo "==> Ensuring directories"
|
||||
[[ "${is_update}" == "false" ]] && btrfs subvolume create "${DATA_DIR}/box"
|
||||
mkdir -p "${DATA_DIR}/box/appicons"
|
||||
mkdir -p "${DATA_DIR}/box/certs"
|
||||
mkdir -p "${DATA_DIR}/box/acme" # acme keys
|
||||
mkdir -p "${DATA_DIR}/graphite"
|
||||
mkdir -p "${DATA_DIR}/box/mail/dkim"
|
||||
|
||||
if [[ -n "${arg_fqdn}" ]]; then
|
||||
mkdir -p "${DATA_DIR}/box/mail/dkim/${arg_fqdn}"
|
||||
if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory
|
||||
btrfs subvolume create "${DATA_DIR}/mail"
|
||||
[[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
|
||||
rm -rf "${DATA_DIR}/box/mail"
|
||||
fi
|
||||
mkdir -p "${DATA_DIR}/graphite"
|
||||
mkdir -p "${DATA_DIR}/mail/dkim"
|
||||
|
||||
mkdir -p "${DATA_DIR}/mysql"
|
||||
mkdir -p "${DATA_DIR}/postgresql"
|
||||
mkdir -p "${DATA_DIR}/mongodb"
|
||||
mkdir -p "${DATA_DIR}/snapshots"
|
||||
mkdir -p "${DATA_DIR}/addons"
|
||||
mkdir -p "${DATA_DIR}/addons/mail"
|
||||
mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${DATA_DIR}/acme" # acme challenges
|
||||
mkdir -p "${DATA_DIR}/acme"
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
|
||||
# Migrate box data out of data volume
|
||||
mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}"
|
||||
btrfs subvolume delete "${DATA_DIR}/box"
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}/appicons"
|
||||
mkdir -p "${BOX_DATA_DIR}/certs"
|
||||
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
|
||||
|
||||
echo "==> Configuring journald"
|
||||
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
|
||||
@@ -138,31 +147,38 @@ sed -e "s/^WatchdogSec=.*$/WatchdogSec=3min/" \
|
||||
-i /lib/systemd/system/systemd-journald.service
|
||||
|
||||
# Give user access to system logs
|
||||
usermod -a -G systemd-journal yellowtent
|
||||
usermod -a -G systemd-journal ${USER}
|
||||
mkdir -p /var/log/journal # in some images, this directory is not created making system log to /run/systemd instead
|
||||
chown root:systemd-journal /var/log/journal
|
||||
systemctl daemon-reload
|
||||
systemctl restart systemd-journald
|
||||
setfacl -n -m u:yellowtent:r /var/log/journal/*/system.journal
|
||||
setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal
|
||||
|
||||
echo "==> Creating config directory"
|
||||
rm -rf "${CONFIG_DIR}" && mkdir "${CONFIG_DIR}"
|
||||
|
||||
echo "==> Setting up unbound"
|
||||
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
|
||||
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
|
||||
echo "==> Adding systemd services"
|
||||
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
|
||||
systemctl daemon-reload
|
||||
systemctl enable unbound
|
||||
systemctl enable cloudron.target
|
||||
systemctl enable iptables-restore
|
||||
|
||||
# For logrotate
|
||||
systemctl enable --now cron
|
||||
|
||||
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
systemctl enable --now unbound
|
||||
# ensure unbound runs
|
||||
systemctl restart unbound
|
||||
|
||||
echo "==> Configuring sudoers"
|
||||
rm -f /etc/sudoers.d/yellowtent
|
||||
cp "${script_dir}/start/sudoers" /etc/sudoers.d/yellowtent
|
||||
rm -f /etc/sudoers.d/${USER}
|
||||
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
|
||||
|
||||
echo "==> Configuring collectd"
|
||||
rm -rf /etc/collectd
|
||||
@@ -178,9 +194,15 @@ mkdir -p "${DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
|
||||
if ! grep -q "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then
|
||||
# default nginx service file does not restart on crash
|
||||
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
|
||||
systemctl daemon-reload
|
||||
fi
|
||||
systemctl start nginx
|
||||
|
||||
# bookkeep the version as part of data
|
||||
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${DATA_DIR}/box/version"
|
||||
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
|
||||
# which currently fails because these are readonly fs
|
||||
@@ -188,14 +210,18 @@ echo "==> Cleaning up snapshots"
|
||||
find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete
|
||||
|
||||
# restart mysql to make sure it has latest config
|
||||
# wait for all running mysql jobs
|
||||
cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
|
||||
while true; do
|
||||
if ! systemctl list-jobs | grep mysql; then break; fi
|
||||
echo "Waiting for mysql jobs..."
|
||||
sleep 1
|
||||
done
|
||||
systemctl restart mysql
|
||||
if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then
|
||||
# wait for all running mysql jobs
|
||||
cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
|
||||
while true; do
|
||||
if ! systemctl list-jobs | grep mysql; then break; fi
|
||||
echo "Waiting for mysql jobs..."
|
||||
sleep 1
|
||||
done
|
||||
systemctl restart mysql
|
||||
else
|
||||
systemctl start mysql
|
||||
fi
|
||||
|
||||
readonly mysql_root_password="password"
|
||||
mysqladmin -u root -ppassword password password # reset default root password
|
||||
@@ -207,14 +233,15 @@ if [[ -n "${arg_restore_url}" ]]; then
|
||||
echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
|
||||
while true; do
|
||||
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" | tar -zxf - -C "${DATA_DIR}/box"; then break; fi
|
||||
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
echo "Failed to download data, trying again"
|
||||
done
|
||||
|
||||
set_progress "35" "Setting up MySQL"
|
||||
if [[ -f "${DATA_DIR}/box/box.mysqldump" ]]; then
|
||||
if [[ -f "${BOX_DATA_DIR}/box.mysqldump" ]]; then
|
||||
echo "==> Importing existing database into MySQL"
|
||||
mysql -u root -p${mysql_root_password} box < "${DATA_DIR}/box/box.mysqldump"
|
||||
mysql -u root -p${mysql_root_password} box < "${BOX_DATA_DIR}/box.mysqldump"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -263,10 +290,8 @@ CONF_END
|
||||
echo "==> Changing ownership"
|
||||
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
|
||||
# during updates, do not trample mail ownership behind the the mail container's back
|
||||
find "${DATA_DIR}/box" -mindepth 1 -maxdepth 1 -not -path "${DATA_DIR}/box/mail" -print0 | xargs -0 chown -R "${USER}:${USER}"
|
||||
chown "${USER}:${USER}" "${DATA_DIR}/box"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/box/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${DATA_DIR}"
|
||||
|
||||
@@ -291,9 +316,14 @@ if [[ ! -z "${arg_tls_config}" ]]; then
|
||||
-e "REPLACE INTO settings (name, value) VALUES (\"tls_config\", '$arg_tls_config')" box
|
||||
fi
|
||||
|
||||
echo "==> Generating dhparams (takes forever)"
|
||||
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
|
||||
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
|
||||
fi
|
||||
|
||||
set_progress "60" "Starting Cloudron"
|
||||
systemctl start cloudron.target
|
||||
|
||||
sleep 2 # give systemd sometime to start the processes
|
||||
|
||||
set_progress "100" "Done"
|
||||
set_progress "90" "Almost done"
|
||||
|
||||
@@ -13,12 +13,12 @@ disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mou
|
||||
existing_swap=$(cat /proc/meminfo | grep SwapTotal | awk '{ printf "%.0f", $2/1024 }')
|
||||
|
||||
# all sizes are in mb
|
||||
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly app_count=$((${physical_memory} / 200)) # estimated app count
|
||||
readonly disk_size_gb=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf "%.0f", $3 }')
|
||||
readonly disk_size=$((disk_size_gb * 1024))
|
||||
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }') # can't rely on fdisk human readable units, using bytes instead
|
||||
readonly disk_size=$((${disk_size_bytes}/1024/1024))
|
||||
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
|
||||
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
|
||||
|
||||
echo "Disk device: ${disk_device}"
|
||||
|
||||
@@ -25,12 +25,22 @@ server {
|
||||
# https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # don't use SSLv3 ref: POODLE
|
||||
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
|
||||
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains";
|
||||
# ciphers according to https://weakdh.org/sysadmin.html
|
||||
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
|
||||
ssl_dhparam /home/yellowtent/boxdata/dhparams.pem;
|
||||
add_header Strict-Transport-Security "max-age=15768000";
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
|
||||
# https://github.com/twitter/secureheaders
|
||||
# https://www.owasp.org/index.php/OWASP_Secure_Headers_Project#tab=Compatibility_Matrix
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
add_header X-Download-Options "noopen";
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
add_header X-Permitted-Cross-Domain-Policies "none";
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_intercept_errors on;
|
||||
proxy_read_timeout 3500;
|
||||
|
||||
@@ -36,3 +36,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmbackup.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/authorized_keys.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/authorized_keys.sh
|
||||
|
||||
@@ -4,7 +4,7 @@ OnFailure=crashnotifier@%n.service
|
||||
StopWhenUnneeded=true
|
||||
; journald crashes result in a EPIPE in node. Cannot ignore it as it results in loss of logs.
|
||||
BindsTo=systemd-journald.service
|
||||
After=mysql.service
|
||||
After=mysql.service nginx.service
|
||||
; As cloudron-resize-fs is a one-shot, the Wants= automatically ensures that the service *finishes*
|
||||
Wants=cloudron-resize-fs.service
|
||||
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
# The default ubuntu unbound service uses SysV fallback mode, we want a proper unit file so unbound gets restarted correctly
|
||||
|
||||
[Unit]
|
||||
Description=Unbound DNS Resolver
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
PIDFile=/run/unbound.pid
|
||||
ExecStart=/usr/sbin/unbound -d
|
||||
ExecReload=/bin/kill -HUP $MAINPID
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
+1
-59
@@ -106,18 +106,6 @@ var KNOWN_ADDONS = {
|
||||
teardown: NOOP,
|
||||
backup: NOOP,
|
||||
restore: NOOP
|
||||
},
|
||||
simpleauth: {
|
||||
setup: setupSimpleAuth,
|
||||
teardown: teardownSimpleAuth,
|
||||
backup: NOOP,
|
||||
restore: setupSimpleAuth
|
||||
},
|
||||
_docker: {
|
||||
setup: NOOP,
|
||||
teardown: NOOP,
|
||||
backup: NOOP,
|
||||
restore: NOOP
|
||||
}
|
||||
};
|
||||
|
||||
@@ -219,7 +207,6 @@ function getBindsSync(app, addons) {
|
||||
|
||||
for (var addon in addons) {
|
||||
switch (addon) {
|
||||
case '_docker': binds.push('/var/run/docker.sock:/var/run/docker.sock:rw'); break;
|
||||
case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
default: break;
|
||||
}
|
||||
@@ -287,57 +274,12 @@ function teardownOauth(app, options, callback) {
|
||||
debugApp(app, 'teardownOauth');
|
||||
|
||||
clients.delByAppIdAndType(app.id, clients.TYPE_OAUTH, function (error) {
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) console.error(error);
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) debug(error);
|
||||
|
||||
appdb.unsetAddonConfig(app.id, 'oauth', callback);
|
||||
});
|
||||
}
|
||||
|
||||
function setupSimpleAuth(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
var appId = app.id;
|
||||
var scope = 'profile';
|
||||
|
||||
clients.delByAppIdAndType(app.id, clients.TYPE_SIMPLE_AUTH, function (error) { // remove existing creds
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) return callback(error);
|
||||
|
||||
clients.add(appId, clients.TYPE_SIMPLE_AUTH, '', scope, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = [
|
||||
'SIMPLE_AUTH_SERVER=172.18.0.1',
|
||||
'SIMPLE_AUTH_PORT=' + config.get('simpleAuthPort'),
|
||||
'SIMPLE_AUTH_URL=http://172.18.0.1:' + config.get('simpleAuthPort'), // obsolete, remove
|
||||
'SIMPLE_AUTH_ORIGIN=http://172.18.0.1:' + config.get('simpleAuthPort'),
|
||||
'SIMPLE_AUTH_CLIENT_ID=' + result.id
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting simple auth addon config to %j', env);
|
||||
|
||||
appdb.setAddonConfig(appId, 'simpleauth', env, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function teardownSimpleAuth(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'teardownSimpleAuth');
|
||||
|
||||
clients.delByAppIdAndType(app.id, clients.TYPE_SIMPLE_AUTH, function (error) {
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) console.error(error);
|
||||
|
||||
appdb.unsetAddonConfig(app.id, 'simpleauth', callback);
|
||||
});
|
||||
}
|
||||
|
||||
function setupEmail(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
|
||||
+23
-7
@@ -1,5 +1,3 @@
|
||||
/* jslint node:true */
|
||||
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
@@ -54,13 +52,14 @@ var assert = require('assert'),
|
||||
async = require('async'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror'),
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
|
||||
'apps.xFrameOptions', 'apps.sso' ].join(',');
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
|
||||
|
||||
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'environmentVariable', 'appId' ].join(',');
|
||||
|
||||
@@ -98,6 +97,10 @@ function postProcess(result) {
|
||||
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
|
||||
|
||||
result.sso = !!result.sso; // make it bool
|
||||
|
||||
assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
|
||||
result.debugMode = safe.JSON.parse(result.debugModeJson);
|
||||
delete result.debugModeJson;
|
||||
}
|
||||
|
||||
function get(id, callback) {
|
||||
@@ -185,11 +188,12 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
var installationState = data.installationState || exports.ISTATE_PENDING_INSTALL;
|
||||
var lastBackupId = data.lastBackupId || null; // used when cloning
|
||||
var sso = 'sso' in data ? data.sso : null;
|
||||
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
|
||||
|
||||
var queries = [ ];
|
||||
var queries = [];
|
||||
queries.push({
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso ]
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson ]
|
||||
});
|
||||
|
||||
Object.keys(portBindings).forEach(function (env) {
|
||||
@@ -199,6 +203,14 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
});
|
||||
});
|
||||
|
||||
// only allocate a mailbox if mailboxName is set
|
||||
if (data.mailboxName) {
|
||||
queries.push({
|
||||
query: 'INSERT INTO mailboxes (name, ownerId, ownerType) VALUES (?, ?, ?)',
|
||||
args: [ data.mailboxName, id, mailboxdb.TYPE_APP ]
|
||||
});
|
||||
}
|
||||
|
||||
database.transaction(queries, function (error) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS, error.message));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
@@ -239,13 +251,14 @@ function del(id, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var queries = [
|
||||
{ query: 'DELETE FROM mailboxes WHERE ownerId=?', args: [ id ] },
|
||||
{ query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] },
|
||||
{ query: 'DELETE FROM apps WHERE id = ?', args: [ id ] }
|
||||
];
|
||||
|
||||
database.transaction(queries, function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results[1].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
if (results[2].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
@@ -299,6 +312,9 @@ function updateWithConstraints(id, app, constraints, callback) {
|
||||
} else if (p === 'accessRestriction') {
|
||||
fields.push('accessRestrictionJson = ?');
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p === 'debugMode') {
|
||||
fields.push('debugModeJson = ?');
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p !== 'portBindings') {
|
||||
fields.push(p + ' = ?');
|
||||
values.push(app[p]);
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
var appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:apphealthmonitor'),
|
||||
docker = require('./docker.js').connection,
|
||||
@@ -50,7 +50,7 @@ function setHealth(app, health, callback) {
|
||||
|
||||
debugApp(app, 'marking as unhealthy since not seen for more than %s minutes', UNHEALTHY_THRESHOLD/(60 * 1000));
|
||||
|
||||
if (app.appStoreId !== '') mailer.appDied(app); // do not send mails for dev apps
|
||||
if (!app.debugMode) mailer.appDied(app); // do not send mails for dev apps
|
||||
gHealthInfo[app.id].emailSent = true;
|
||||
} else {
|
||||
debugApp(app, 'waiting for sometime to update the app health');
|
||||
@@ -93,7 +93,7 @@ function checkAppHealth(app, callback) {
|
||||
var healthCheckUrl = 'http://127.0.0.1:' + app.httpPort + manifest.healthCheckPath;
|
||||
superagent
|
||||
.get(healthCheckUrl)
|
||||
.set('Host', config.appFqdn(app.location)) // required for some apache configs with rewrite rules
|
||||
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
|
||||
.redirects(0)
|
||||
.timeout(HEALTHCHECK_INTERVAL)
|
||||
.end(function (error, res) {
|
||||
@@ -111,13 +111,13 @@ function checkAppHealth(app, callback) {
|
||||
}
|
||||
|
||||
function processApps(callback) {
|
||||
appdb.getAll(function (error, apps) {
|
||||
apps.getAll(function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.each(apps, checkAppHealth, function (error) {
|
||||
async.each(result, checkAppHealth, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
var alive = apps
|
||||
var alive = result
|
||||
.filter(function (a) { return a.installationState === appdb.ISTATE_INSTALLED && a.runState === appdb.RSTATE_RUNNING && a.health === appdb.HEALTH_HEALTHY; })
|
||||
.map(function (a) { return (a.location || 'naked_domain') + '|' + a.manifest.id; }).join(', ');
|
||||
|
||||
@@ -138,7 +138,7 @@ function run() {
|
||||
|
||||
/*
|
||||
OOM can be tested using stress tool like so:
|
||||
docker run -ti -m 100M cloudron/base:0.9.0 /bin/bash
|
||||
docker run -ti -m 100M cloudron/base:0.10.0 /bin/bash
|
||||
apt-get update && apt-get install stress
|
||||
stress --vm 1 --vm-bytes 200M --vm-hang 0
|
||||
*/
|
||||
@@ -166,7 +166,7 @@ function processDockerEvents() {
|
||||
debug('OOM Context: %s', context);
|
||||
|
||||
// do not send mails for dev apps
|
||||
if ((!app || app.appStoreId !== '') && (now - lastOomMailTime > OOM_MAIL_LIMIT)) {
|
||||
if ((!app || !app.debugMode) && (now - lastOomMailTime > OOM_MAIL_LIMIT)) {
|
||||
mailer.oomEvent(program, context); // app can be null if it's an addon crash
|
||||
lastOomMailTime = now;
|
||||
}
|
||||
|
||||
+82
-64
@@ -129,18 +129,21 @@ function validateHostname(location, fqdn) {
|
||||
|
||||
// validate the port bindings
|
||||
function validatePortBindings(portBindings, tcpPorts) {
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
|
||||
// keep the public ports in sync with firewall rules in scripts/initializeBaseUbuntuImage.sh
|
||||
// these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1
|
||||
// for custom tcp ports
|
||||
var RESERVED_PORTS = [
|
||||
22, /* ssh */
|
||||
25, /* smtp */
|
||||
53, /* dns */
|
||||
80, /* http */
|
||||
143, /* imap */
|
||||
202, /* caas ssh */
|
||||
443, /* https */
|
||||
465, /* smtps */
|
||||
587, /* submission */
|
||||
919, /* ssh */
|
||||
993, /* imaps */
|
||||
2003, /* graphite (lo) */
|
||||
2004, /* graphite (lo) */
|
||||
@@ -149,7 +152,6 @@ function validatePortBindings(portBindings, tcpPorts) {
|
||||
config.get('sysadminPort'), /* sysadmin app server (lo) */
|
||||
config.get('smtpPort'), /* internal smtp port (lo) */
|
||||
config.get('ldapPort'), /* ldap server (lo) */
|
||||
config.get('simpleAuthPort'), /* simple auth server (lo) */
|
||||
3306, /* mysql (lo) */
|
||||
4190, /* managesieve */
|
||||
8000 /* graphite (lo) */
|
||||
@@ -162,9 +164,9 @@ function validatePortBindings(portBindings, tcpPorts) {
|
||||
if (!/^[a-zA-Z0-9_]+$/.test(env)) return new AppsError(AppsError.BAD_FIELD, env + ' is not valid environment variable');
|
||||
|
||||
if (!Number.isInteger(portBindings[env])) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is not an integer');
|
||||
if (portBindings[env] <= 0 || portBindings[env] > 65535) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is out of range');
|
||||
|
||||
if (RESERVED_PORTS.indexOf(portBindings[env]) !== -1) return new AppsError(AppsError.PORT_RESERVED, String(portBindings[env]));
|
||||
if (portBindings[env] <= 1023 || portBindings[env] > 65535) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is not in permitted range');
|
||||
|
||||
}
|
||||
|
||||
// it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and portBindings. missing values implies
|
||||
@@ -207,6 +209,9 @@ function validateMemoryLimit(manifest, memoryLimit) {
|
||||
// this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used
|
||||
if (memoryLimit === 0) return null;
|
||||
|
||||
// a special value that indicates unlimited memory
|
||||
if (memoryLimit === -1) return null;
|
||||
|
||||
if (memoryLimit < min) return new AppsError(AppsError.BAD_FIELD, 'memoryLimit too small');
|
||||
if (memoryLimit > max) return new AppsError(AppsError.BAD_FIELD, 'memoryLimit too large');
|
||||
|
||||
@@ -227,6 +232,16 @@ function validateXFrameOptions(xFrameOptions) {
|
||||
return (uri.protocol === 'http:' || uri.protocol === 'https:') ? null : new AppsError(AppsError.BAD_FIELD, 'xFrameOptions ALLOW-FROM uri must be a valid http[s] uri' );
|
||||
}
|
||||
|
||||
function validateDebugMode(debugMode) {
|
||||
assert.strictEqual(typeof debugMode, 'object');
|
||||
|
||||
if (debugMode === null) return null;
|
||||
if ('cmd' in debugMode && debugMode.cmd !== null && !Array.isArray(debugMode.cmd)) return new AppsError(AppsError.BAD_FIELD, 'debugMode.cmd must be an array or null' );
|
||||
if ('readonlyRootfs' in debugMode && typeof debugMode.readonlyRootfs !== 'boolean') return new AppsError(AppsError.BAD_FIELD, 'debugMode.readonlyRootfs must be a boolean' );
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getDuplicateErrorDetails(location, portBindings, error) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
@@ -234,7 +249,7 @@ function getDuplicateErrorDetails(location, portBindings, error) {
|
||||
|
||||
var match = error.message.match(/ER_DUP_ENTRY: Duplicate entry '(.*)' for key/);
|
||||
if (!match) {
|
||||
console.error('Unexpected SQL error message.', error);
|
||||
debug('Unexpected SQL error message.', error);
|
||||
return new AppsError(AppsError.INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
@@ -262,7 +277,7 @@ function getAppConfig(app) {
|
||||
}
|
||||
|
||||
function getIconUrlSync(app) {
|
||||
var iconPath = paths.APPICONS_DIR + '/' + app.id + '.png';
|
||||
var iconPath = paths.APP_ICONS_DIR + '/' + app.id + '.png';
|
||||
return fs.existsSync(iconPath) ? '/api/v1/apps/' + app.id + '/icon' : null;
|
||||
}
|
||||
|
||||
@@ -280,11 +295,9 @@ function hasAccessTo(app, user, callback) {
|
||||
if (!app.accessRestriction.groups) return callback(null, false);
|
||||
|
||||
async.some(app.accessRestriction.groups, function (groupId, iteratorDone) {
|
||||
groups.isMember(groupId, user.id, function (error, member) {
|
||||
iteratorDone(!error && member); // async.some does not take error argument in callback
|
||||
});
|
||||
}, function (result) {
|
||||
callback(null, result);
|
||||
groups.isMember(groupId, user.id, iteratorDone);
|
||||
}, function (error, result) {
|
||||
callback(null, !error && result);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -298,6 +311,7 @@ function get(appId, callback) {
|
||||
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
|
||||
callback(null, app);
|
||||
});
|
||||
@@ -316,6 +330,7 @@ function getByIpAddress(ip, callback) {
|
||||
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
|
||||
callback(null, app);
|
||||
});
|
||||
@@ -331,6 +346,7 @@ function getAll(callback) {
|
||||
apps.forEach(function (app) {
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
});
|
||||
|
||||
callback(null, apps);
|
||||
@@ -344,11 +360,9 @@ function getAllByUser(user, callback) {
|
||||
getAll(function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.filter(result, function (app, callback) {
|
||||
hasAccessTo(app, user, function (error, hasAccess) {
|
||||
callback(hasAccess);
|
||||
});
|
||||
}, callback.bind(null, null)); // never error
|
||||
async.filter(result, function (app, iteratorDone) {
|
||||
hasAccessTo(app, user, iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -370,7 +384,7 @@ function purchase(appId, appstoreId, callback) {
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (result.statusCode === 403) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -412,10 +426,13 @@ function unpurchase(appId, appstoreId, callback) {
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -476,7 +493,8 @@ function install(data, auditSource, callback) {
|
||||
memoryLimit = data.memoryLimit || 0,
|
||||
altDomain = data.altDomain || null,
|
||||
xFrameOptions = data.xFrameOptions || 'SAMEORIGIN',
|
||||
sso = 'sso' in data ? data.sso : null;
|
||||
sso = 'sso' in data ? data.sso : null,
|
||||
debugMode = data.debugMode || null;
|
||||
|
||||
assert(data.appStoreId || data.manifest); // atleast one of them is required
|
||||
|
||||
@@ -504,9 +522,12 @@ function install(data, auditSource, callback) {
|
||||
error = validateXFrameOptions(xFrameOptions);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateDebugMode(debugMode);
|
||||
if (error) return callback(error);
|
||||
|
||||
if ('sso' in data && !('optionalSso' in manifest)) return callback(new AppsError(AppsError.BAD_FIELD, 'sso can only be specified for apps with optionalSso'));
|
||||
// if sso was unspecified, enable it by default if possible
|
||||
if (sso === null) sso = !!manifest.addons['simpleauth'] || !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
|
||||
if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
|
||||
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
|
||||
@@ -515,7 +536,7 @@ function install(data, auditSource, callback) {
|
||||
if (icon) {
|
||||
if (!validator.isBase64(icon)) return callback(new AppsError(AppsError.BAD_FIELD, 'icon is not base64'));
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, appId + '.png'), new Buffer(icon, 'base64'))) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, appId + '.png'), new Buffer(icon, 'base64'))) {
|
||||
return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving icon:' + safe.error.message));
|
||||
}
|
||||
}
|
||||
@@ -533,30 +554,26 @@ function install(data, auditSource, callback) {
|
||||
memoryLimit: memoryLimit,
|
||||
altDomain: altDomain,
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso
|
||||
sso: sso,
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
var from = (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.add(from, appId, mailboxdb.TYPE_APP, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'Mailbox already exists'));
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
// save cert to boxdata/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
|
||||
// save cert to data/box/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -612,7 +629,13 @@ function configure(appId, data, auditSource, callback) {
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
// save cert to data/box/certs. TODO: move this to apptask when we have a real task queue
|
||||
if ('debugMode' in data) {
|
||||
values.debugMode = data.debugMode;
|
||||
error = validateDebugMode(values.debugMode);
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
// save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
|
||||
if ('cert' in data && 'key' in data) {
|
||||
if (data.cert && data.key) {
|
||||
error = certificates.validateCertificate(data.cert, data.key, config.appFqdn(location));
|
||||
@@ -683,11 +706,11 @@ function update(appId, data, auditSource, callback) {
|
||||
if (data.icon) {
|
||||
if (!validator.isBase64(data.icon)) return callback(new AppsError(AppsError.BAD_FIELD, 'icon is not base64'));
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, appId + '.png'), new Buffer(data.icon, 'base64'))) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, appId + '.png'), new Buffer(data.icon, 'base64'))) {
|
||||
return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving icon:' + safe.error.message));
|
||||
}
|
||||
} else {
|
||||
safe.fs.unlinkSync(path.join(paths.APPICONS_DIR, appId + '.png'));
|
||||
safe.fs.unlinkSync(path.join(paths.APP_ICONS_DIR, appId + '.png'));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -699,12 +722,16 @@ function update(appId, data, auditSource, callback) {
|
||||
// this allows cloudron install -f --app <appid> for an app installed from the appStore
|
||||
if (app.manifest.id !== values.manifest.id) {
|
||||
if (!data.force) return callback(new AppsError(AppsError.BAD_FIELD, 'manifest id does not match. force to override'));
|
||||
// clear appStoreId so that this app does not get updates anymore. this will mark it as a dev app
|
||||
// clear appStoreId so that this app does not get updates anymore
|
||||
values.appStoreId = '';
|
||||
}
|
||||
|
||||
// do not update apps in debug mode
|
||||
if (app.debugMode && !data.force) return callback(new AppsError(AppsError.BAD_STATE, 'debug mode enabled. force to override'));
|
||||
|
||||
// Ensure we update the memory limit in case the new app requires more memory as a minimum
|
||||
if (values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
|
||||
// 0 and -1 are special values for memory limit indicating unset and unlimited
|
||||
if (app.memoryLimit > 0 && values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
|
||||
values.memoryLimit = values.manifest.memoryLimit;
|
||||
}
|
||||
|
||||
@@ -867,24 +894,19 @@ function clone(appId, data, auditSource, callback) {
|
||||
accessRestriction: app.accessRestriction,
|
||||
xFrameOptions: app.xFrameOptions,
|
||||
lastBackupId: backupId,
|
||||
sso: !!app.sso
|
||||
sso: !!app.sso,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
var from = (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.add(from, newAppId, mailboxdb.TYPE_APP, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'Mailbox already exists'));
|
||||
appdb.add(newAppId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
appdb.add(newAppId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : newAppId });
|
||||
});
|
||||
callback(null, { id : newAppId });
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -904,18 +926,14 @@ function uninstall(appId, auditSource, callback) {
|
||||
unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
mailboxdb.delByOwnerId(appId, function (error) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId: appId });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId: appId });
|
||||
|
||||
taskmanager.startAppTask(appId, callback);
|
||||
});
|
||||
taskmanager.startAppTask(appId, callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
+23
-15
@@ -190,6 +190,9 @@ function downloadIcon(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// nothing to download if we dont have an appStoreId
|
||||
if (!app.appStoreId) return callback(null);
|
||||
|
||||
debugApp(app, 'Downloading icon of %s@%s', app.appStoreId, app.manifest.version);
|
||||
|
||||
var iconUrl = config.apiServerOrigin() + '/api/v1/apps/' + app.appStoreId + '/versions/' + app.manifest.version + '/icon';
|
||||
@@ -203,22 +206,21 @@ function downloadIcon(app, callback) {
|
||||
if (error && !error.response) return retryCallback(new Error('Network error downloading icon:' + error.message));
|
||||
if (res.statusCode !== 200) return retryCallback(null); // ignore error. this can also happen for apps installed with cloudron-cli
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
|
||||
|
||||
retryCallback(null);
|
||||
});
|
||||
}, callback);
|
||||
}
|
||||
|
||||
function registerSubdomain(app, callback) {
|
||||
function registerSubdomain(app, overwrite, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof overwrite, 'boolean');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// even though the bare domain is already registered in the appstore, we still
|
||||
// need to register it so that we have a dnsRecordId to wait for it to complete
|
||||
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Registering subdomain location [%s]', app.location);
|
||||
|
||||
@@ -228,7 +230,7 @@ function registerSubdomain(app, callback) {
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
// note that the appstore sets up the naked domain for non-custom domains
|
||||
if (config.isCustomDomain() && values.length !== 0 && !app.dnsRecordId) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
if (config.isCustomDomain() && values.length !== 0 && !overwrite) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], function (error, changeId) {
|
||||
if (error && (error.reason === SubdomainError.STILL_BUSY || error.reason === SubdomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
@@ -255,7 +257,7 @@ function unregisterSubdomain(app, location, callback) {
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 30, interval: 5000 }, function (retryCallback) {
|
||||
@@ -278,7 +280,7 @@ function removeIcon(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
fs.unlink(path.join(paths.APPICONS_DIR, app.id + '.png'), function (error) {
|
||||
fs.unlink(path.join(paths.APP_ICONS_DIR, app.id + '.png'), function (error) {
|
||||
if (error && error.code !== 'ENOENT') debugApp(app, 'cannot remove icon : %s', error);
|
||||
callback(null);
|
||||
});
|
||||
@@ -293,7 +295,7 @@ function waitForDnsPropagation(app, callback) {
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
subdomains.waitForDns(config.appFqdn(app.location), ip, 'A', { interval: 5000, times: 120 }, callback);
|
||||
@@ -353,7 +355,6 @@ function install(app, callback) {
|
||||
addons.teardownAddons.bind(null, app, app.manifest.addons),
|
||||
deleteVolume.bind(null, app),
|
||||
unregisterSubdomain.bind(null, app, app.location),
|
||||
// removeIcon.bind(null, app), // do not remove icon for non-appstore installs
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
@@ -361,7 +362,7 @@ function install(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '30, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, false /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -453,7 +454,6 @@ function restore(app, callback) {
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
removeIcon.bind(null, app),
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
@@ -461,7 +461,7 @@ function restore(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '55, Registering subdomain' }), // ip might change during upgrades
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -523,8 +523,17 @@ function configure(app, callback) {
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '20, Downloading icon' }),
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '35, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '45, Ensuring volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
// re-setup addons since they rely on the app's fqdn (e.g oauth)
|
||||
updateApp.bind(null, app, { installationProgress: '50, Setting up addons' }),
|
||||
@@ -592,7 +601,6 @@ function update(app, callback) {
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
// removeIcon.bind(null, app), // do not remove icon, otherwise the UI breaks for a short time...
|
||||
|
||||
function (next) {
|
||||
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
|
||||
|
||||
+9
-3
@@ -352,9 +352,11 @@ function backupBoxAndApps(auditSource, callback) {
|
||||
var processed = 0;
|
||||
var step = 100/(allApps.length+1);
|
||||
|
||||
progress.set(progress.BACKUP, processed, '');
|
||||
progress.set(progress.BACKUP, step * processed, '');
|
||||
|
||||
async.mapSeries(allApps, function iterator(app, iteratorCallback) {
|
||||
progress.set(progress.BACKUP, step * processed, 'Backing up ' + (app.altDomain || config.appFqdn(app.location)));
|
||||
|
||||
++processed;
|
||||
|
||||
backupApp(app, app.manifest, prefix, function (error, backupId) {
|
||||
@@ -363,7 +365,7 @@ function backupBoxAndApps(auditSource, callback) {
|
||||
return iteratorCallback(error);
|
||||
}
|
||||
|
||||
progress.set(progress.BACKUP, step * processed, 'Backed up app at ' + app.location);
|
||||
progress.set(progress.BACKUP, step * processed, 'Backed up ' + (app.altDomain || config.appFqdn(app.location)));
|
||||
|
||||
iteratorCallback(null, backupId || null); // clear backupId if is in BAD_STATE and never backed up
|
||||
});
|
||||
@@ -375,6 +377,8 @@ function backupBoxAndApps(auditSource, callback) {
|
||||
|
||||
backupIds = backupIds.filter(function (id) { return id !== null; }); // remove apps in bad state that were never backed up
|
||||
|
||||
progress.set(progress.BACKUP, step * processed, 'Backing up system data');
|
||||
|
||||
backupBoxWithAppBackupIds(backupIds, prefix, function (error, filename) {
|
||||
progress.set(progress.BACKUP, 100, error ? error.message : '');
|
||||
|
||||
@@ -398,7 +402,7 @@ function backup(auditSource, callback) {
|
||||
backupBoxAndApps(auditSource, function (error) { // start the backup operation in the background
|
||||
if (error) {
|
||||
debug('backup failed.', error);
|
||||
mailer.backupFailed(JSON.stringify(error));
|
||||
mailer.backupFailed(error);
|
||||
}
|
||||
|
||||
locker.unlock(locker.OP_FULL_BACKUP);
|
||||
@@ -410,6 +414,8 @@ function backup(auditSource, callback) {
|
||||
function ensureBackup(auditSource, callback) {
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
|
||||
debug('ensureBackup: %j', auditSource);
|
||||
|
||||
getPaged(1, 1, function (error, backups) {
|
||||
if (error) {
|
||||
debug('Unable to list backups', error);
|
||||
|
||||
+6
-2
@@ -352,12 +352,16 @@ Acme.prototype.createKeyAndCsr = function (domain, callback) {
|
||||
Acme.prototype.downloadChain = function (linkHeader, callback) {
|
||||
if (!linkHeader) return new AcmeError(AcmeError.EXTERNAL_ERROR, 'Empty link header when downloading certificate chain');
|
||||
|
||||
debug('downloadChain: linkHeader %s', linkHeader);
|
||||
|
||||
var linkInfo = parseLinks(linkHeader);
|
||||
if (!linkInfo || !linkInfo.up) return new AcmeError(AcmeError.EXTERNAL_ERROR, 'Failed to parse link header when downloading certificate chain');
|
||||
|
||||
debug('downloadChain: downloading from %s', this.caOrigin + linkInfo.up);
|
||||
var intermediateCertUrl = linkInfo.up.startsWith('https://') ? linkInfo.up : (this.caOrigin + linkInfo.up);
|
||||
|
||||
superagent.get(this.caOrigin + linkInfo.up).buffer().parse(function (res, done) {
|
||||
debug('downloadChain: downloading from %s', intermediateCertUrl);
|
||||
|
||||
superagent.get(intermediateCertUrl).buffer().parse(function (res, done) {
|
||||
var data = [ ];
|
||||
res.on('data', function(chunk) { data.push(chunk); });
|
||||
res.on('end', function () { res.text = Buffer.concat(data); done(); });
|
||||
|
||||
+93
-37
@@ -1,16 +1,25 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
ensureFallbackCertificate: ensureFallbackCertificate,
|
||||
|
||||
renewAll: renewAll,
|
||||
setFallbackCertificate: setFallbackCertificate,
|
||||
setAdminCertificate: setAdminCertificate,
|
||||
CertificatesError: CertificatesError,
|
||||
|
||||
ensureFallbackCertificate: ensureFallbackCertificate,
|
||||
setFallbackCertificate: setFallbackCertificate,
|
||||
|
||||
validateCertificate: validateCertificate,
|
||||
ensureCertificate: ensureCertificate,
|
||||
getAdminCertificatePath: getAdminCertificatePath,
|
||||
adminCertificateExists: adminCertificateExists,
|
||||
|
||||
setAdminCertificate: setAdminCertificate,
|
||||
getAdminCertificate: getAdminCertificate,
|
||||
|
||||
renewAll: renewAll,
|
||||
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_CERT_CHANGED: 'cert_changed',
|
||||
|
||||
// exported for testing
|
||||
_getApi: getApi
|
||||
@@ -34,8 +43,7 @@ var acme = require('./cert/acme.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
user = require('./user.js'),
|
||||
util = require('util'),
|
||||
x509 = require('x509');
|
||||
util = require('util');
|
||||
|
||||
function CertificatesError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
@@ -60,6 +68,20 @@ CertificatesError.INTERNAL_ERROR = 'Internal Error';
|
||||
CertificatesError.INVALID_CERT = 'Invalid certificate';
|
||||
CertificatesError.NOT_FOUND = 'Not Found';
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
callback();
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
callback();
|
||||
}
|
||||
|
||||
function getApi(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -99,7 +121,7 @@ function ensureFallbackCertificate(callback) {
|
||||
var fallbackCertPath = path.join(paths.NGINX_CERT_DIR, 'host.cert');
|
||||
var fallbackKeyPath = path.join(paths.NGINX_CERT_DIR, 'host.key');
|
||||
|
||||
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) { // user's custom fallback certs (when restoring, updating)
|
||||
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) { // existing custom fallback certs (when restarting, restoring, updating)
|
||||
debug('ensureFallbackCertificate: using fallback certs provided by user');
|
||||
if (!safe.child_process.execSync('cp ' + certFilePath + ' ' + fallbackCertPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
if (!safe.child_process.execSync('cp ' + keyFilePath + ' ' + fallbackKeyPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
@@ -107,7 +129,8 @@ function ensureFallbackCertificate(callback) {
|
||||
return callback();
|
||||
}
|
||||
|
||||
if (config.tlsCert() && config.tlsKey()) { // cert from CaaS or cloudron-setup
|
||||
if (config.tlsCert() && config.tlsKey()) {
|
||||
// cert from CaaS or cloudron-setup. these files should _not_ be part of the backup
|
||||
debug('ensureFallbackCertificate: using CaaS/cloudron-setup fallback certs');
|
||||
if (!safe.fs.writeFileSync(fallbackCertPath, config.tlsCert())) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
if (!safe.fs.writeFileSync(fallbackKeyPath, config.tlsKey())) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
@@ -115,11 +138,16 @@ function ensureFallbackCertificate(callback) {
|
||||
return callback();
|
||||
}
|
||||
|
||||
// generate a self-signed cert (FIXME: this cert does not cover the naked domain. needs SAN)
|
||||
// generate a self-signed cert. it's in backup dir so that we don't create a new cert across restarts
|
||||
// FIXME: this cert does not cover the naked domain. needs SAN
|
||||
if (config.fqdn()) {
|
||||
debug('ensureFallbackCertificate: generating self-signed certificate');
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=*.%s -nodes', fallbackKeyPath, fallbackCertPath, config.fqdn());
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=*.%s -nodes', keyFilePath, certFilePath, config.fqdn());
|
||||
safe.child_process.execSync(certCommand);
|
||||
|
||||
if (!safe.child_process.execSync('cp ' + certFilePath + ' ' + fallbackCertPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
if (!safe.child_process.execSync('cp ' + keyFilePath + ' ' + fallbackKeyPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
|
||||
return callback();
|
||||
} else {
|
||||
debug('ensureFallbackCertificate: cannot generate fallback certificate without domain');
|
||||
@@ -218,6 +246,8 @@ function renewAll(auditSource, callback) {
|
||||
configureFunc(function (ignoredError) {
|
||||
if (ignoredError) debug('fallbackExpiredCertificates: error reconfiguring app', ignoredError);
|
||||
|
||||
exports.events.emit(exports.EVENT_CERT_CHANGED, domain);
|
||||
|
||||
iteratorCallback(); // move to next app
|
||||
});
|
||||
});
|
||||
@@ -237,25 +267,30 @@ function validateCertificate(cert, key, fqdn) {
|
||||
if (!cert && key) return new Error('missing cert');
|
||||
if (cert && !key) return new Error('missing key');
|
||||
|
||||
var content;
|
||||
try {
|
||||
content = x509.parseCert(cert);
|
||||
} catch (e) {
|
||||
return new Error('invalid cert: ' + e.message);
|
||||
}
|
||||
|
||||
// check expiration
|
||||
if (content.notAfter < new Date()) return new Error('cert expired');
|
||||
|
||||
function matchesDomain(domain) {
|
||||
if (typeof domain !== 'string') return false;
|
||||
if (domain === fqdn) return true;
|
||||
if (domain.indexOf('*') === 0 && domain.slice(2) === fqdn.slice(fqdn.indexOf('.') + 1)) return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// check domain
|
||||
var domains = content.altNames.concat(content.subject.commonName);
|
||||
// get commonName (http://stackoverflow.com/questions/17353122/parsing-strings-crt-files)
|
||||
var result = safe.child_process.execSync('openssl x509 -noout -subject | sed -r "s|.*CN=(.*)|\\1|; s|/[^/]*=.*$||"', { encoding: 'utf8', input: cert });
|
||||
if (!result) return new Error(util.format('could not get CN'));
|
||||
var commonName = result.trim();
|
||||
debug('validateCertificate: detected commonName as %s', commonName);
|
||||
|
||||
// https://github.com/drwetter/testssl.sh/pull/383
|
||||
var cmd = `openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | \
|
||||
grep "DNS:" | \
|
||||
sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"`;
|
||||
result = safe.child_process.execSync(cmd, { encoding: 'utf8', input: cert });
|
||||
var altNames = result ? [ ] : result.trim().split(' '); // might fail if cert has no SAN
|
||||
debug('validateCertificate: detected altNames as %j', altNames);
|
||||
|
||||
// check altNames
|
||||
var domains = altNames.concat(commonName);
|
||||
if (!domains.some(matchesDomain)) return new Error(util.format('cert is not valid for this domain. Expecting %s in %j', fqdn, domains));
|
||||
|
||||
// http://httpd.apache.org/docs/2.0/ssl/ssl_faq.html#verify
|
||||
@@ -263,6 +298,10 @@ function validateCertificate(cert, key, fqdn) {
|
||||
var keyModulus = safe.child_process.execSync('openssl rsa -noout -modulus', { encoding: 'utf8', input: key });
|
||||
if (certModulus !== keyModulus) return new Error('key does not match the cert');
|
||||
|
||||
// check expiration
|
||||
result = safe.child_process.execSync('openssl x509 -checkend 0', { encoding: 'utf8', input: cert });
|
||||
if (!result) return new Error('cert expired');
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -282,6 +321,8 @@ function setFallbackCertificate(cert, key, callback) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, 'host.cert'), cert)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, 'host.key'), key)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
|
||||
exports.events.emit(exports.EVENT_CERT_CHANGED, '*.' + config.fqdn());
|
||||
|
||||
nginx.reload(function (error) {
|
||||
if (error) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, error));
|
||||
|
||||
@@ -296,15 +337,14 @@ function getFallbackCertificatePath(callback) {
|
||||
callback(null, path.join(paths.NGINX_CERT_DIR, 'host.cert'), path.join(paths.NGINX_CERT_DIR, 'host.key'));
|
||||
}
|
||||
|
||||
// FIXME: setting admin cert needs to restart the mail container because it uses admin cert
|
||||
function setAdminCertificate(cert, key, callback) {
|
||||
assert.strictEqual(typeof cert, 'string');
|
||||
assert.strictEqual(typeof key, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var vhost = config.adminFqdn();
|
||||
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
|
||||
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
|
||||
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.cert');
|
||||
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.key');
|
||||
|
||||
var error = validateCertificate(cert, key, vhost);
|
||||
if (error) return callback(new CertificatesError(CertificatesError.INVALID_CERT, error.message));
|
||||
@@ -313,6 +353,8 @@ function setAdminCertificate(cert, key, callback) {
|
||||
if (!safe.fs.writeFileSync(certFilePath, cert)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
if (!safe.fs.writeFileSync(keyFilePath, key)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
|
||||
|
||||
exports.events.emit(exports.EVENT_CERT_CHANGED, vhost);
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
|
||||
}
|
||||
|
||||
@@ -320,20 +362,33 @@ function getAdminCertificatePath(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var vhost = config.adminFqdn();
|
||||
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
|
||||
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
|
||||
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.cert');
|
||||
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.key');
|
||||
|
||||
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) return callback(null, certFilePath, keyFilePath);
|
||||
|
||||
certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
|
||||
keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
|
||||
|
||||
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) return callback(null, certFilePath, keyFilePath);
|
||||
|
||||
getFallbackCertificatePath(callback);
|
||||
}
|
||||
|
||||
function adminCertificateExists() {
|
||||
var vhost = config.adminFqdn();
|
||||
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
|
||||
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
|
||||
function getAdminCertificate(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
return (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath));
|
||||
getAdminCertificatePath(function (error, certFilePath, keyFilePath) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var cert = safe.fs.readFileSync(certFilePath);
|
||||
if (!cert) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error));
|
||||
|
||||
var key = safe.fs.readFileSync(keyFilePath);
|
||||
if (!cert) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error));
|
||||
|
||||
return callback(null, cert, key);
|
||||
});
|
||||
}
|
||||
|
||||
function ensureCertificate(app, callback) {
|
||||
@@ -357,10 +412,11 @@ function ensureCertificate(app, callback) {
|
||||
debug('ensureCertificate: %s. certificate already exists at %s', domain, keyFilePath);
|
||||
|
||||
if (!isExpiringSync(certFilePath, 24 * 1)) return callback(null, certFilePath, keyFilePath);
|
||||
debug('ensureCertificate: %s cert require renewal', domain);
|
||||
} else {
|
||||
debug('ensureCertificate: %s cert does not exist', domain);
|
||||
}
|
||||
|
||||
debug('ensureCertificate: %s cert require renewal', domain);
|
||||
|
||||
getApi(app, function (error, api, apiOptions) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
@@ -32,7 +32,6 @@ exports = module.exports = {
|
||||
TYPE_EXTERNAL: 'external',
|
||||
TYPE_BUILT_IN: 'built-in',
|
||||
TYPE_OAUTH: 'addon-oauth',
|
||||
TYPE_SIMPLE_AUTH: 'addon-simpleauth',
|
||||
TYPE_PROXY: 'addon-proxy'
|
||||
};
|
||||
|
||||
@@ -192,7 +191,6 @@ function getAll(callback) {
|
||||
|
||||
if (record.type === exports.TYPE_PROXY) record.name = result.manifest.title + ' Website Proxy';
|
||||
if (record.type === exports.TYPE_OAUTH) record.name = result.manifest.title + ' OAuth';
|
||||
if (record.type === exports.TYPE_SIMPLE_AUTH) record.name = result.manifest.title + ' Simple Auth';
|
||||
|
||||
record.location = result.location;
|
||||
|
||||
|
||||
+143
-93
@@ -8,6 +8,7 @@ exports = module.exports = {
|
||||
activate: activate,
|
||||
getConfig: getConfig,
|
||||
getStatus: getStatus,
|
||||
dnsSetup: dnsSetup,
|
||||
|
||||
sendHeartbeat: sendHeartbeat,
|
||||
sendAliveStatus: sendAliveStatus,
|
||||
@@ -24,7 +25,9 @@ exports = module.exports = {
|
||||
readDkimPublicKeySync: readDkimPublicKeySync,
|
||||
refreshDNS: refreshDNS,
|
||||
|
||||
configureAdmin: configureAdmin
|
||||
events: null,
|
||||
|
||||
EVENT_ACTIVATED: 'activated'
|
||||
};
|
||||
|
||||
var apps = require('./apps.js'),
|
||||
@@ -69,8 +72,6 @@ var REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh'),
|
||||
UPDATE_CMD = path.join(__dirname, 'scripts/update.sh'),
|
||||
RETIRE_CMD = path.join(__dirname, 'scripts/retire.sh');
|
||||
|
||||
var IP_BASED_SETUP_NAME = 'ip_based_setup'; // This will be used for cert and nginx config file names
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
// result to not depend on the appstore
|
||||
@@ -113,6 +114,7 @@ CloudronError.BAD_FIELD = 'Field error';
|
||||
CloudronError.INTERNAL_ERROR = 'Internal Error';
|
||||
CloudronError.EXTERNAL_ERROR = 'External Error';
|
||||
CloudronError.ALREADY_PROVISIONED = 'Already Provisioned';
|
||||
CloudronError.ALREADY_SETUP = 'Already Setup';
|
||||
CloudronError.BAD_STATE = 'Bad state';
|
||||
CloudronError.ALREADY_UPTODATE = 'No Update Available';
|
||||
CloudronError.NOT_FOUND = 'Not found';
|
||||
@@ -121,40 +123,63 @@ CloudronError.SELF_UPGRADE_NOT_SUPPORTED = 'Self upgrade not supported';
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
gUpdatingDns = false;
|
||||
gBoxAndUserDetails = null;
|
||||
|
||||
async.series([
|
||||
certificates.initialize,
|
||||
settings.initialize,
|
||||
platform.initialize,
|
||||
installAppBundle,
|
||||
syncConfigState
|
||||
checkConfigState,
|
||||
configureDefaultServer
|
||||
], callback);
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
|
||||
platform.events.removeListener(platform.EVENT_READY, onPlatformReady);
|
||||
|
||||
async.series([
|
||||
cron.uninitialize,
|
||||
taskmanager.pauseTasks,
|
||||
mailer.stop,
|
||||
platform.uninitialize
|
||||
platform.uninitialize,
|
||||
certificates.uninitialize,
|
||||
settings.uninitialize
|
||||
], callback);
|
||||
}
|
||||
|
||||
function onConfigured(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('onConfigured');
|
||||
// if we hit here, the domain has to be set, this is a logic issue if it isn't
|
||||
assert(config.fqdn());
|
||||
|
||||
debug('onConfigured: current state: %j', gConfigState);
|
||||
|
||||
if (gConfigState.configured) return callback(); // re-entracy flag
|
||||
|
||||
gConfigState.configured = true;
|
||||
|
||||
platform.events.on(platform.EVENT_READY, onPlatformReady);
|
||||
settings.events.on(settings.DNS_CONFIG_KEY, function () { refreshDNS(); });
|
||||
|
||||
async.series([
|
||||
clients.addDefaultClients,
|
||||
cron.initialize,
|
||||
certificates.ensureFallbackCertificate,
|
||||
platform.initialize, // requires fallback certs in mail container
|
||||
platform.start, // requires fallback certs for mail container
|
||||
ensureDkimKey,
|
||||
addDnsRecords,
|
||||
configureAdmin,
|
||||
mailer.start
|
||||
mailer.start,
|
||||
cron.initialize // do not send heartbeats until we are "ready"
|
||||
], callback);
|
||||
}
|
||||
|
||||
@@ -172,39 +197,64 @@ function getConfigStateSync() {
|
||||
return gConfigState;
|
||||
}
|
||||
|
||||
function isConfigured(callback) {
|
||||
// set of rules to see if we have the configs required for cloudron to function
|
||||
// note this checks for missing configs and not invalid configs
|
||||
function checkConfigState(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (!config.fqdn()) return callback(null, false);
|
||||
if (!config.fqdn()) {
|
||||
settings.events.once(settings.DNS_CONFIG_KEY, function () { checkConfigState(); }); // check again later
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
settings.getDnsConfig(function (error, dnsConfig) {
|
||||
if (error) return callback(error);
|
||||
debug('checkConfigState: configured');
|
||||
|
||||
if (!dnsConfig) return callback(null, false);
|
||||
onConfigured(callback);
|
||||
}
|
||||
|
||||
var isConfigured = (config.isCustomDomain() && (dnsConfig.provider === 'route53' || dnsConfig.provider === 'digitalocean' || dnsConfig.provider === 'noop' || dnsConfig.provider === 'manual')) ||
|
||||
(!config.isCustomDomain() && dnsConfig.provider === 'caas');
|
||||
function dnsSetup(dnsConfig, domain, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback(null, isConfigured);
|
||||
if (config.fqdn()) return callback(new CloudronError(CloudronError.ALREADY_SETUP));
|
||||
|
||||
settings.setDnsConfig(dnsConfig, domain, function (error) {
|
||||
if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
config.set('fqdn', domain); // set fqdn only after dns config is valid, otherwise cannot re-setup if we failed
|
||||
|
||||
onConfigured(); // do not block
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function syncConfigState(callback) {
|
||||
function configureDefaultServer(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
isConfigured(function (error, configured) {
|
||||
debug('configureDefaultServer: domain %s', config.fqdn());
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback();
|
||||
|
||||
var certFilePath = path.join(paths.NGINX_CERT_DIR, 'default.cert');
|
||||
var keyFilePath = path.join(paths.NGINX_CERT_DIR, 'default.key');
|
||||
|
||||
if (!fs.existsSync(certFilePath) || !fs.existsSync(keyFilePath)) {
|
||||
debug('configureDefaultServer: create new cert');
|
||||
|
||||
var cn = 'cloudron-' + (new Date()).toISOString(); // randomize date a bit to keep firefox happy
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=%s -nodes', keyFilePath, certFilePath, cn);
|
||||
safe.child_process.execSync(certCommand);
|
||||
}
|
||||
|
||||
safe.fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR,'ip_based_setup.conf'));
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, 'default.conf', '', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('syncConfigState: configured = %s', configured);
|
||||
debug('configureDefaultServer: done');
|
||||
|
||||
if (configured) {
|
||||
gConfigState.configured = true;
|
||||
return onConfigured(callback);
|
||||
}
|
||||
|
||||
settings.events.once(settings.DNS_CONFIG_KEY, function () { syncConfigState(); }); // check again later
|
||||
callback();
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -215,42 +265,23 @@ function configureAdmin(callback) {
|
||||
|
||||
debug('configureAdmin');
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var certFilePath = path.join(paths.NGINX_CERT_DIR, IP_BASED_SETUP_NAME + '-' + ip + '.cert');
|
||||
var keyFilePath = path.join(paths.NGINX_CERT_DIR, IP_BASED_SETUP_NAME + '-' + ip + '.key');
|
||||
|
||||
// check if we already have a cert for this IP, otherwise create one, this is mostly useful for servers with changing IPs
|
||||
if (!fs.existsSync(certFilePath) || !fs.existsSync(keyFilePath)) {
|
||||
debug('configureAdmin: create new cert for %s', ip);
|
||||
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=%s -nodes', keyFilePath, certFilePath, ip);
|
||||
safe.child_process.execSync(certCommand);
|
||||
}
|
||||
|
||||
// always create a configuration for the ip
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, IP_BASED_SETUP_NAME + '.conf', '', function (error) {
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// skip my.domain.com setup if we don't have a domain
|
||||
if (!config.fqdn()) return callback(null);
|
||||
gConfigState.dns = true;
|
||||
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return callback(error);
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) { // currently, this can never happen
|
||||
debug('Error obtaining certificate. Proceed anyway', error);
|
||||
return callback();
|
||||
}
|
||||
|
||||
gConfigState.dns = true;
|
||||
gConfigState.tls = true;
|
||||
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) { // currently, this can never happen
|
||||
debug('Error obtaining certificate. Proceed anyway', error);
|
||||
return callback();
|
||||
}
|
||||
|
||||
gConfigState.tls = true;
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
|
||||
});
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -315,6 +346,8 @@ function activate(username, password, email, displayName, ip, auditSource, callb
|
||||
|
||||
eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, { });
|
||||
|
||||
exports.events.emit(exports.EVENT_ACTIVATED);
|
||||
|
||||
callback(null, { token: token, expires: expires });
|
||||
});
|
||||
});
|
||||
@@ -406,7 +439,7 @@ function getConfig(callback) {
|
||||
}
|
||||
|
||||
function sendHeartbeat() {
|
||||
if (!config.token() || !config.fqdn()) return;
|
||||
if (config.provider() !== 'caas') return;
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/heartbeat';
|
||||
superagent.post(url).query({ token: config.token(), version: config.version() }).timeout(30 * 1000).end(function (error, result) {
|
||||
@@ -419,7 +452,7 @@ function sendHeartbeat() {
|
||||
function sendAliveStatus(callback) {
|
||||
if (typeof callback !== 'function') {
|
||||
callback = function (error) {
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) console.error(error);
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) debug(error);
|
||||
else if (error) debug(error);
|
||||
};
|
||||
}
|
||||
@@ -435,7 +468,11 @@ function sendAliveStatus(callback) {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
@@ -460,13 +497,16 @@ function sendAliveStatus(callback) {
|
||||
},
|
||||
backupConfig: {
|
||||
provider: result[settings.BACKUP_CONFIG_KEY].provider
|
||||
}
|
||||
},
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY]
|
||||
};
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
if (!config.token()) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, 'no token set'));
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
|
||||
@@ -477,7 +517,11 @@ function sendAliveStatus(callback) {
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, 'not registered yet'));
|
||||
|
||||
if (!result.token) {
|
||||
debug('sendAliveStatus: Cloudron not yet registered');
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
sendAliveStatusWithAppstoreConfig(backendSettings, result);
|
||||
});
|
||||
@@ -485,12 +529,7 @@ function sendAliveStatus(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function readDkimPublicKeySync() {
|
||||
if (!config.fqdn()) {
|
||||
debug('Cannot read dkim public key without a domain.', safe.error);
|
||||
return null;
|
||||
}
|
||||
|
||||
function ensureDkimKey(callback) {
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPrivateKeyFile = path.join(dkimPath, 'private');
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
@@ -509,6 +548,18 @@ function readDkimPublicKeySync() {
|
||||
debug('DKIM keys already present');
|
||||
}
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function readDkimPublicKeySync() {
|
||||
if (!config.fqdn()) {
|
||||
debug('Cannot read dkim public key without a domain.', safe.error);
|
||||
return null;
|
||||
}
|
||||
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
|
||||
var publicKey = safe.fs.readFileSync(dkimPublicKeyFile, 'utf8');
|
||||
|
||||
if (publicKey === null) {
|
||||
@@ -523,6 +574,7 @@ function readDkimPublicKeySync() {
|
||||
}
|
||||
|
||||
// NOTE: if you change the SPF record here, be sure the wait check in mailer.js
|
||||
// https://agari.zendesk.com/hc/en-us/articles/202952749-How-long-can-my-SPF-record-be-
|
||||
function txtRecordsWithSpf(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -531,21 +583,25 @@ function txtRecordsWithSpf(callback) {
|
||||
|
||||
debug('txtRecordsWithSpf: current txt records - %j', txtRecords);
|
||||
|
||||
var i, validSpf;
|
||||
var i, matches, validSpf;
|
||||
|
||||
for (i = 0; i < txtRecords.length; i++) {
|
||||
if (txtRecords[i].indexOf('"v=spf1 ') !== 0) continue; // not SPF
|
||||
matches = txtRecords[i].match(/^("?v=spf1) /); // DO backend may return without quotes
|
||||
if (matches === null) continue;
|
||||
|
||||
validSpf = txtRecords[i].indexOf(' a:' + config.adminFqdn() + ' ') !== -1;
|
||||
break;
|
||||
// this won't work if the entry is arbitrarily "split" across quoted strings
|
||||
validSpf = txtRecords[i].indexOf('a:' + config.adminFqdn()) !== -1;
|
||||
break; // there can only be one SPF record
|
||||
}
|
||||
|
||||
if (validSpf) return callback(null, null);
|
||||
|
||||
if (i == txtRecords.length) {
|
||||
txtRecords[i] = '"v=spf1 a:' + config.adminFqdn() + ' ~all"';
|
||||
} else {
|
||||
txtRecords[i] = '"v=spf1 a:' + config.adminFqdn() + ' ' + txtRecords[i].slice('"v=spf1 '.length);
|
||||
if (!matches) { // no spf record was found, create one
|
||||
txtRecords.push('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
|
||||
debug('txtRecordsWithSpf: adding txt record');
|
||||
} else { // just add ourself
|
||||
txtRecords[i] = matches[1] + ' a:' + config.adminFqdn() + txtRecords[i].slice(matches[1].length);
|
||||
debug('txtRecordsWithSpf: inserting txt record');
|
||||
}
|
||||
|
||||
return callback(null, txtRecords);
|
||||
@@ -566,7 +622,7 @@ function addDnsRecords(callback) {
|
||||
var dkimKey = readDkimPublicKeySync();
|
||||
if (!dkimKey) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, new Error('Failed to read dkim public key')));
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
|
||||
@@ -639,7 +695,7 @@ function update(boxUpdateInfo, auditSource, callback) {
|
||||
debug('Starting upgrade');
|
||||
doUpgrade(boxUpdateInfo, function (error) {
|
||||
if (error) {
|
||||
console.error('Upgrade failed with error:', error);
|
||||
debug('Upgrade failed with error:', error);
|
||||
locker.unlock(locker.OP_BOX_UPDATE);
|
||||
}
|
||||
});
|
||||
@@ -647,7 +703,7 @@ function update(boxUpdateInfo, auditSource, callback) {
|
||||
debug('Starting update');
|
||||
doUpdate(boxUpdateInfo, function (error) {
|
||||
if (error) {
|
||||
console.error('Update failed with error:', error);
|
||||
debug('Update failed with error:', error);
|
||||
locker.unlock(locker.OP_BOX_UPDATE);
|
||||
}
|
||||
});
|
||||
@@ -759,6 +815,8 @@ function doUpdate(boxUpdateInfo, callback) {
|
||||
|
||||
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, data);
|
||||
|
||||
progress.set(progress.UPDATE, 5, 'Downloading and extracting new version');
|
||||
|
||||
shell.sudo('update', [ UPDATE_CMD, boxUpdateInfo.sourceTarballUrl, JSON.stringify(data) ], function (error) {
|
||||
if (error) return updateError(error);
|
||||
|
||||
@@ -886,7 +944,7 @@ function migrate(options, callback) {
|
||||
|
||||
if (!options.domain) return doMigrate(options, callback);
|
||||
|
||||
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint');
|
||||
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint', 'token');
|
||||
|
||||
settings.setDnsConfig(dnsConfig, options.domain, function (error) {
|
||||
if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
|
||||
@@ -900,7 +958,7 @@ function migrate(options, callback) {
|
||||
function refreshDNS(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('refreshDNS: current ip %s', ip);
|
||||
@@ -914,15 +972,7 @@ function refreshDNS(callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.each(result, function (app, callback) {
|
||||
// get the current record before updating it
|
||||
subdomains.get(app.location, 'A', function (error, values) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
if (values.length !== 0 && !app.dnsRecordId) return callback(null, new Error('DNS Record already exists'));
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
});
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
+2
-6
@@ -64,7 +64,7 @@ function saveSync() {
|
||||
fs.writeFileSync(cloudronConfigFileName, JSON.stringify(data, null, 4)); // functions are ignored by JSON.stringify
|
||||
}
|
||||
|
||||
function _reset (callback) {
|
||||
function _reset(callback) {
|
||||
safe.fs.unlinkSync(cloudronConfigFileName);
|
||||
|
||||
initConfig();
|
||||
@@ -79,12 +79,11 @@ function initConfig() {
|
||||
data.token = null;
|
||||
data.boxVersionsUrl = null;
|
||||
data.version = null;
|
||||
data.isCustomDomain = false;
|
||||
data.isCustomDomain = true;
|
||||
data.webServerOrigin = null;
|
||||
data.smtpPort = 2525; // // this value comes from mail container
|
||||
data.sysadminPort = 3001;
|
||||
data.ldapPort = 3002;
|
||||
data.simpleAuthPort = 3004;
|
||||
data.provider = 'caas';
|
||||
data.appBundle = [ ];
|
||||
|
||||
@@ -116,9 +115,6 @@ function initConfig() {
|
||||
saveSync();
|
||||
}
|
||||
|
||||
// cleanup any old config file we have for tests
|
||||
if (exports.TEST) safe.fs.unlinkSync(cloudronConfigFileName);
|
||||
|
||||
initConfig();
|
||||
|
||||
// set(obj) or set(key, value)
|
||||
|
||||
+3
-1
@@ -36,6 +36,8 @@ exports = module.exports = {
|
||||
|
||||
DEMO_USERNAME: 'cloudron',
|
||||
|
||||
DKIM_SELECTOR: 'cloudron'
|
||||
DKIM_SELECTOR: 'cloudron',
|
||||
|
||||
AUTOUPDATE_PATTERN_NEVER: 'never'
|
||||
};
|
||||
|
||||
|
||||
+10
-3
@@ -11,6 +11,7 @@ var apps = require('./apps.js'),
|
||||
certificates = require('./certificates.js'),
|
||||
cloudron = require('./cloudron.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
CronJob = require('cron').CronJob,
|
||||
debug = require('debug')('box:cron'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
@@ -51,9 +52,15 @@ function initialize(callback) {
|
||||
gHeartbeatJob = new CronJob({
|
||||
cronTime: '00 */1 * * * *', // every minute
|
||||
onTick: cloudron.sendHeartbeat,
|
||||
start: true
|
||||
start: false
|
||||
});
|
||||
cloudron.sendHeartbeat(); // latest unpublished version of CronJob has runOnInit
|
||||
// hack: send the first heartbeat only after we are running for 60 seconds
|
||||
// required as we end up sending a heartbeat and then cloudron-setup reboots the server
|
||||
setTimeout(function () {
|
||||
if (!gHeartbeatJob) return; // already uninitalized
|
||||
gHeartbeatJob.start();
|
||||
cloudron.sendHeartbeat();
|
||||
}, 1000 * 60);
|
||||
|
||||
var randomHourMinute = Math.floor(60*Math.random());
|
||||
gAliveJob = new CronJob({
|
||||
@@ -175,7 +182,7 @@ function autoupdatePatternChanged(pattern) {
|
||||
|
||||
if (gAutoupdaterJob) gAutoupdaterJob.stop();
|
||||
|
||||
if (pattern === 'never') return;
|
||||
if (pattern === constants.AUTOUPDATE_PATTERN_NEVER) return;
|
||||
|
||||
gAutoupdaterJob = new CronJob({
|
||||
cronTime: pattern,
|
||||
|
||||
@@ -85,6 +85,7 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
@@ -100,6 +101,7 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
@@ -190,6 +192,11 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.digitalocean.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains DO NS', nameservers);
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
+13
-17
@@ -10,7 +10,7 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
debug = require('debug')('box:dns/noop'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util');
|
||||
@@ -60,18 +60,14 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));
|
||||
|
||||
// async.every only reports bools
|
||||
var stashedError = null;
|
||||
|
||||
async.every(nameservers, function (nameserver, callback) {
|
||||
async.every(nameservers, function (nameserver, everyNsCallback) {
|
||||
// ns records cannot have cname
|
||||
dns.resolve4(nameserver, function (error, nsIps) {
|
||||
if (error || !nsIps || nsIps.length === 0) {
|
||||
stashedError = new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
return callback(false);
|
||||
return everyNsCallback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, callback) {
|
||||
async.every(nsIps, function (nsIp, everyIpCallback) {
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: adminDomain, type: 'A' }),
|
||||
server: { address: nsIp },
|
||||
@@ -80,20 +76,20 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return callback(true); // should be ok if dns server is down
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, adminDomain, error);
|
||||
return callback(false);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', message);
|
||||
return callback(false);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
debug('verifyDnsConfig: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, adminDomain, answer, ip);
|
||||
@@ -102,17 +98,17 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return a.address === ip;
|
||||
});
|
||||
|
||||
if (match) return callback(true); // done!
|
||||
if (match) return everyIpCallback(null, true); // done!
|
||||
|
||||
callback(false);
|
||||
everyIpCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
}, callback);
|
||||
}, everyNsCallback);
|
||||
});
|
||||
}, function (success) {
|
||||
if (stashedError) return callback(stashedError);
|
||||
if (!success) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'The domain ' + adminDomain + ' does not resolve to the servers IP'));
|
||||
}, function (error, success) {
|
||||
if (error) return callback(error);
|
||||
if (!success) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'The domain ' + adminDomain + ' does not resolve to the server\'s IP ' + ip));
|
||||
|
||||
callback(null, { provider: dnsConfig.provider, wildcard: !!dnsConfig.wildcard });
|
||||
});
|
||||
|
||||
+2
-2
@@ -48,8 +48,8 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(type === 'A' || type === 'CNAME');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
+1
-1
@@ -241,7 +241,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(new SubdomainError(SubdomainError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
|
||||
+18
-12
@@ -7,12 +7,12 @@ var assert = require('assert'),
|
||||
debug = require('debug')('box:dns/waitfordns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
tld = require('tldjs');
|
||||
tld = require('tldjs'),
|
||||
util = require('util');
|
||||
|
||||
// the first arg to callback is not an error argument; this is required for async.every
|
||||
function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(util.isRegExp(value));
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof nameserver, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -33,31 +33,33 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(true); // should be ok if dns server is down
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, domain, error);
|
||||
return iteratorCallback(false);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, domain, type, message);
|
||||
return iteratorCallback(false);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
debug('isChangeSynced: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, domain, answer, value);
|
||||
|
||||
var match = answer.some(function (a) {
|
||||
return ((type === 'A' && a.address === value) || (type === 'CNAME' && a.data === value));
|
||||
return ((type === 'A' && value.test(a.address)) ||
|
||||
(type === 'CNAME' && value.test(a.data)) ||
|
||||
(type === 'TXT' && value.test(a.data.join(''))));
|
||||
});
|
||||
|
||||
if (match) return iteratorCallback(true); // done!
|
||||
if (match) return iteratorCallback(null, true); // done!
|
||||
|
||||
iteratorCallback(false);
|
||||
iteratorCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
@@ -68,12 +70,16 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
// check if IP change has propagated to every nameserver
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(type === 'A' || type === 'CNAME');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var zoneName = tld.getDomain(domain);
|
||||
if (typeof value === 'string') {
|
||||
// http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
|
||||
value = new RegExp('^' + value.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '$');
|
||||
}
|
||||
debug('waitForIp: domain %s to be %s in zone %s.', domain, value, zoneName);
|
||||
|
||||
var attempt = 1;
|
||||
@@ -83,7 +89,7 @@ function waitForDns(domain, value, type, options, callback) {
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error || !nameservers) return retryCallback(error || new SubdomainError(SubdomainError.EXTERNAL_ERROR, 'Unable to get nameservers'));
|
||||
|
||||
async.every(nameservers, isChangeSynced.bind(null, domain, value, type), function (synced) {
|
||||
async.every(nameservers, isChangeSynced.bind(null, domain, value, type), function (error, synced) {
|
||||
debug('waitForIp: %s %s ns: %j', domain, synced ? 'done' : 'not done', nameservers);
|
||||
|
||||
retryCallback(synced ? null : new SubdomainError(SubdomainError.EXTERNAL_ERROR, 'ETRYAGAIN'));
|
||||
|
||||
+3
-13
@@ -54,14 +54,6 @@ function debugApp(app, args) {
|
||||
debug(prefix + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
|
||||
}
|
||||
|
||||
function targetBoxVersion(manifest) {
|
||||
if ('targetBoxVersion' in manifest) return manifest.targetBoxVersion;
|
||||
|
||||
if ('minBoxVersion' in manifest) return manifest.minBoxVersion;
|
||||
|
||||
return '99999.99999.99999'; // compatible with the latest version
|
||||
}
|
||||
|
||||
function pullImage(manifest, callback) {
|
||||
var docker = exports.connection;
|
||||
|
||||
@@ -135,7 +127,6 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
isAppContainer = !cmd; // non app-containers are like scheduler containers
|
||||
|
||||
var manifest = app.manifest;
|
||||
var developmentMode = !!manifest.developmentMode;
|
||||
var exposedPorts = {}, dockerPortBindings = { };
|
||||
var domain = app.altDomain || config.appFqdn(app.location);
|
||||
var stdEnv = [
|
||||
@@ -165,8 +156,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
// first check db record, then manifest
|
||||
var memoryLimit = app.memoryLimit || manifest.memoryLimit || 0;
|
||||
|
||||
if (developmentMode) {
|
||||
// developerMode does not restrict memory usage
|
||||
if (memoryLimit === -1) { // unrestricted
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit < constants.DEFAULT_MEMORY_LIMIT) { // ensure we never go below minimum (in case we change the default)
|
||||
memoryLimit = constants.DEFAULT_MEMORY_LIMIT;
|
||||
@@ -187,7 +177,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
name: name, // used for filtering logs
|
||||
Tty: isAppContainer,
|
||||
Image: app.manifest.dockerImage,
|
||||
Cmd: (isAppContainer && developmentMode) ? [ '/bin/bash', '-c', 'echo "Development mode. Use cloudron exec to debug. Sleeping" && sleep infinity' ] : cmd,
|
||||
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
|
||||
Env: stdEnv.concat(addonEnv).concat(portEnv),
|
||||
ExposedPorts: isAppContainer ? exposedPorts : { },
|
||||
Volumes: { // see also ReadonlyRootfs
|
||||
@@ -205,7 +195,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
MemorySwap: memoryLimit, // Memory + Swap
|
||||
PortBindings: isAppContainer ? dockerPortBindings : { },
|
||||
PublishAllPorts: false,
|
||||
ReadonlyRootfs: !developmentMode, // see also Volumes in startContainer
|
||||
ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
|
||||
RestartPolicy: {
|
||||
"Name": isAppContainer ? "always" : "no",
|
||||
"MaximumRetryCount": 0
|
||||
|
||||
+2
-1
@@ -8,7 +8,7 @@ exports = module.exports = {
|
||||
getAllPaged: getAllPaged,
|
||||
cleanup: cleanup,
|
||||
|
||||
// keep in sync with webadmin index.js filter
|
||||
// keep in sync with webadmin index.js filter and CLI tool
|
||||
ACTION_ACTIVATE: 'cloudron.activate',
|
||||
ACTION_APP_CLONE: 'app.clone',
|
||||
ACTION_APP_CONFIGURE: 'app.configure',
|
||||
@@ -16,6 +16,7 @@ exports = module.exports = {
|
||||
ACTION_APP_RESTORE: 'app.restore',
|
||||
ACTION_APP_UNINSTALL: 'app.uninstall',
|
||||
ACTION_APP_UPDATE: 'app.update',
|
||||
ACTION_APP_LOGIN: 'app.login',
|
||||
ACTION_BACKUP_FINISH: 'backup.finish',
|
||||
ACTION_BACKUP_START: 'backup.start',
|
||||
ACTION_CERTIFICATE_RENEWAL: 'certificate.renew',
|
||||
|
||||
+10
-4
@@ -25,7 +25,8 @@ exports = module.exports = {
|
||||
var assert = require('assert'),
|
||||
constants = require('./constants.js'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror');
|
||||
DatabaseError = require('./databaseerror'),
|
||||
mailboxdb = require('./mailboxdb.js');
|
||||
|
||||
var GROUPS_FIELDS = [ 'id', 'name' ].join(',');
|
||||
|
||||
@@ -88,10 +89,14 @@ function add(id, name, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var data = [ id, name ];
|
||||
database.query('INSERT INTO groups (id, name) VALUES (?, ?)',
|
||||
data, function (error, result) {
|
||||
|
||||
var queries = [];
|
||||
queries.push({ query: 'INSERT INTO mailboxes (name, ownerId, ownerType) VALUES (?, ?, ?)', args: [ name, id, mailboxdb.TYPE_GROUP ] });
|
||||
queries.push({ query: 'INSERT INTO groups (id, name) VALUES (?, ?)', args: [ id, name ] });
|
||||
|
||||
database.transaction(queries, function (error, result) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS, error));
|
||||
if (error || result.affectedRows !== 1) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (error || result[1].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
@@ -105,6 +110,7 @@ function del(id, callback) {
|
||||
var queries = [];
|
||||
queries.push({ query: 'DELETE FROM groupMembers WHERE groupId = ?', args: [ id ] });
|
||||
queries.push({ query: 'DELETE FROM groups WHERE id = ?', args: [ id ] });
|
||||
queries.push({ query: 'DELETE FROM mailboxes WHERE ownerId=?', args: [ id ] });
|
||||
|
||||
database.transaction(queries, function (error, result) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
+5
-16
@@ -24,7 +24,6 @@ var assert = require('assert'),
|
||||
constants = require('./constants.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
groupdb = require('./groupdb.js'),
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
util = require('util'),
|
||||
uuid = require('node-uuid');
|
||||
|
||||
@@ -60,7 +59,7 @@ GroupError.NOT_ALLOWED = 'Not Allowed';
|
||||
function validateGroupname(name) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
|
||||
if (name.length < 2) return new GroupError(GroupError.BAD_FIELD, 'name must be atleast 2 chars');
|
||||
if (name.length < 1) return new GroupError(GroupError.BAD_FIELD, 'name must be atleast 1 char');
|
||||
if (name.length >= 200) return new GroupError(GroupError.BAD_FIELD, 'name too long');
|
||||
|
||||
if (constants.RESERVED_NAMES.indexOf(name) !== -1) return new GroupError(GroupError.BAD_FIELD, 'name is reserved');
|
||||
@@ -85,16 +84,11 @@ function create(name, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var id = 'gid-' + uuid.v4();
|
||||
mailboxdb.add(name, id /* owner */, mailboxdb.TYPE_GROUP, function (error) {
|
||||
groupdb.add(id, name, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new GroupError(GroupError.ALREADY_EXISTS));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
groupdb.add(id, name, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new GroupError(GroupError.ALREADY_EXISTS));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, { id: id, name: name });
|
||||
});
|
||||
callback(null, { id: id, name: name });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -105,16 +99,11 @@ function remove(id, callback) {
|
||||
// never allow admin group to be deleted
|
||||
if (id === constants.ADMIN_GROUP_ID) return callback(new GroupError(GroupError.NOT_ALLOWED));
|
||||
|
||||
mailboxdb.delByOwnerId(id, function (error) {
|
||||
groupdb.del(id, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new GroupError(GroupError.NOT_FOUND));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
groupdb.del(id, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new GroupError(GroupError.NOT_FOUND));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user