Compare commits
617 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| d97ee5d425 | |||
| a1be30c35a | |||
| ba3cb3b646 | |||
| daadefe6b9 | |||
| 12c849398e | |||
| 392492be04 | |||
| 4fd0c3c66c | |||
| 7d2e6d8d4d | |||
| f3e7249bdc | |||
| 53afb2606a | |||
| fbce71d031 | |||
| 1adde7d8e8 | |||
| d23599ba24 | |||
| ac35bcf9f0 | |||
| e4c5dfda60 | |||
| 99cfe564ae | |||
| 70a3cdc9bc | |||
| bd52068695 | |||
| ae54b57ca7 | |||
| 0eb3c26c05 | |||
| ca007ff979 | |||
| 2eb5c39388 | |||
| 014ce9df66 | |||
| a4dff215f1 | |||
| 0db4387013 | |||
| d81abfb2f0 | |||
| 0d880cf1e3 | |||
| b24d600b31 | |||
| 2ac52fc64f | |||
| 3bf07a3143 | |||
| cf883046b3 | |||
| 5e9808ad79 | |||
| 83ddf0a62c | |||
| cb7fea97af | |||
| 3a4ee3ec8c | |||
| 96dbda3949 | |||
| 7facf17ac6 | |||
| a939367ab6 | |||
| fd52f0ded4 | |||
| 84ba20493e | |||
| 4f9a9906c9 | |||
| 204340eac0 | |||
| d72fffb61f | |||
| cf4f0af0be | |||
| 07d0601342 | |||
| 4cd0e4d38d | |||
| 4f1a596123 | |||
| d3990eff39 | |||
| 6eab8bbdce | |||
| 61e130fb71 | |||
| 0c2267f9b4 | |||
| a4e822f1c0 | |||
| e9c5837059 | |||
| 17406e4560 | |||
| eb99f8b844 | |||
| 4fec2fe124 | |||
| 4045eb7a33 | |||
| 99d8baf36f | |||
| db7a4b75ae | |||
| dcd8c82a75 | |||
| d577756851 | |||
| 1e9c1e6ed0 | |||
| ecc76ed368 | |||
| 9e61f76aad | |||
| 11c2cecc9e | |||
| b5aed7b00a | |||
| 4d177e0d29 | |||
| f070082586 | |||
| f3483e6a92 | |||
| 91e56223ce | |||
| 631b830f4c | |||
| 63364ae017 | |||
| 3b162c6648 | |||
| b4fb73934b | |||
| 8f04163262 | |||
| 10b6664134 | |||
| 454ca86507 | |||
| 34020064bc | |||
| 67486b8177 | |||
| 6a4be98f19 | |||
| d5fb048364 | |||
| 6dd4d40692 | |||
| 04d6f94108 | |||
| 8d49f5a229 | |||
| f80713ba2f | |||
| 91f25465a4 | |||
| aa5cc68301 | |||
| acd00222e5 | |||
| 5697bcf43f | |||
| 5b7cc6642a | |||
| ee528470a7 | |||
| 3eed481d22 | |||
| 97b37cb45c | |||
| 49de39a1f3 | |||
| 6fe390b957 | |||
| 1a68467ff2 | |||
| 40de715f20 | |||
| 8d9fbb9cea | |||
| e3910d6587 | |||
| 50e712a93e | |||
| 1c8ddc10db | |||
| 1007a85fde | |||
| a0903f0890 | |||
| 1c40e51999 | |||
| c07df68558 | |||
| fd5a05db6c | |||
| 19d825db48 | |||
| 2862fec819 | |||
| 2df74ebe96 | |||
| 5794aaee0a | |||
| 229ca7f86b | |||
| 7edf43c627 | |||
| ae1dff980a | |||
| 01d0e56332 | |||
| 00990b6837 | |||
| 5886671fba | |||
| 5088cb47d9 | |||
| 60ae4972b0 | |||
| ad8ddf80f5 | |||
| c4d313a2c0 | |||
| 140e9fdd94 | |||
| 82b5c11374 | |||
| 3307b581af | |||
| 45e68ef6da | |||
| 4d7f9ba9a5 | |||
| 6d0cdc36b2 | |||
| 79541a68a5 | |||
| 845d386478 | |||
| 8771de5c12 | |||
| 76246b2952 | |||
| f994b68701 | |||
| 77558c823c | |||
| dd6a19ea85 | |||
| 16978f8c1a | |||
| f311c3da1c | |||
| 423e355fd6 | |||
| 8fadb3badc | |||
| 3845065085 | |||
| 801d848908 | |||
| e6eda1283c | |||
| a553755f4a | |||
| cd52459f05 | |||
| 1802201e9e | |||
| 2d72f49261 | |||
| cd42a6c2ea | |||
| 65f949e669 | |||
| f3fec9a33c | |||
| 13182de57f | |||
| c33566b553 | |||
| 4faf247898 | |||
| 9952a986eb | |||
| 40aaffe365 | |||
| 3745e96a6f | |||
| 157ce06f93 | |||
| 822dfb8af5 | |||
| 9ead482dc6 | |||
| 865c0a7aa7 | |||
| c760c42f92 | |||
| ded31b977e | |||
| 4781c4e364 | |||
| 8e123b017e | |||
| 658cbcdab9 | |||
| 0cc980f539 | |||
| da7648fe3f | |||
| 8db1073980 | |||
| f74f17af02 | |||
| 87ca05281d | |||
| 9780f77fa8 | |||
| 0bddd5a2c6 | |||
| 20f2a6e4c6 | |||
| 6d47737de7 | |||
| e8f9552ff9 | |||
| 9c76c5fc27 | |||
| f9d5f92397 | |||
| 3a2a05dfce | |||
| 5a291fa2a4 | |||
| 84d34ec004 | |||
| 63fca38f0b | |||
| e3b2799230 | |||
| 2efe72519e | |||
| eb3ae2c34f | |||
| eba79cd859 | |||
| d7d8cf97ed | |||
| 089f7301b8 | |||
| fb4f13eb13 | |||
| 89878ff9ad | |||
| ba62f577fa | |||
| 4c5bd2d318 | |||
| 3c318a72f7 | |||
| 23532eafea | |||
| 5b7a080d98 | |||
| 0a44b8c23b | |||
| c0c07c2839 | |||
| 96d2b32a9f | |||
| 795c2ad91c | |||
| fc9a9c3f87 | |||
| d141d6ba21 | |||
| 479da5393a | |||
| 307334ef81 | |||
| c1ec7a06bf | |||
| 1126a0fc1e | |||
| b5f678613b | |||
| b7e3447a46 | |||
| 32fa3b8a51 | |||
| fe0e4000a6 | |||
| 9ceeb70fc2 | |||
| aa8b4f1fba | |||
| 95ba51dfb2 | |||
| c74fb07ff7 | |||
| 03f1326073 | |||
| daa4c66e7f | |||
| 571abc56fe | |||
| 4aaeccecbd | |||
| 4287d69397 | |||
| de328e34d8 | |||
| 8d45ce6971 | |||
| fa3f173e8a | |||
| 414e9bdf05 | |||
| c342e52e7d | |||
| 78aa9c66f7 | |||
| 986ec02ac6 | |||
| 4e0bb9187a | |||
| 9c8a8571b4 | |||
| 7f30b8de9d | |||
| d1bfa4875a | |||
| 0250e1ea59 | |||
| 924fb337e8 | |||
| 0c9dce0c9f | |||
| 9e9470c6af | |||
| 471539d64b | |||
| 95127a868d | |||
| f34d429052 | |||
| 82e53bce36 | |||
| b04a417cfc | |||
| 77641f4b51 | |||
| 765d20c8be | |||
| d2420de594 | |||
| 8e9da38451 | |||
| ddb69eb25c | |||
| 11697f11cf | |||
| 35a2a656d3 | |||
| 6fc69c05ca | |||
| 65cff35be6 | |||
| 7467907c09 | |||
| d6c32a2632 | |||
| 7dc277a80c | |||
| 4881d090f0 | |||
| 48330423c6 | |||
| 88e844b545 | |||
| f45da2efc4 | |||
| f422614e7b | |||
| d164f881ca | |||
| 4994a5da49 | |||
| 393317d114 | |||
| 8de940ae36 | |||
| 374130d12a | |||
| 05fcdb0a67 | |||
| 23827974d8 | |||
| ae2c0f3503 | |||
| cbb93ef7ad | |||
| 4d3c6f7caa | |||
| 4f3c846e2b | |||
| 6ef2f974ae | |||
| 180cafad0c | |||
| f707f59765 | |||
| 969ef3fb11 | |||
| 7af3f85d7c | |||
| ffc0a75545 | |||
| d5b5bdb104 | |||
| 8ae65661dd | |||
| 423c4446de | |||
| 53cffd5133 | |||
| 15ff1fb093 | |||
| 195d388990 | |||
| d008e871da | |||
| 3e6295de92 | |||
| 788004245a | |||
| be5221d5b8 | |||
| dacc66bb35 | |||
| 5f26c3a2c1 | |||
| 228af62c39 | |||
| b531922175 | |||
| dad58efc94 | |||
| 7a3d3a3c74 | |||
| e5c42f2b90 | |||
| 6cbf64b88e | |||
| 9635f9aa24 | |||
| 893f9d87bc | |||
| bfda0d4891 | |||
| 65a62f9fbf | |||
| 6d74f7e26f | |||
| 14ca0c1623 | |||
| 3f6e8273a7 | |||
| 287b96925a | |||
| 608cc1e036 | |||
| 5fa27c4954 | |||
| 8deadece05 | |||
| 797dc26f47 | |||
| ddf7823b19 | |||
| 923e1d0524 | |||
| 339bc71435 | |||
| 863612356d | |||
| 56cdaefecc | |||
| 9e611b6ae3 | |||
| 7e26b4091b | |||
| d7702b96e5 | |||
| 41edd3778d | |||
| 0ac69cc6c9 | |||
| fbb01b1ce7 | |||
| a723203b28 | |||
| a995037f0a | |||
| 5ad4600fd4 | |||
| 8ddb670445 | |||
| ca5723bbc7 | |||
| 1b0a81cb3f | |||
| d92a2b070c | |||
| 4703f1afda | |||
| 3fad5e856c | |||
| cc66830a2d | |||
| 880f7b4cd3 | |||
| 5b9d4daafe | |||
| 410420e9d5 | |||
| ca7f80414e | |||
| 81b705b25b | |||
| 11c7ba1957 | |||
| f79e1993cb | |||
| fe71dc22fc | |||
| e3c72fa6ce | |||
| 27a542daec | |||
| aeba8e8fd2 | |||
| a0e122e578 | |||
| 29ae2cf8ca | |||
| abe72442ae | |||
| 8e134f3ae8 | |||
| 70042021aa | |||
| 6cc708136e | |||
| 00ac78c839 | |||
| 6f0c271e6e | |||
| ef3a125ce4 | |||
| d91e8bb87b | |||
| a7d7935451 | |||
| 8c011ea9b0 | |||
| c41b2c32f5 | |||
| 1e90ec95d3 | |||
| 1cca0aee6e | |||
| be73ec4b66 | |||
| 6c8b9b8799 | |||
| 0aea7cc347 | |||
| e15c3f05c2 | |||
| f516dddf30 | |||
| 8fb1bc29d1 | |||
| cc8f8b2339 | |||
| f7338c8210 | |||
| c04e8f33c5 | |||
| 8e1f190079 | |||
| 019cff8851 | |||
| 8a76788e7a | |||
| 33492333c7 | |||
| 710cdc7bb8 | |||
| 0471a14894 | |||
| e4b1b73408 | |||
| a6c2c608e4 | |||
| 1cd36319ff | |||
| a65611a37b | |||
| 4769d14414 | |||
| 64c2f3d8c3 | |||
| 2083efdef8 | |||
| a5efdb067d | |||
| 0584ace954 | |||
| 77d23d6c15 | |||
| a0c3a531b3 | |||
| 31ea1e677c | |||
| 2479af23ab | |||
| 83f36981f7 | |||
| 851e70be6e | |||
| f0ba126156 | |||
| 9dd51575ab | |||
| cf701b8eb0 | |||
| 9b4e81b476 | |||
| 788873e858 | |||
| 926224bd5d | |||
| d9a0bf457d | |||
| 6a5b0c194f | |||
| 9f117fcfdc | |||
| fe01d1bf28 | |||
| a94d44da75 | |||
| 8ded006dea | |||
| 5424a658f3 | |||
| b268a18695 | |||
| 06f1b9dd1b | |||
| e82bdfc996 | |||
| 40f5d0412b | |||
| 65af062c33 | |||
| 68a1efe3d3 | |||
| 3cb4d4b1ab | |||
| 0c706cffc0 | |||
| 5f888341ea | |||
| cf69a8c4ce | |||
| a3ece64273 | |||
| 4bda11edcf | |||
| 3913a8367b | |||
| bac0ea17c2 | |||
| d7f77de6c6 | |||
| d2d2818b0b | |||
| b58fe9edd6 | |||
| dee8bec2dc | |||
| d5db9657ca | |||
| 9378b949fb | |||
| ad9cb00f13 | |||
| 5ccca76b17 | |||
| cec52e14f6 | |||
| 977936018f | |||
| 261d15f0f7 | |||
| 14fe1dde58 | |||
| 737bbd26ee | |||
| f5db7c974f | |||
| 7303a09f2f | |||
| e3cfaabb74 | |||
| 3cae400b63 | |||
| 5dd10e7cd2 | |||
| f12358a10c | |||
| 23e3b0bd91 | |||
| 7a39cdda97 | |||
| 5460027a49 | |||
| 4cfee06297 | |||
| d0147a5e67 | |||
| a1dfc2b47b | |||
| 5eaade1079 | |||
| 19d8b90a12 | |||
| 6bc764090c | |||
| d64c4927aa | |||
| 9c45dec8b0 | |||
| a21750a4c9 | |||
| dda16331f6 | |||
| 7b93150047 | |||
| a98177fe71 | |||
| d95e68926b | |||
| ff3a748398 | |||
| 9354784f01 | |||
| e021a4b377 | |||
| 4fac5a785f | |||
| 4d42c116ce | |||
| 3879b55642 | |||
| 9d61ccaa45 | |||
| 6d8cf8456e | |||
| 5e1ad4ad93 | |||
| b29a6014d5 | |||
| bd7625031e | |||
| 9e881d1934 | |||
| 31f93f0255 | |||
| 67a7624da0 | |||
| 7fdf491815 | |||
| 798c2ff921 | |||
| 42c138e134 | |||
| b1b389dd7d | |||
| 8911081f85 | |||
| 9605fe3842 | |||
| bb91faf23c | |||
| ba56f7d15d | |||
| 6e73761983 | |||
| 588812a13a | |||
| b6d8721aed | |||
| da835afde1 | |||
| 5e22caa6e7 | |||
| 937931e885 | |||
| c2b140208e | |||
| f9a4d00b3c | |||
| eb2ef47df1 | |||
| cdb5dc2c53 | |||
| f6a2406091 | |||
| c7134d2da3 | |||
| 1692842bf0 | |||
| 8d78f06a34 | |||
| 1694a1536c | |||
| e0b9dc3623 | |||
| 644bc54a0d | |||
| b2d062bdf8 | |||
| 894d7a6e72 | |||
| fee513594f | |||
| 456c183622 | |||
| 0488aada9f | |||
| 54f7cf5f64 | |||
| e1740a0d4b | |||
| bac7d3ad84 | |||
| 6402b0c221 | |||
| 1f55bb52fc | |||
| 1029402d1e | |||
| abb371d81e | |||
| 779c9d79b3 | |||
| 832c11d785 | |||
| 85fb63298e | |||
| 25b9d5a746 | |||
| 6dc900bbd8 | |||
| e32b313cf2 | |||
| a01dea3932 | |||
| aa0e820605 | |||
| 13db61a0e2 | |||
| fce2cdce7f | |||
| 89bb690152 | |||
| 5c203dc759 | |||
| 4d737d535a | |||
| 558acf27a3 | |||
| 3da503ab8e | |||
| 299e8aceeb | |||
| b422a27be8 | |||
| f2312a6768 | |||
| 178ffe20a8 | |||
| 3b8edd4896 | |||
| f16aab7f80 | |||
| 09118d6b06 | |||
| bd57ee9461 | |||
| 1fbbe036ce | |||
| 94d7bc5328 | |||
| d709a5cfe4 | |||
| 188f000507 | |||
| 51d5b96fa1 | |||
| 11d12c591e | |||
| 245d17ad25 | |||
| e05e9c3ead | |||
| 8102d431e8 | |||
| 0f76cbbb95 | |||
| 2a45a9bbd4 | |||
| e68d627f72 | |||
| 1a3e3638ff | |||
| 8f912d8a1b | |||
| d891058f8c | |||
| 71fe094be1 | |||
| da857f520b | |||
| 39ff21bdf4 | |||
| 72dd7c74d5 | |||
| 7c7ef15e1c | |||
| b320e15ea7 | |||
| aa22ab8847 | |||
| 3e23c3efce | |||
| c4f96bbd6b | |||
| 649092ecb0 | |||
| 128a3b03c9 | |||
| 847ef6626f | |||
| 4643daeeec | |||
| 38178afd31 | |||
| 9c6324631d | |||
| 3a17bf9a0f | |||
| 602f8bcd04 | |||
| 785ae765a4 | |||
| c85120834c | |||
| 89d36b8ad4 | |||
| b9711d7b47 | |||
| 4f9273819a | |||
| e0d7850135 | |||
| 2c871705c7 | |||
| 2bb99db2c7 | |||
| 3fc5757e97 | |||
| 92ff19ffce | |||
| e9456f70f9 | |||
| ffbda22145 | |||
| b92ae44578 | |||
| b6ffc966cd | |||
| b42bc52093 | |||
| 806b458ff1 | |||
| d5d4e237bd | |||
| 956fe86250 | |||
| 4d000e377f | |||
| 39e827be04 | |||
| e50b4cb7ec | |||
| 1938ec635b | |||
| 03a3d367a4 | |||
| 38c2f75b5e | |||
| 9d98b55881 | |||
| 18e59c4754 | |||
| 64cb951206 | |||
| 77df520b07 | |||
| 32f94a03ce | |||
| fc6ce4945f | |||
| 17b7d89db9 | |||
| 6ea741e92f | |||
| 790ad4e74d | |||
| f92297cc99 | |||
| 0c6c835a39 | |||
| 514341172c | |||
| e535ffa778 | |||
| b86cfabd17 | |||
| b44f0b78a1 | |||
| 76d234d0bf | |||
| a694acba44 | |||
| 046120befc | |||
| b65fee4b73 | |||
| 153dcc1826 | |||
| fa4725176c | |||
| e42607fec6 | |||
| 297c1ff266 | |||
| 5afe75f137 | |||
| 4cfc85f6d3 | |||
| b03f901bbf | |||
| b9dfac94ed | |||
| c905adde1e | |||
| 0e7efa77a5 | |||
| 875ca0307f | |||
| 543c9843ba | |||
| 83254a16f9 | |||
| 466dfdf81f | |||
| 3d60a04b36 | |||
| 103cb10cad | |||
| 29ef079a83 | |||
| a55645770e | |||
| 132ddd2671 | |||
| fa5891b149 | |||
| d01929debc | |||
| 7c01ee58b5 | |||
| ec89f8719c | |||
| 9145022a2c | |||
| 9ae8ce3296 | |||
| eabf27f0c9 | |||
| 3102a15dff | |||
| 7747c482d4 | |||
| 444ca1888b | |||
| 86ccf5ea84 | |||
| ef088293b6 |
@@ -805,3 +805,137 @@
|
||||
* (mail) Set maximum email size to 25MB
|
||||
* Remove SimpleAuth addon
|
||||
|
||||
[0.107.0]
|
||||
* Support CSP for webinterface and OAuth views
|
||||
* (mail) Fix issue where Cloudron is only used to send emails
|
||||
|
||||
[0.108.0]
|
||||
* Redirect to /setupdns.html when restoring
|
||||
* Fix setting custom avatar
|
||||
* Do not allocate more than 4GB swap
|
||||
* Generate real passwords for sendmail/recvmail addons
|
||||
* Rate limit all authentication routes to prevent password brute force
|
||||
* Generate 128 byte password for MySQL multi-db addon
|
||||
|
||||
[0.109.0]
|
||||
* Add Referrer-policy
|
||||
* Add tooltip for admin email field explaining it is local & private
|
||||
* Verify AMI instance id during DNS setup instead of admin account setup
|
||||
* Split platform and app data folders and get rid of btrfs volumes
|
||||
|
||||
[0.110.0]
|
||||
* Fix disk usage graphs
|
||||
* Add --data-dir to cloudron-setup that allows customizing data location
|
||||
* Add UI to restore from any app backup
|
||||
* (mysql) Use utf8mb4 encoding for databases and backups
|
||||
* Allow installing a new app from a backup
|
||||
* Fix download of large files (> 1GB)
|
||||
* Fix app backup regression
|
||||
|
||||
[0.120.0]
|
||||
* Update Docker to 17.03.1-ce
|
||||
* Rework backup backend logic
|
||||
* Add UI to download logs
|
||||
* Fix crash when checking mail dns settings
|
||||
* Allow backup retention duration to be configured
|
||||
* Add minio backend for backups
|
||||
* Fix issue where Cloudrons with errored apps won't back up when using fs backend
|
||||
* Fix DNS check issue where PTR records were read from hosts file
|
||||
|
||||
[0.120.1]
|
||||
* Fix managed Cloudron backup cleanup
|
||||
|
||||
[0.130.0]
|
||||
* Use Cloudron DNS server only for containers created by Cloudron
|
||||
* Make Cloudron always start even if DNS credentials are invalid
|
||||
* Show warning if DNS configuration is not valid
|
||||
* Drop the '.enc' extension for non-encrypted backups
|
||||
* Do not encrypt backups when the backup key is empty
|
||||
* Do a multipart S3 download for slow internet connections
|
||||
* Support naked domains as external location
|
||||
|
||||
[0.130.1]
|
||||
* Fix app configure dialog regression
|
||||
|
||||
[0.130.2]
|
||||
* Fix app configure dialog regression and dns setup screen
|
||||
|
||||
[0.130.3]
|
||||
* Show error message if setup fails due to reserved username
|
||||
* (security) Do not print password in the logs in the configure route
|
||||
* Fix restore of unencrypted backups
|
||||
* Fix bug where FS backups have incorrect extension for unencrypted backups
|
||||
|
||||
[0.140.0]
|
||||
* HTTP2 support
|
||||
* Condense the dns checks in the settings view
|
||||
* Document new app store submission guidelines
|
||||
|
||||
[0.150.0]
|
||||
* Disable dnsmasq on OVH
|
||||
* Scale redis memory based on the app's memory limit
|
||||
* (security) Do not print the ssl cert in debug logs
|
||||
* Add noop storage backend to temporarily disable backups
|
||||
* Replace native-dns module with dig to prevent spurious crashes
|
||||
* Cleanup unfinished and errored backups
|
||||
* Set a timelimit of 4 hours for backup to finish
|
||||
|
||||
[0.160.0]
|
||||
* Fix disk graphs when using device mapper
|
||||
* Prevent email view from flickering
|
||||
* Prepare for 1.0
|
||||
|
||||
[1.0.0]
|
||||
* Make selfhosting great again
|
||||
|
||||
[1.0.1]
|
||||
* Notification improvements
|
||||
|
||||
[1.0.2]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.0]
|
||||
* Add support for email catch-all
|
||||
* Support Cloudrons on subdomains
|
||||
|
||||
[1.1.1]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.2]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.3]
|
||||
* Notification improvements
|
||||
|
||||
[1.2.0]
|
||||
* Relay emails optionally via external SMTP server email (mailgun, sendgrid etc)
|
||||
* (experimental) Preserve the docker storage driver across updates
|
||||
* Reduce mysql password length to 48
|
||||
|
||||
[1.2.1]
|
||||
* Set max ttl of unbound to 5 minutes
|
||||
* Fix issue where mail container does not cleanup LDAP connections properly
|
||||
* Update node to 6.11.1
|
||||
|
||||
[1.3.0]
|
||||
* Add option to configure robots.txt for each app from the web interface
|
||||
* Make sure zoneName is not lost across updates
|
||||
* Save manually triggered app backups under a datetime prefix
|
||||
* Optionally disable FROM validation check in the mail container. This will allow apps to send emails with arbitrary FROM addresses
|
||||
* Set X-Forwarded-Port in the reverse proxy. This fixes a problem with plugins of certain apps (like Jetpack)
|
||||
* Send a weekly activity digest about pending and applied Cloudron and app updates
|
||||
|
||||
[1.4.0]
|
||||
* (mail) Update Haraka to 2.8.14. Contains many stability fixes
|
||||
* Exoscale SOS can now be used for backup storage
|
||||
* Fix cron pattern that made Cloudron erroneously send out weekly digest mails every hour on wednesday
|
||||
* Add Cloudflare DNS backend (thanks @abhishek)
|
||||
* Ensure Cloudron is only installed on an EXT4 root file system (required by Docker)
|
||||
* Mark app package major releases as blocking and require approval by Cloudron admin
|
||||
|
||||
[1.4.1]
|
||||
* Do not display backup region when using minio and exoscale SOS
|
||||
* Fix javascript error in email view
|
||||
* Add html version of the digest email
|
||||
* Fix issue where collectd was collecting information about devicemapper mounts
|
||||
|
||||
|
||||
@@ -46,10 +46,14 @@ Try our demo at https://my-demo.cloudron.me (username: cloudron password: cloudr
|
||||
## Installing
|
||||
|
||||
You can install the Cloudron platform on your own server or get a managed server
|
||||
from cloudron.io.
|
||||
from cloudron.io. In either case, the Cloudron platform will keep your server and
|
||||
apps up-to-date and secure.
|
||||
|
||||
* [Selfhosting](https://cloudron.io/references/selfhosting.html)
|
||||
* [Managed Hosting](https://cloudron.io/pricing.html)
|
||||
* [Selfhosting](https://cloudron.io/references/selfhosting.html) - [Pricing](https://cloudron.io/pricing.html)
|
||||
* [Managed Hosting](https://cloudron.io/managed.html)
|
||||
|
||||
The wiki has instructions on how you can install and update the Cloudron and the
|
||||
apps from source.
|
||||
|
||||
## Documentation
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
|
||||
apt-get -y install \
|
||||
acl \
|
||||
awscli \
|
||||
btrfs-tools \
|
||||
build-essential \
|
||||
cron \
|
||||
curl \
|
||||
@@ -42,57 +41,30 @@ apt-get -y install \
|
||||
unattended-upgrades \
|
||||
unbound
|
||||
|
||||
# this ensures that unattended upgrades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
mkdir -p /usr/local/node-6.9.2
|
||||
curl -sL https://nodejs.org/dist/v6.9.2/node-v6.9.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.9.2
|
||||
ln -sf /usr/local/node-6.9.2/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-6.9.2/bin/npm /usr/bin/npm
|
||||
mkdir -p /usr/local/node-6.11.1
|
||||
curl -sL https://nodejs.org/dist/v6.11.1/node-v6.11.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.1
|
||||
ln -sf /usr/local/node-6.11.1/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-6.11.1/bin/npm /usr/bin/npm
|
||||
apt-get install -y python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
docker_key="-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
|
||||
ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
|
||||
mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
|
||||
TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
|
||||
dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
|
||||
X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
|
||||
HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
|
||||
NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
|
||||
hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
|
||||
65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
|
||||
zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
|
||||
tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
|
||||
Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
|
||||
AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
|
||||
Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
|
||||
1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
|
||||
uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
|
||||
0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
|
||||
L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
|
||||
YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
|
||||
7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
|
||||
jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
|
||||
HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
|
||||
MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
|
||||
TvBR8Q==
|
||||
=Fm3p
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
"
|
||||
echo "$docker_key" | apt-key add -
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
|
||||
apt-get -y update
|
||||
|
||||
# create systemd drop-in file
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
apt-mark hold docker-engine # do not update docker
|
||||
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.03.1~ce-0~ubuntu-xenial_amd64.deb -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/docker.deb
|
||||
rm /tmp/docker.deb
|
||||
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "devicemapper" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of devicemapper"
|
||||
@@ -124,3 +96,11 @@ if ! apt-get install -y collectd collectd-utils; then
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflicts with unbound)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 33 KiB |
|
Before Width: | Height: | Size: 22 KiB |
|
Before Width: | Height: | Size: 9.0 KiB |
|
Before Width: | Height: | Size: 28 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 18 KiB |
|
Before Width: | Height: | Size: 74 KiB |
|
Before Width: | Height: | Size: 30 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 51 KiB |
|
Before Width: | Height: | Size: 132 KiB |
|
Before Width: | Height: | Size: 71 KiB |
|
Before Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 5.7 KiB |
|
Before Width: | Height: | Size: 16 KiB |
|
Before Width: | Height: | Size: 49 KiB |
|
Before Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 36 KiB |
|
Before Width: | Height: | Size: 9.4 KiB |
|
Before Width: | Height: | Size: 9.7 KiB |
@@ -1,320 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Addons are services like database, authentication, email, caching that are part of the
|
||||
Cloudron runtime. Setup, provisioning, scaling and maintenance of addons is taken care of
|
||||
by the runtime.
|
||||
|
||||
The fundamental idea behind addons is to allow sharing of Cloudron resources across applications.
|
||||
For example, a single MySQL server instance can be used across multiple apps. The Cloudron
|
||||
runtime sets up addons in such a way that apps are isolated from each other.
|
||||
|
||||
# Using Addons
|
||||
|
||||
Addons are opt-in and must be specified in the [Cloudron Manifest](/references/manifest.html).
|
||||
When the app runs, environment variables contain the necessary information to access the addon.
|
||||
For example, the mysql addon sets the `MYSQL_URL` environment variable which is the
|
||||
connection string that can be used to connect to the database.
|
||||
|
||||
When working with addons, developers need to remember the following:
|
||||
* Environment variables are subject to change every time the app restarts. This can happen if the
|
||||
Cloudron is rebooted or restored or the app crashes or an addon is re-provisioned. For this reason,
|
||||
applications must never cache the value of environment variables across restarts.
|
||||
|
||||
* Addons must be setup or updated on each application start up. Most applications use DB migration frameworks
|
||||
for this purpose to setup and update the DB schema.
|
||||
|
||||
* Addons are configured in the [addons section](/references/manifest.html#addons) of the manifest as below:
|
||||
```
|
||||
{
|
||||
...
|
||||
"addons": {
|
||||
"oauth": { },
|
||||
"redis" : { }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# All addons
|
||||
|
||||
## email
|
||||
|
||||
This addon allows an app to send and receive emails on behalf of the user. The intended use case is webmail applications.
|
||||
|
||||
If an app wants to send mail (e.g notifications), it must use the [sendmail](/references/addons#sendmail)
|
||||
addon. If the app wants to receive email (e.g user replying to notification), it must use the
|
||||
[recvmail](/references/addons#recvmail) addon instead.
|
||||
|
||||
Apps using the IMAP and ManageSieve services below must be prepared to accept self-signed certificates (this is not a problem
|
||||
because these are addresses internal to the Cloudron).
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_SMTP_SERVER= # SMTP server IP or hostname. Supports STARTTLS (TLS upgrade is enforced).
|
||||
MAIL_SMTP_PORT= # SMTP server port
|
||||
MAIL_IMAP_SERVER= # IMAP server IP or hostname. TLS required.
|
||||
MAIL_IMAP_PORT= # IMAP server port
|
||||
MAIL_SIEVE_SERVER= # ManageSieve server IP or hostname. TLS required.
|
||||
MAIL_SIEVE_PORT= # ManageSieve server port
|
||||
MAIL_DOMAIN= # Domain of the mail server
|
||||
```
|
||||
|
||||
## ldap
|
||||
|
||||
This addon provides LDAP based authentication via LDAP version 3.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
LDAP_SERVER= # ldap server IP
|
||||
LDAP_PORT= # ldap server port
|
||||
LDAP_URL= # ldap url of the form ldap://ip:port
|
||||
LDAP_USERS_BASE_DN= # ldap users base dn of the form ou=users,dc=cloudron
|
||||
LDAP_GROUPS_BASE_DN= # ldap groups base dn of the form ou=groups,dc=cloudron
|
||||
LDAP_BIND_DN= # DN to perform LDAP requests
|
||||
LDAP_BIND_PASSWORD= # Password to perform LDAP requests
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `ldapsearch` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
# list users
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}"
|
||||
|
||||
# list users with authentication (Substitute username and password below)
|
||||
> ldapsearch -x -D cn=<username>,${LDAP_USERS_BASE_DN} -w <password> -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}"
|
||||
|
||||
# list admins
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}" "memberof=cn=admins,${LDAP_GROUPS_BASE_DN}"
|
||||
|
||||
# list groups
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_GROUPS_BASE_DN}"
|
||||
```
|
||||
|
||||
## localstorage
|
||||
|
||||
Since all Cloudron apps run within a read-only filesystem, this addon provides a writeable folder under `/app/data/`.
|
||||
All contents in that folder are included in the backup. On first run, this folder will be empty. Files added in this path
|
||||
as part of the app's image (Dockerfile) won't be present. A common pattern is to create the directory structure required by
|
||||
the app as part of the app's startup script.
|
||||
|
||||
The permissions and ownership of data within that directory are not guaranteed to be preserved. For this reason, each app
|
||||
has to restore permissions as required by the app as part of the app's startup script.
|
||||
|
||||
If the app is running under the recommended `cloudron` user, this can be achieved with:
|
||||
```
|
||||
chown -R cloudron:cloudron /app/data
|
||||
```
|
||||
|
||||
## mongodb
|
||||
|
||||
By default, this addon provides mongodb 2.6.3.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MONGODB_URL= # mongodb url
|
||||
MONGODB_USERNAME= # username
|
||||
MONGODB_PASSWORD= # password
|
||||
MONGODB_HOST= # server IP/hostname
|
||||
MONGODB_PORT= # server port
|
||||
MONGODB_DATABASE= # database name
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `mongo` shell within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
# mongo -u "${MONGODB_USERNAME}" -p "${MONGODB_PASSWORD}" ${MONGODB_HOST}:${MONGODB_PORT}/${MONGODB_DATABASE}
|
||||
|
||||
```
|
||||
## mysql
|
||||
|
||||
By default, this addon provides a single database on MySQL 5.6.19. The database is already created and the application
|
||||
only needs to create the tables.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MYSQL_URL= # the mysql url (only set when using a single database, see below)
|
||||
MYSQL_USERNAME= # username
|
||||
MYSQL_PASSWORD= # password
|
||||
MYSQL_HOST= # server IP/hostname
|
||||
MYSQL_PORT= # server port
|
||||
MYSQL_DATABASE= # database name (only set when using a single database, see below)
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `mysql` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> mysql --user=${MYSQL_USERNAME} --password=${MYSQL_PASSWORD} --host=${MYSQL_HOST} ${MYSQL_DATABASE}
|
||||
|
||||
```
|
||||
|
||||
The `multipleDatabases` option can be set to `true` if the app requires more than one database. When enabled,
|
||||
the following environment variables are injected:
|
||||
|
||||
```
|
||||
MYSQL_DATABASE_PREFIX= # prefix to use to create databases
|
||||
```
|
||||
|
||||
## oauth
|
||||
|
||||
The Cloudron OAuth 2.0 provider can be used in an app to implement Single Sign-On.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
OAUTH_CLIENT_ID= # client id
|
||||
OAUTH_CLIENT_SECRET= # client secret
|
||||
```
|
||||
|
||||
The callback URL required for the OAuth transaction can be constructed from the environment variables below:
|
||||
|
||||
```
|
||||
APP_DOMAIN= # hostname of the app
|
||||
APP_ORIGIN= # origin of the app of the form https://domain
|
||||
API_ORIGIN= # origin of the OAuth provider of the form https://my-cloudrondomain
|
||||
```
|
||||
|
||||
OAuth2 URLs can be constructed as follows:
|
||||
|
||||
```
|
||||
AuthorizationURL = ${API_ORIGIN}/api/v1/oauth/dialog/authorize # see above for API_ORIGIN
|
||||
TokenURL = ${API_ORIGIN}/api/v1/oauth/token
|
||||
```
|
||||
|
||||
The token obtained via OAuth has a restricted scope wherein it can only access the [profile API](/references/api.html#profile). This restriction
|
||||
is so that apps cannot make undesired changes to the user's Cloudron.
|
||||
|
||||
We currently provide OAuth2 integration for Ruby [omniauth](https://github.com/cloudron-io/omniauth-cloudron) and Node.js [passport](https://github.com/cloudron-io/passport-cloudron).
|
||||
|
||||
## postgresql
|
||||
|
||||
By default, this addon provides PostgreSQL 9.4.4.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
POSTGRESQL_URL= # the postgresql url
|
||||
POSTGRESQL_USERNAME= # username
|
||||
POSTGRESQL_PASSWORD= # password
|
||||
POSTGRESQL_HOST= # server name
|
||||
POSTGRESQL_PORT= # server port
|
||||
POSTGRESQL_DATABASE= # database name
|
||||
```
|
||||
|
||||
The postgresql addon whitelists the hstore and pg_trgm extensions to be installable by the database owner.
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `psql` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d ${POSTGRESQL_DATABASE}
|
||||
```
|
||||
|
||||
## recvmail
|
||||
|
||||
The recvmail addon can be used to receive email for the application.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_IMAP_SERVER= # the IMAP server. this can be an IP or DNS name
|
||||
MAIL_IMAP_PORT= # the IMAP server port
|
||||
MAIL_IMAP_USERNAME= # the username to use for authentication
|
||||
MAIL_IMAP_PASSWORD= # the password to use for authentication
|
||||
MAIL_TO= # the "To" address to use
|
||||
MAIL_DOMAIN= # the domain for which email will be received
|
||||
```
|
||||
|
||||
The IMAP server only accepts TLS connections. The app must be prepared to accept self-signed certs (this is not a problem because the
|
||||
imap address is internal to the Cloudron).
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `openssl` tool within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> openssl s_client -connect "${MAIL_IMAP_SERVER}:${MAIL_IMAP_PORT}" -crlf
|
||||
```
|
||||
|
||||
The IMAP command `? LOGIN username password` can then be used to test the authentication.
|
||||
|
||||
## redis
|
||||
|
||||
By default, this addon provides redis 2.8.13. The redis is configured to be persistent and data is preserved across updates
|
||||
and restarts.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
REDIS_URL= # the redis url
|
||||
REDIS_HOST= # server name
|
||||
REDIS_PORT= # server port
|
||||
REDIS_PASSWORD= # password
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `redis-cli` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> redis-cli -h "${REDIS_HOST}" -p "${REDIS_PORT}" -a "${REDIS_PASSWORD}"
|
||||
```
|
||||
|
||||
## scheduler
|
||||
|
||||
The scheduler addon can be used to run tasks at periodic intervals (cron).
|
||||
|
||||
Scheduler can be configured as below:
|
||||
```
|
||||
"scheduler": {
|
||||
"update_feeds": {
|
||||
"schedule": "*/5 * * * *",
|
||||
"command": "/app/code/update_feed.sh"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In the above example, `update_feeds` is the name of the task and is an arbitrary string.
|
||||
|
||||
`schedule` values must fall within the following ranges:
|
||||
|
||||
* Minutes: 0-59
|
||||
* Hours: 0-23
|
||||
* Day of Month: 1-31
|
||||
* Months: 0-11
|
||||
* Day of Week: 0-6
|
||||
|
||||
_NOTE_: scheduler does not support seconds
|
||||
|
||||
`schedule` supports ranges (like standard cron):
|
||||
|
||||
* Asterisk. E.g. *
|
||||
* Ranges. E.g. 1-3,5
|
||||
* Steps. E.g. */2
|
||||
|
||||
`command` is executed through a shell (sh -c). The command runs in the same launch environment
|
||||
as the application. Environment variables, volumes (`/tmp` and `/run`) are all
|
||||
shared with the main application.
|
||||
|
||||
If a task is still running when a new instance of the task is scheduled to be started, the previous
|
||||
task instance is killed.
|
||||
|
||||
|
||||
## sendmail
|
||||
|
||||
The sendmail addon can be used to send email from the application.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_SMTP_SERVER= # the mail server (relay) that apps can use. this can be an IP or DNS name
|
||||
MAIL_SMTP_PORT= # the mail server port
|
||||
MAIL_SMTP_USERNAME= # the username to use for authentication as well as the `from` username when sending emails
|
||||
MAIL_SMTP_PASSWORD= # the password to use for authentication
|
||||
MAIL_FROM= # the "From" address to use
|
||||
MAIL_DOMAIN= # the domain name to use for email sending (i.e username@domain)
|
||||
```
|
||||
|
||||
The SMTP server does not require STARTTLS. If STARTTLS is used, the app must be prepared to accept self-signed certs.
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `swaks` tool within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> swaks --server "${MAIL_SMTP_SERVER}" -p "${MAIL_SMTP_PORT}" --from "${MAIL_SMTP_USERNAME}@${MAIL_DOMAIN}" --body "Test mail from cloudron app at $(hostname -f)" --auth-user "${MAIL_SMTP_USERNAME}" --auth-password "${MAIL_SMTP_PASSWORD}"
|
||||
```
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
# Introduction
|
||||
|
||||
The Cloudron platform is designed to easily install and run web applications.
|
||||
The application architecture is designed to let the Cloudron take care of system
|
||||
operations like updates, backups, firewalls, domain management, certificate management
|
||||
etc. This allows app developers to focus on their application logic instead of deployment.
|
||||
|
||||
At a high level, an application provides an `image` and a `manifest`. The image is simply
|
||||
a docker image that is a bundle of the application code and its dependencies. The manifest
|
||||
file specifies application runtime requirements like database type and authentication scheme.
|
||||
It also provides meta information for display purposes in the [Cloudron Store](/appstore.html)
|
||||
like the title, icon and pricing.
|
||||
|
||||
Web applications like blogs, wikis, password managers, code hosting, document editing,
|
||||
file syncers, notes, email, forums are a natural fit for the Cloudron. Decentralized "social"
|
||||
networks are also good app candidates for the Cloudron.
|
||||
|
||||
# Image
|
||||
|
||||
Application images are created using [Docker](https://www.docker.io). Docker provides a way
|
||||
to package (and containerize) the application as a filesystem which contains its code, system libraries
|
||||
and just about anything the app requires. This flexible approach allows the application to use just
|
||||
about any language or framework.
|
||||
|
||||
Application images are instantiated as `containers`. Cloudron can run one or more isolated instances
|
||||
of the same application as one or more containers.
|
||||
|
||||
Containerizing your application provides the following benefits:
|
||||
* Apps run in the familiar environment that they were packaged for and can have libraries
|
||||
and packages that are independent of the host OS.
|
||||
* Containers isolate applications from one another.
|
||||
|
||||
The [base image](/references/baseimage.html) is the parent of all app images.
|
||||
|
||||
# Cloudron Manifest
|
||||
|
||||
Each app provides a `CloudronManifest.json` that specifies information required for the
|
||||
`Cloudron Store` and for the installation of the image in the Cloudron.
|
||||
|
||||
Information required for container installation includes:
|
||||
* List of `addons` like databases, caches, authentication mechanisms and file systems
|
||||
* The http port on which the container is listening for incoming requests
|
||||
* Additional TCP ports on which the application is listening to (for e.g., git, ssh,
|
||||
irc protocols)
|
||||
|
||||
Information required for the Cloudron Store includes:
|
||||
* Unique App Id
|
||||
* Title
|
||||
* Version
|
||||
* Logo
|
||||
|
||||
See the [manifest reference](/references/manifest.html) for more information.
|
||||
|
||||
# Addons
|
||||
|
||||
Addons are services like database, authentication, email, caching that are part of the
|
||||
Cloudron. Setup, provisioning, scaling and maintenance of addons is taken care of by the
|
||||
Cloudron.
|
||||
|
||||
The fundamental idea behind addons is to allow resource sharing across applications.
|
||||
For example, a single MySQL server instance can be used across multiple apps. The Cloudron
|
||||
sets up addons in such a way that apps are isolated from each other.
|
||||
|
||||
Addons are opt-in and must be specified in the Cloudron Manifest. When the app runs, environment
|
||||
variables contain the necessary information to access the addon. See the
|
||||
[addon reference](/references/addons.html) for more information.
|
||||
|
||||
# Authentication
|
||||
|
||||
The Cloudron provides a centralized dashboard to manage users, roles and permissions. Applications
|
||||
do not create or manage user credentials on their own and instead use one of the various
|
||||
authentication strategies provided by the Cloudron.
|
||||
|
||||
Authentication strategies include OAuth 2.0, LDAP or Simple Auth. See the
|
||||
[Authentication Reference](/references/authentication.html) for more information.
|
||||
|
||||
Authorizing users is application specific and it is only authentication that is delegated to the
|
||||
Cloudron.
|
||||
|
||||
# Cloudron Store
|
||||
|
||||
Cloudron Store provides a market place to publish and optionally monetize your app. Submitting to the
|
||||
Cloudron Store enables any Cloudron user to discover, purchase and install your application with
|
||||
a few clicks.
|
||||
|
||||
# What next?
|
||||
|
||||
* [Package an existing app for the Cloudron](/tutorials/packaging.html)
|
||||
@@ -1,105 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Cloudron provides a centralized dashboard to manage users, roles and permissions. Applications
|
||||
do not create or manage user credentials on their own and instead use one of the various
|
||||
authentication strategies provided by the Cloudron.
|
||||
|
||||
Note that authentication only identifies a user and does not indicate if the user is authorized
|
||||
to perform an action in the application. Authorizing users is application specific and must be
|
||||
implemented by the application.
|
||||
|
||||
# Users & Admins
|
||||
|
||||
Cloudron user management is intentionally very simple. The owner (first user) of the
|
||||
Cloudron is `admin` by default. The `admin` role allows one to install, uninstall and reconfigure
|
||||
applications on the Cloudron.
|
||||
|
||||
A Cloudron `admin` can create one or more users. Cloudron users can login and use any of the installed
|
||||
apps in the Cloudron. In general, adding a cloudron user is akin to adding a person from one's family
|
||||
or organization or team because such users gain access to all apps in the Cloudron. Removing a user
|
||||
immediately revokes access from all apps.
|
||||
|
||||
A Cloudron `admin` can give admin privileges to one or more Cloudron users.
|
||||
|
||||
Each Cloudron user has an unique `username` and an `email`.
|
||||
|
||||
# Strategies
|
||||
|
||||
Cloudron provides multiple authentication strategies.
|
||||
|
||||
* OAuth 2.0 provided by the [OAuth addon](/references/addons.html#oauth)
|
||||
* LDAP provided by the [LDAP addon](/references/addons.html#ldap)
|
||||
|
||||
# Choosing a strategy
|
||||
|
||||
Applications can be broadly categorized based on their user management as follows:
|
||||
|
||||
* Multi-user aware
|
||||
* Such apps have a full fledged user system and support multiple users and groups.
|
||||
* These apps should use OAuth or LDAP.
|
||||
* LDAP and OAuth APIs allow apps to detect if the user is a cloudron `admin`. Apps should use this flag
|
||||
to show the application's admin panel for such users.
|
||||
|
||||
|
||||
* No user
|
||||
* Such apps have no concept of logged-in user.
|
||||
|
||||
* Single user
|
||||
* Such apps only have a single user who is usually also the `admin`.
|
||||
* These apps can use Simple Auth or LDAP since they can authenticate users with a simple HTTP or LDAP request.
|
||||
* Such apps _must_ set the `singleUser` property in the manifest which will restrict login to a single user
|
||||
(configurable through the Cloudron's admin panel).
|
||||
|
||||
# Public and Private apps
|
||||
|
||||
`Private` apps display content only when they have a signed-in user. These apps can choose one of the
|
||||
authentication strategies listed above.
|
||||
|
||||
`Public` apps display content to any visiting user (e.g a blog). These apps have a `login` url to allow
|
||||
the editors & admins to login. This path can be optionally set as the `configurePath` in the manifest for
|
||||
discoverability (for example, some blogs hide the login link).
|
||||
|
||||
Some apps allow the user to choose `private` or `public` mode or some other combination. Such configuration
|
||||
is done at app install time and cannot be changed using a settings interface. It is tempting to show the user
|
||||
a configuration dialog on first installation to switch the modes. This, however, leads the user to believe that
|
||||
this configuration can be changed at any time later. In the case where this setting can be changed dynamically
|
||||
from a settings ui in the app, it's better to simply put some sensible defaults and let the user discover
|
||||
the settings. In the case where such settings cannot be changed dynamically, it is best to simply publish two
|
||||
separate apps in the Cloudron store each with a different configuration.
|
||||
|
||||
# External User Registration
|
||||
|
||||
Some apps allow external users to register and create accounts. For example, a public company chat that
|
||||
can invite anyone to join or a blog allowing registered commenters.
|
||||
|
||||
Such applications must track Cloudron users and external registered users independently (for example, using a flag).
|
||||
As a rule of thumb, apps must provide separate login buttons for each of the possible user sources. Such a design prevents
|
||||
external users from (inadvertently) spoofing Cloudron users.
|
||||
|
||||
Naively handling user registration enables attacks of the following kind:
|
||||
* An external user named `foo` registers in the app.
|
||||
* A LDAP user named `foo` is later created on the Cloudron.
|
||||
* When a user named `foo` logs in, the app cannot determine the correct `foo` anymore. Making separate login buttons for each
|
||||
login source clears the confusion for both the user and the app.
|
||||
|
||||
# Userid
|
||||
|
||||
The preferred approach to track users in an application is a uuid or the Cloudron `username`.
|
||||
The `username` in Cloudron is unique and cannot be changed.
|
||||
|
||||
Tracking users using `email` field is error prone since that may be changed by the user anytime.
|
||||
|
||||
# Single Sign-on
|
||||
|
||||
Single sign-on (SSO) is a property where a user logged in one application automatically logs into
|
||||
another application without having to re-enter his credentials. When applications implement the
|
||||
OAuth strategy, they automatically take part in Cloudron SSO. When a user signs in one application with
|
||||
OAuth, they will automatically log into any other app implementing OAuth.
|
||||
|
||||
Conversely, signing off from one app logs them off from all the apps.
|
||||
|
||||
# Security
|
||||
|
||||
The LDAP and Simple Auth strategies require the user to provide their plain text passwords to the
|
||||
application. This might be a cause of concern and app developers are thus highly encouraged to integrate
|
||||
with OAuth. OAuth also has the advantage of supporting Single Sign On.
|
||||
@@ -1,94 +0,0 @@
|
||||
# Overview
|
||||
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.10.0`.
|
||||
|
||||
The base image already contains most popular software packages including node, nginx, apache,
|
||||
ruby, PHP. Using the base image greatly reduces the size of app images.
|
||||
|
||||
The goal of the base image is simply to provide pre-downloaded software packages. The packages
|
||||
are not configured in any way and it's up to the application to configure them as they choose.
|
||||
For example, while `apache` is installed, there are no meaningful site configurations that the
|
||||
application can use.
|
||||
|
||||
# Packages
|
||||
|
||||
The following packages are part of the base image. If you need another version, you will have to
|
||||
install it yourself.
|
||||
|
||||
* Apache 2.4.18
|
||||
* Composer 1.2.0
|
||||
* Go 1.6.4, 1.7.5 (install under `/usr/local/go-<version>`)
|
||||
* Gunicorn 19.4.5
|
||||
* Java 1.8
|
||||
* Maven 3.3.9
|
||||
* Mongo 2.6.10
|
||||
* MySQL Client 5.7.17
|
||||
* nginx 1.10.0
|
||||
* Node 0.10.48, 0.12.18, 4.7.3, 6.9.5 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Perl 5.22.1
|
||||
* PHP 7.0.13
|
||||
* Postgresql client 9.5.4
|
||||
* Python 2.7.12
|
||||
* Redis 3.0.6
|
||||
* Ruby 2.3.1
|
||||
* sqlite3 3.11.0
|
||||
* Supervisor 3.2.0
|
||||
* uwsgi 2.0.12
|
||||
|
||||
# Inspecting the base image
|
||||
|
||||
The base image can be inspected by installing [Docker](https://docs.docker.com/installation/).
|
||||
|
||||
Once installed, pull down the base image locally using the following command:
|
||||
```
|
||||
docker pull cloudron/base:0.10.0
|
||||
```
|
||||
|
||||
To inspect the base image:
|
||||
```
|
||||
docker run -ti cloudron/base:0.10.0 /bin/bash
|
||||
```
|
||||
|
||||
*Note:* Please use `docker 1.9.0` or above to pull the base image. Doing otherwise results in a base
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.10.0` is `5ec8ca8525be`.
|
||||
|
||||
# The `cloudron` user
|
||||
|
||||
The base image contains a user named `cloudron` that apps can use to run their app.
|
||||
|
||||
It is good security practice to run apps as a non-privileged user.
|
||||
|
||||
# Env vars
|
||||
|
||||
The following environment variables are set as part of the application runtime.
|
||||
|
||||
## API_ORIGIN
|
||||
|
||||
API_ORIGIN is set to the HTTP(S) origin of this Cloudron's API. For example,
|
||||
`https://my-girish.cloudron.us`.
|
||||
|
||||
## APP_DOMAIN
|
||||
|
||||
APP_DOMAIN is set to the domain name of the application. For example, `app-girish.cloudron.us`.
|
||||
|
||||
## APP_ORIGIN
|
||||
|
||||
APP_ORIGIN is set to the HTTP(S) origin of the application. This is the origin which the
|
||||
user can use to reach the application. For example, `https://app-girish.cloudron.us`.
|
||||
|
||||
## CLOUDRON
|
||||
|
||||
CLOUDRON is always set to '1'. This is useful to write Cloudron specific code.
|
||||
|
||||
## WEBADMIN_ORIGIN
|
||||
|
||||
WEBADMIN_ORIGIN is set to the HTTP(S) origin of the Cloudron's web admin. For example,
|
||||
`https://my-girish.cloudron.us`.
|
||||
|
||||
# Node.js
|
||||
|
||||
The base image comes pre-installed with various node.js versions.
|
||||
|
||||
They can be used by adding `ENV PATH /usr/local/node-<version>/bin:$PATH`.
|
||||
|
||||
See [Packages](/references/baseimage.html#packages) for available versions.
|
||||
@@ -1,93 +0,0 @@
|
||||
# Best practices
|
||||
|
||||
## Overview
|
||||
|
||||
This document explains the spirit of what makes a Cloudron app.
|
||||
|
||||
## No Setup
|
||||
|
||||
Cloudron apps do not show a setup screen after installation and should choose reasonable
|
||||
defaults.
|
||||
|
||||
Databases, email configuration should be automatically picked up using [addons](/references/addons.html).
|
||||
|
||||
Admin role for the application can be detected dynamically using one of the [authentication](/references/authentication.html)
|
||||
strategies.
|
||||
|
||||
## Image
|
||||
|
||||
The Dockerfile contains a specification for building an application image.
|
||||
|
||||
* Install any required software packages in the Dockerfile.
|
||||
|
||||
* Create static configuration files in the Dockerfile.
|
||||
|
||||
* Create symlinks to dynamic configuration files under `/run` in the Dockerfile.
|
||||
|
||||
* Docker supports restarting processes natively. Should your application crash, it will
|
||||
be restarted automatically. If your application is a single process, you do not require
|
||||
any process manager.
|
||||
|
||||
* The main process must handle `SIGTERM` and forward it as required to child processes. `bash`
|
||||
does not automatically forward signals to child processes. For this reason, when using a startup
|
||||
shell script, remember to use `exec <app>` as the last line. Doing so will replace bash with your
|
||||
program and allows your program to handle signals as required.
|
||||
|
||||
* Use `supervisor`, `pm2` or any of the other process managers if your application has more
|
||||
than one component. This excludes web servers like apache, nginx which can already manage their
|
||||
children by themselves. Be sure to pick a process manager that forwards signals to child processes.
|
||||
|
||||
* Disable auto updates for apps. Updates must be triggered through the Cloudron Store. This allows the admin
|
||||
to manage updates and downtime in a central location (the Cloudron Webadmin).
|
||||
|
||||
## File system
|
||||
|
||||
The Cloudron runs the application image as read-only. The app can only write to the following directories:
|
||||
|
||||
* `/tmp` - use this for temporary files.
|
||||
|
||||
* `/run` - use this for runtime configuration and any dynamic data.
|
||||
|
||||
* `/app/data` - When the `localstorage` addon is enabled, any data under this directory is automatically backed up.
|
||||
|
||||
## Logging
|
||||
|
||||
Cloudron applications stream their logs to stdout and stderr. In contrast to logging
|
||||
to files, this approach has many advantages:
|
||||
|
||||
* App does not need to rotate logs and the Cloudron takes care of managing logs
|
||||
* App does not need special mechanism to release log file handles (on a log rotate)
|
||||
* Integrates better with tooling like `cloudron cli`
|
||||
|
||||
This document gives you some recipes for configuring popular libraries to log to stdout. See
|
||||
[base image](/references/baseimage.html#configuring) on how to configure various libraries to log to stdout/stderr.
|
||||
|
||||
|
||||
## Memory
|
||||
|
||||
By default, applications get 256MB RAM (including swap). This can be changed using the `memoryLimit` field in the manifest.
|
||||
|
||||
Design your application runtime for concurrent use by 10s of users. The Cloudron is not designed for concurrent access by
|
||||
100s or 1000s of users.
|
||||
|
||||
## Startup
|
||||
|
||||
* Apps must not present a post-installation screen on first run. It should be already pre-configured for
|
||||
a specific purpose.
|
||||
|
||||
* Do not run as `root`. Apps can use the `cloudron` user which is part of the [base image](/references/baseimage.html)
|
||||
for this purpose or create their own.
|
||||
|
||||
* When using the `localstorage` addon, the application must change the ownership of files in `/app/data` as desired using `chown`. This
|
||||
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
|
||||
updates.
|
||||
|
||||
* Addon information (mail, database) is exposed as environment variables. An application must use these values directly
|
||||
and not cache them across restarts. If the variables are stored in a configuration file, then the configuration file
|
||||
must be regenerated on every application start. This is usually done using a configuration template that is patched
|
||||
on every startup.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
This saves the user from having to manage separate set of users for different apps.
|
||||
@@ -1,47 +0,0 @@
|
||||
# Cloudron Button
|
||||
|
||||
The `Cloudron Button` allows anyone to install an application with
|
||||
the click of a button on their Cloudron.
|
||||
|
||||
The button can be added to just about any website including the application's website
|
||||
and README.md files in GitHub repositories.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The `Cloudron Button` is intended to work only for applications that have been
|
||||
published on the Cloudron Store. The [basic tutorial](/tutorials/basic.html#publishing)
|
||||
gives an overview of how to package and publish your application for the
|
||||
Cloudron Store.
|
||||
|
||||
## HTML Snippet
|
||||
|
||||
```
|
||||
<a href="https://cloudron.io/button.html?app=<appid>"><img src="https://cloudron.io/img/button32.png"></a>
|
||||
```
|
||||
|
||||
_Note_: Replace `<appid>` with your application's id.
|
||||
|
||||
## Markdown Snippet
|
||||
|
||||
```
|
||||
[](https://cloudron.io/button.html?app=<appid>)
|
||||
```
|
||||
|
||||
_Note_: Replace `<appid>` with your application's id.
|
||||
|
||||
|
||||
## Button Height
|
||||
|
||||
The button may be used in different heights - 32, 48 and 64 pixels.
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
or as SVG
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
_Note_: Clicking the buttons above will install [Gogs](http://gogs.io/) on your Cloudron.
|
||||
@@ -1,441 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Every Cloudron Application contains a `CloudronManifest.json`.
|
||||
|
||||
The manifest contains two categories of information:
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example,
|
||||
the title, author information, description etc
|
||||
|
||||
* Information for installing the app on the Cloudron. This includes fields
|
||||
like httpPort, tcpPorts.
|
||||
|
||||
A CloudronManifest.json can **only** contain fields that are listed as part of this
|
||||
specification. The Cloudron Store and the Cloudron *may* reject applications that have
|
||||
extra fields.
|
||||
|
||||
Here is an example manifest:
|
||||
|
||||
```
|
||||
{
|
||||
"id": "com.example.test",
|
||||
"title": "Example Application",
|
||||
"author": "Girish Ramakrishnan <girish@cloudron.io>",
|
||||
"description": "This is an example app",
|
||||
"tagline": "A great beginning",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"manifestVersion": 1,
|
||||
"website": "https://www.example.com",
|
||||
"contactEmail": "support@clourdon.io",
|
||||
"icon": "file://icon.png",
|
||||
"tags": [ "test", "collaboration" ],
|
||||
"mediaLinks": [ "https://images.rapgenius.com/fd0175ef780e2feefb30055be9f2e022.520x343x1.jpg" ]
|
||||
}
|
||||
```
|
||||
|
||||
# Fields
|
||||
|
||||
## addons
|
||||
|
||||
Type: object
|
||||
|
||||
Required: no
|
||||
|
||||
Allowed keys
|
||||
* [email](addons.html#email)
|
||||
* [ldap](addons.html#ldap)
|
||||
* [localstorage](addons.html#localstorage)
|
||||
* [mongodb](addons.html#mongodb)
|
||||
* [mysql](addons.html#mysql)
|
||||
* [oauth](addons.html#oauth)
|
||||
* [postgresql](addons.html#postgresql)
|
||||
* [recvmail](addons.html#recvmail)
|
||||
* [redis](addons.html#redis)
|
||||
* [sendmail](addons.html#sendmail)
|
||||
|
||||
The `addons` object lists all the [addons](addons.html) and the addon configuration used by the application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"mongodb": {}
|
||||
}
|
||||
```
|
||||
|
||||
## author
|
||||
|
||||
Type: string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `author` field contains the name and email of the app developer (or company).
|
||||
|
||||
Example:
|
||||
```
|
||||
"author": "Cloudron UG <girish@cloudron.io>"
|
||||
```
|
||||
|
||||
## changelog
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `changelog` field contains the changes in this version of the application. This string
|
||||
can be a markdown style bulleted list.
|
||||
|
||||
Example:
|
||||
```
|
||||
"changelog": "* Add support for IE8 \n* New logo"
|
||||
```
|
||||
|
||||
## contactEmail
|
||||
|
||||
Type: email
|
||||
|
||||
Required: yes
|
||||
|
||||
The `contactEmail` field contains the email address that Cloudron users can contact for any
|
||||
bug reports and suggestions.
|
||||
|
||||
Example:
|
||||
```
|
||||
"contactEmail": "support@testapp.com"
|
||||
```
|
||||
|
||||
## description
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `description` field contains a detailed description of the app. This information is shown
|
||||
to the user when they install the app from the Cloudron Store.
|
||||
|
||||
Example:
|
||||
```
|
||||
"description": "This is a detailed description of this app."
|
||||
```
|
||||
|
||||
A large `description` can be unwieldy to manage and edit inside the CloudronManifest.json. For
|
||||
this reason, the `description` can also contain a file reference. The Cloudron CLI tool fills up
|
||||
the description from this file when publishing your application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"description": "file://DESCRIPTION.md"
|
||||
```
|
||||
|
||||
## healthCheckPath
|
||||
|
||||
Type: url path
|
||||
|
||||
Required: yes
|
||||
|
||||
The `healthCheckPath` field is used by the Cloudron Runtime to determine if your app is running and
|
||||
responsive. The app must return a 2xx HTTP status code as a response when this path is queried. In
|
||||
most cases, the default "/" will suffice but there might be cases where periodically querying "/"
|
||||
is an expensive operation. In addition, the app might want to use a specialized route should it
|
||||
want to perform some specialized internal checks.
|
||||
|
||||
Example:
|
||||
```
|
||||
"healthCheckPath": "/"
|
||||
```
|
||||
## httpPort
|
||||
|
||||
Type: positive integer
|
||||
|
||||
Required: yes
|
||||
|
||||
The `httpPort` field contains the TCP port on which your app is listening for HTTP requests. This
|
||||
is the HTTP port the Cloudron will use to access your app internally.
|
||||
|
||||
While not required, it is good practice to mark this port as `EXPOSE` in the Dockerfile.
|
||||
|
||||
Cloudron Apps are containerized and thus two applications can listen on the same port. In reality,
|
||||
they are in different network namespaces and do not conflict with each other.
|
||||
|
||||
Note that this port has to be HTTP and not HTTPS or any other non-HTTP protocol. HTTPS proxying is
|
||||
handled by the Cloudron platform (since it owns the certificates).
|
||||
|
||||
Example:
|
||||
```
|
||||
"httpPort": 8080
|
||||
```
|
||||
|
||||
## icon
|
||||
|
||||
Type: local image filename
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `icon` field is used to display the application icon/logo in the Cloudron Store. Icons are expected
|
||||
to be square of size 256x256.
|
||||
|
||||
```
|
||||
"icon": "file://icon.png"
|
||||
```
|
||||
|
||||
## id
|
||||
|
||||
Type: reverse domain string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `id` is a unique human friendly Cloudron Store id. This is similar to reverse domain string names used
|
||||
as java package names. The convention is to base the `id` based on a domain that you own.
|
||||
|
||||
The Cloudron tooling allows you to build applications with any `id`. However, you will be unable to publish
|
||||
the application if the id is already in use by another application.
|
||||
|
||||
```
|
||||
"id": "io.cloudron.testapp"
|
||||
```
|
||||
|
||||
## manifestVersion
|
||||
|
||||
Type: integer
|
||||
|
||||
Required: yes
|
||||
|
||||
`manifestVersion` specifies the version of the manifest and is always set to 1.
|
||||
|
||||
```
|
||||
"manifestVersion": 1
|
||||
```
|
||||
|
||||
## mediaLinks
|
||||
|
||||
Type: array of urls
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `mediaLinks` field contains an array of links that the Cloudron Store uses to display a slide show of pictures of the application.
|
||||
|
||||
They have to be publicly reachable via `https` and should have an aspect ratio of 3 to 1.
|
||||
For example `600px by 200px` (width/height).
|
||||
|
||||
```
|
||||
"mediaLinks": [
|
||||
"https://s3.amazonaws.com/cloudron-app-screenshots/org.owncloud.cloudronapp/556f6a1d82d5e27a7c4fca427ebe6386d373304f/2.jpg",
|
||||
"https://images.rapgenius.com/fd0175ef780e2feefb30055be9f2e022.520x343x1.jpg"
|
||||
]
|
||||
```
|
||||
|
||||
## memoryLimit
|
||||
|
||||
Type: bytes (integer)
|
||||
|
||||
Required: no
|
||||
|
||||
The `memoryLimit` field is the maximum amount of memory (including swap) in bytes an app is allowed to consume before it
|
||||
gets killed and restarted.
|
||||
|
||||
By default, all apps have a memoryLimit of 256MB. For example, to have a limit of 500MB,
|
||||
|
||||
```
|
||||
"memoryLimit": 524288000
|
||||
```
|
||||
|
||||
## maxBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `maxBoxVersion` field is the maximum box version that the app can possibly run on. Attempting to install the app on
|
||||
a box greater than `maxBoxVersion` will fail.
|
||||
|
||||
This is useful when a new box release introduces features which are incompatible with the app. This situation is quite
|
||||
unlikely and it is recommended to leave this unset.
|
||||
|
||||
## minBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `minBoxVersion` field is the minimum box version that the app can possibly run on. Attempting to install the app on
|
||||
a box lesser than `minBoxVersion` will fail.
|
||||
|
||||
This is useful when the app relies on features that are only available from a certain version of the box. If unset, the
|
||||
default value is `0.0.1`.
|
||||
|
||||
## postInstallMessage
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: no
|
||||
|
||||
The `postInstallMessage` field is a message that is displayed to the user after an app is installed.
|
||||
|
||||
The intended use of this field is to display some post installation steps that the user has to carry out to
|
||||
complete the installation. For example, displaying the default admin credentials and informing the user to
|
||||
change it.
|
||||
|
||||
The message can have the following special tags:
|
||||
* `<sso> ... </sso>` - Content in `sso` blocks is shown when SSO is enabled.
|
||||
* `<nosso> ... </nosso>` - Content in `nosso` blocks is shown when SSO is disabled.
|
||||
|
||||
## optionalSso
|
||||
|
||||
Type: boolean
|
||||
|
||||
Required: no
|
||||
|
||||
The `optionalSso` field can be set to true for apps that can be installed optionally without using the Cloudron user management.
|
||||
|
||||
This only applies if any Cloudron auth related addons are used. When set, the Cloudron will not inject the auth related addon environment variables.
|
||||
Any app startup scripts have to be able to deal with missing env variables in this case.
|
||||
|
||||
## tagline
|
||||
|
||||
Type: one-line string
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `tagline` is used by the Cloudron Store to display a single line short description of the application.
|
||||
|
||||
```
|
||||
"tagline": "The very best note keeper"
|
||||
```
|
||||
|
||||
## tags
|
||||
|
||||
Type: Array of strings
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `tags` are used by the Cloudron Store for filtering searches by keyword.
|
||||
|
||||
```
|
||||
"tags": [ "git", "version control", "scm" ]
|
||||
```
|
||||
|
||||
## targetBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `targetBoxVersion` field is the box version that the app was tested on. By definition, this version has to be greater
|
||||
than the `minBoxVersion`.
|
||||
|
||||
The box uses this value to enable compatibility behavior of APIs. For example, an app sets the targetBoxVersion to 0.0.5
|
||||
and is published on the store. Later, box version 0.0.10 introduces a new feature that conflicts with how apps used
|
||||
to run in 0.0.5 (say SELinux was enabled for apps). When the box runs such an app, it ensures compatible behavior
|
||||
and will disable the SELinux feature for the app.
|
||||
|
||||
If unspecified, this value defaults to `minBoxVersion`.
|
||||
|
||||
## tcpPorts
|
||||
|
||||
Type: object
|
||||
|
||||
Required: no
|
||||
|
||||
Syntax: Each key is the environment variable. Each value is an object containing `title`, `description` and `defaultValue`.
|
||||
An optional `containerPort` may be specified.
|
||||
|
||||
The `tcpPorts` field provides information on the non-http TCP ports/services that your application is listening on. During
|
||||
installation, the user can decide how these ports are exposed from their Cloudron.
|
||||
|
||||
For example, if the application runs an SSH server at port 29418, this information is listed here. At installation time,
|
||||
the user can decide any of the following:
|
||||
* Expose the port with the suggested `defaultValue` to the outside world. This will only work if no other app is being exposed at same port.
|
||||
* Provide an alternate value on which the port is to be exposed to outside world.
|
||||
* Disable the port/service.
|
||||
|
||||
To illustrate, the application lists the ports as below:
|
||||
```
|
||||
"tcpPorts": {
|
||||
"SSH_PORT": {
|
||||
"title": "SSH Port",
|
||||
"description": "SSH Port over which repos can be pushed & pulled",
|
||||
"defaultValue": 29418,
|
||||
"containerPort": 22
|
||||
}
|
||||
},
|
||||
```
|
||||
|
||||
In the above example:
|
||||
* `SSH_PORT` is an app specific environment variable. Only strings, numbers and _ (underscore) are allowed. The author has to ensure that they don't clash with platform provided variable names.
|
||||
|
||||
* `title` is a short one line information about this port/service.
|
||||
|
||||
* `description` is a multi line description about this port/service.
|
||||
|
||||
* `defaultValue` is the recommended port value to be shown in the app installation UI.
|
||||
|
||||
* `containerPort` is the port that the app is listening on (recall that each app has its own networking namespace).
|
||||
|
||||
In more detail:
|
||||
|
||||
* If the user decides to disable the SSH service, this environment variable `SSH_PORT` is absent. Applications _must_ detect this on
|
||||
start up and disable these services.
|
||||
|
||||
* `SSH_PORT` is set to the value of the exposed port. Should the user choose to expose the SSH server on port 6000, then the
|
||||
value of SSH_PORT is 6000.
|
||||
|
||||
* `defaultValue` is **only** used for display purposes in the app installation UI. This value is independent of the value
|
||||
that the app is listening on. For example, the app can run an SSH server at port 22 but still recommend a value of 29418 to the user.
|
||||
|
||||
* `containerPort` is the port that the app is listening on. The Cloudron runtime will _bridge_ the user chosen external port
|
||||
with the app specific `containerPort`. Cloudron Apps are containerized and each app has its own networking namespace.
|
||||
As a result, different apps can have the same `containerPort` value because these values are namespaced.
|
||||
|
||||
* The environment variable `SSH_PORT` may be used by the app to display external URLs. For example, the app might want to display
|
||||
the SSH URL. In such a case, it would be incorrect to use the `containerPort` 22 or the `defaultValue` 29418 since this is not
|
||||
the value chosen by the user.
|
||||
|
||||
* `containerPort` is optional and can be omitted, in which case the bridged port numbers are the same internally and externally.
|
||||
Some apps use the same variable (in their code) for listen port and user visible display strings. When packaging these apps,
|
||||
it might be simpler to listen on `SSH_PORT` internally. In such cases, the app can omit the `containerPort` value and should
|
||||
instead reconfigure itself to listen internally on `SSH_PORT` on each start up.
|
||||
|
||||
## title
|
||||
|
||||
Type: string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `title` is the primary application title displayed on the Cloudron Store.
|
||||
|
||||
Example:
|
||||
```
|
||||
"title": "Gitlab"
|
||||
```
|
||||
|
||||
## version
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `version` field specifies a [semver](http://semver.org/) string. The version is used by the Cloudron to compare versions and to
|
||||
determine if an update is available.
|
||||
|
||||
Example:
|
||||
```
|
||||
"version": "1.1.0"
|
||||
```
|
||||
|
||||
## website
|
||||
|
||||
Type: url
|
||||
|
||||
Required: yes
|
||||
|
||||
The `website` field is a URL where the user can read more about the application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"website": "https://example.com/myapp"
|
||||
```
|
||||
@@ -1,61 +0,0 @@
|
||||
# Configuration Recipes
|
||||
|
||||
## nginx
|
||||
|
||||
`nginx` is often used as a reverse proxy in front of the application, to dispatch to different backend programs based on the request route or other characteristics. In such a case it is recommended to run nginx and the application through a process manager like `supervisor`.
|
||||
|
||||
Example nginx supervisor configuration file:
|
||||
```
|
||||
[program:nginx]
|
||||
directory=/tmp
|
||||
command=/usr/sbin/nginx -g "daemon off;"
|
||||
user=root
|
||||
autostart=true
|
||||
autorestart=true
|
||||
stdout_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
```
|
||||
|
||||
The nginx configuration, provided with the base image, can be used by adding an application specific config file under `/etc/nginx/sites-enabled/` when building the docker image.
|
||||
|
||||
```
|
||||
ADD <app config file> /etc/nginx/sites-enabled/<app config file>
|
||||
```
|
||||
|
||||
Since the base image nginx configuration is unpatched from the ubuntu package, the application configuration has to ensure nginx is using `/run/` instead of `/var/lib/nginx/` to support the read-only filesystem nature of a Cloudron application.
|
||||
|
||||
Example nginx app config file:
|
||||
```
|
||||
client_body_temp_path /run/client_body;
|
||||
proxy_temp_path /run/proxy_temp;
|
||||
fastcgi_temp_path /run/fastcgi_temp;
|
||||
scgi_temp_path /run/scgi_temp;
|
||||
uwsgi_temp_path /run/uwsgi_temp;
|
||||
|
||||
server {
|
||||
listen 8000;
|
||||
|
||||
root /app/code/dist;
|
||||
|
||||
location /api/v1/ {
|
||||
proxy_pass http://127.0.0.1:8001;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## supervisor
|
||||
|
||||
Use this in the program's config:
|
||||
|
||||
```
|
||||
[program:app]
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
```
|
||||
@@ -1,395 +0,0 @@
|
||||
# Overview
|
||||
|
||||
The Cloudron platform can be installed on public cloud servers from EC2, Digital Ocean, Hetzner,
|
||||
Linode, OVH, Scaleway, Vultr etc. Cloudron also runs well on a home server or company intranet.
|
||||
|
||||
If you run into any trouble following this guide, ask us at our [chat](https://chat.cloudron.io).
|
||||
|
||||
# Understand
|
||||
|
||||
Before installing the Cloudron, it is helpful to understand Cloudron's design. The Cloudron
|
||||
intends to make self-hosting effortless. It takes care of updates, backups, firewall, dns setup,
|
||||
certificate management etc. All app and user configuration is carried out using the web interface.
|
||||
|
||||
This approach to self-hosting means that the Cloudron takes complete ownership of the server and
|
||||
only tracks changes that were made via the web interface. Any external changes made to the server
|
||||
(i.e other than via the Cloudron web interface or API) may be lost across updates.
|
||||
|
||||
The Cloudron requires a domain name when it is installed. Apps are installed into subdomains.
|
||||
The `my` subdomain is special and is the location of the Cloudron web interface. For this to
|
||||
work, the Cloudron requires a way to programmatically configure the DNS entries of the domain.
|
||||
Note that the Cloudron will never overwrite _existing_ DNS entries and refuse to install
|
||||
apps on existing subdomains (so, it is safe to reuse an existing domain that runs other services).
|
||||
|
||||
# Cloud Server
|
||||
|
||||
DigitalOcean and EC2 (Amazon Web Services) are frequently tested by us.
|
||||
|
||||
Please use the below links to support us with referrals:
|
||||
* [Amazon EC2](https://aws.amazon.com/ec2/)
|
||||
* [DigitalOcean](https://m.do.co/c/933831d60a1e)
|
||||
|
||||
In addition to those, the Cloudron community has successfully installed the platform on those providers:
|
||||
* [Amazon Lightsail](https://amazonlightsail.com/)
|
||||
* [hosttech](https://www.hosttech.ch/?promocode=53619290)
|
||||
* [Linode](https://www.linode.com/?r=f68d816692c49141e91dd4cef3305da457ac0f75)
|
||||
* [OVH](https://www.ovh.com/)
|
||||
* [Rosehosting](https://secure.rosehosting.com/clientarea/?affid=661)
|
||||
* [Scaleway](https://www.scaleway.com/)
|
||||
* [So you Start](https://www.soyoustart.com/)
|
||||
* [Vultr](http://www.vultr.com/?ref=7110116-3B)
|
||||
|
||||
Please let us know if any of them requires tweaks or adjustments.
|
||||
|
||||
# Installing
|
||||
|
||||
## Create server
|
||||
|
||||
Create an `Ubuntu 16.04 (Xenial)` server with at-least `1gb` RAM and 20GB disk space.
|
||||
Do not make any changes to vanilla ubuntu. Be sure to allocate a static IPv4 address
|
||||
for your server.
|
||||
|
||||
Cloudron has a built-in firewall and ports are opened and closed dynamically, as and when
|
||||
apps are installed, re-configured or removed. For this reason, be sure to open all TCP and
|
||||
UDP traffic to the server and leave the traffic management to the Cloudron.
|
||||
|
||||
### Linode
|
||||
|
||||
Since Linode does not manage SSH keys, be sure to add the public key to
|
||||
`/root/.ssh/authorized_keys`.
|
||||
|
||||
### Scaleway
|
||||
|
||||
Use the [boot script](https://github.com/scaleway-community/scaleway-docker/issues/2) to
|
||||
enable memory accounting.
|
||||
|
||||
## Run setup
|
||||
|
||||
SSH into your server and run the following commands:
|
||||
|
||||
```
|
||||
wget https://cloudron.io/cloudron-setup
|
||||
chmod +x cloudron-setup
|
||||
./cloudron-setup --provider <azure|digitalocean|ec2|lightsail|linode|ovh|rosehosting|scaleway|vultr|generic>
|
||||
```
|
||||
|
||||
The setup will take around 10-15 minutes.
|
||||
|
||||
**cloudron-setup** takes the following arguments:
|
||||
|
||||
* `--provider` is the name of your VPS provider. If the name is not on the list, simply
|
||||
choose `generic`. In most cases, the `generic` provider will work fine.
|
||||
If the Cloudron does not complete initialization, it may mean that
|
||||
we have to add some vendor specific quirks. Please open a
|
||||
[bug report](https://git.cloudron.io/cloudron/box/issues) in that case.
|
||||
|
||||
Optional arguments for installation:
|
||||
|
||||
* `--tls-provider` is the name of the SSL/TLS certificate backend. Defaults to Let's encrypt.
|
||||
Specifying `fallback` will setup the Cloudron to use the fallback wildcard certificate.
|
||||
Initially a self-signed one is provided, which can be overwritten later in the admin interface.
|
||||
This may be useful for non-public installations.
|
||||
|
||||
Optional arguments used for update and restore:
|
||||
|
||||
* `--version` is the version of Cloudron to install. By default, the setup script installs
|
||||
the latest version. You can set this to an older version when restoring a Cloudron from a backup.
|
||||
|
||||
* `--restore-url` is a backup URL to restore from.
|
||||
|
||||
## Domain setup
|
||||
|
||||
Once the setup script completes, the server will reboot, then visit your server by its
|
||||
IP address (`https://ip`) to complete the installation.
|
||||
|
||||
The setup website will show a certificate warning. Accept the self-signed certificate
|
||||
and proceed to the domain setup.
|
||||
|
||||
Currently, only subdomains of the [Public Suffix List](https://publicsuffix.org/) are supported.
|
||||
For example, `example.com`, `example.co.uk` will work fine. Choosing other non-registrable
|
||||
domain names like `cloudron.example.com` will not work.
|
||||
|
||||
### Route 53
|
||||
|
||||
Create root or IAM credentials and choose `Route 53` as the DNS provider.
|
||||
|
||||
* For root credentials:
|
||||
* In AWS Console, under your name in the menu bar, click `Security Credentials`
|
||||
* Click on `Access Keys` and create a key pair.
|
||||
* For IAM credentials:
|
||||
* You can use the following policy to create IAM credentials:
|
||||
|
||||
```
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "route53:*",
|
||||
"Resource": [
|
||||
"arn:aws:route53:::hostedzone/<hosted zone id>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"route53:ListHostedZones",
|
||||
"route53:GetChange"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Digital Ocean
|
||||
|
||||
Create an API token with read+write access and choose `Digital Ocean` as the DNS provider.
|
||||
|
||||
### Other
|
||||
|
||||
If your domain *does not* use Route 53 or Digital Ocean, setup a wildcard (`*`) DNS `A` record that points to the
|
||||
IP of the server created above. If your DNS provider has an API, please open an
|
||||
[issue](https://git.cloudron.io/cloudron/box/issues) and we may be able to support it.
|
||||
|
||||
## Finish Setup
|
||||
|
||||
Once the domain setup is done, the Cloudron will configure the DNS and get a SSL certificate. It will automatically redirect to `https://my.<domain>`.
|
||||
|
||||
# Backups
|
||||
|
||||
The Cloudron creates encrypted backups once a day. Each app is backed up independently and these
|
||||
backups have the prefix `app_`. The platform state is backed up independently with the
|
||||
prefix `box_`.
|
||||
|
||||
By default, backups reside in `/var/backups`. Please note that having backups reside in the same
|
||||
physical machine as the Cloudron server instance is dangerous and it must be changed to
|
||||
an external storage location like `S3` as soon as possible.
|
||||
|
||||
## Amazon S3
|
||||
|
||||
Provide S3 backup credentials in the `Settings` page and leave the endpoint field empty.
|
||||
|
||||
Create a bucket in S3 (You have to have an account at [AWS](https://aws.amazon.com/)). The bucket can be setup to periodically delete old backups by
|
||||
adding a lifecycle rule using the AWS console. S3 supports both permanent deletion
|
||||
or moving objects to the cheaper Glacier storage class based on an age attribute.
|
||||
With the current daily backup schedule a setting of two days should be sufficient
|
||||
for most use-cases.
|
||||
|
||||
* For root credentials:
|
||||
* In AWS Console, under your name in the menu bar, click `Security Credentials`
|
||||
* Click on `Access Keys` and create a key pair.
|
||||
* For IAM credentials:
|
||||
* You can use the following policy to create IAM credentials:
|
||||
|
||||
```
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::<your bucket name>",
|
||||
"arn:aws:s3:::<your bucket name>/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
## Minio S3
|
||||
|
||||
[Minio](https://minio.io/) is a distributed object storage server, providing the same API as Amazon S3.
|
||||
Since Cloudron supports S3, any API compatible solution should be supported as well, if this is not the case, let us know.
|
||||
|
||||
Minio can be set up by following the [installation instructions](https://docs.minio.io/) on any server which is reachable by the Cloudron.
|
||||
Do not set up Minio on the same server as the Cloudron; this will inevitably result in data loss if backups are stored on the same instance.
|
||||
|
||||
Once setup, minio will print the necessary information, like login credentials, region and endpoints in its logs.
|
||||
|
||||
```
|
||||
$ ./minio server ./storage
|
||||
|
||||
Endpoint: http://192.168.10.113:9000 http://127.0.0.1:9000
|
||||
AccessKey: GFAWYNJEY7PUSLTHYHT6
|
||||
SecretKey: /fEWk66E7GsPnzE1gohqKDovaytLcxhr0tNWnv3U
|
||||
Region: us-east-1
|
||||
```
|
||||
|
||||
First create a new bucket for the backups, using the minio commandline tools or the webinterface. The bucket has to have **read and write** permissions.
|
||||
|
||||
The information to be copied to the Cloudron's backup settings form may look similar to:
|
||||
|
||||
<img src="/docs/img/minio_backup_config.png" class="shadow"><br/>
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
# Email
|
||||
|
||||
Cloudron has a built-in email server. By default, it only sends out email on behalf of apps
|
||||
(for example, password reset or notification). You can enable the email server for sending
|
||||
and receiving mail on the `settings` page. This feature is only available if you have setup
|
||||
a DNS provider like Digital Ocean or Route53.
|
||||
|
||||
Your server's IP plays a big role in how emails from our Cloudron get handled. Spammers
|
||||
frequently abuse public IP addresses and as a result your Cloudron might possibly start
|
||||
out with a bad reputation. The good news is that most IP based blacklisting services cool
|
||||
down over time. The Cloudron sets up DNS entries for SPF, DKIM, DMARC automatically and
|
||||
reputation should be easy to get back.
|
||||
|
||||
## Checklist
|
||||
|
||||
* If you are unable to receive mail, first thing to check is if your VPS provider lets you
|
||||
receive mail on port 25.
|
||||
|
||||
* Digital Ocean - New accounts frequently have port 25 blocked. Write to their support to
|
||||
unblock your server.
|
||||
|
||||
* EC2, Lightsail & Scaleway - Edit your security group to allow email.
|
||||
|
||||
* Set up a reverse DNS PTR record for the `my` subdomain.
|
||||
**Note:** PTR records are a feature of your VPS provider and not your domain provider.
|
||||
|
||||
* You can verify the PTR record [here](https://mxtoolbox.com/ReverseLookup.aspx).
|
||||
|
||||
* AWS EC2 & Lightsail - Fill the [PTR request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request).
|
||||
|
||||
* Digital Ocean - Digital Ocean sets up a PTR record based on the droplet's name. So, simply rename
|
||||
your droplet to `my.<domain>`. Note that some new Digital Ocean accounts have [port 25 blocked](https://www.digitalocean.com/community/questions/port-25-smtp-external-access).
|
||||
|
||||
* Linode - Follow this [guide](https://www.linode.com/docs/networking/dns/setting-reverse-dns).
|
||||
|
||||
* Scaleway - Edit your security group to allow email and [reboot the server](https://community.online.net/t/security-group-not-working/2096) for the change to take effect. You can also set a PTR record on the interface with your `my.<domain>`.
|
||||
|
||||
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/). In most cases,
|
||||
you can apply for removal of your IP by filling out a form at the DNSBL manager site.
|
||||
|
||||
* When using wildcard or manual DNS backends, you have to setup the DMARC, MX records manually.
|
||||
|
||||
* Finally, check your spam score at [mail-tester.com](https://www.mail-tester.com/). The Cloudron
|
||||
should get 100%, if not please let us know.
|
||||
|
||||
# CLI Tool
|
||||
|
||||
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
|
||||
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be installed & run on a Laptop or PC</b>
|
||||
|
||||
Once installed, you can install, configure, list, backup and restore apps from the command line.
|
||||
|
||||
## Linux & OS X
|
||||
|
||||
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
|
||||
|
||||
```
|
||||
npm install -g cloudron
|
||||
```
|
||||
|
||||
Depending on your setup, you may need to run this as root.
|
||||
|
||||
On OS X, it is known to work with the `openssl` package from homebrew.
|
||||
|
||||
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
|
||||
|
||||
## Windows
|
||||
|
||||
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
|
||||
|
||||
# Updates
|
||||
|
||||
Apps installed from the Cloudron Store are automatically updated every night.
|
||||
|
||||
The Cloudron platform itself updates in two ways: update or upgrade.
|
||||
|
||||
### Update
|
||||
|
||||
An **update** is applied onto the running server instance. Such updates are performed
|
||||
every night. You can also use the Cloudron UI to initiate an update immediately.
|
||||
|
||||
The Cloudron will always make a complete backup before attempting an update. In the unlikely
|
||||
case an update fails, it can be [restored](/references/selfhosting.html#restore).
|
||||
|
||||
### Upgrade
|
||||
|
||||
An **upgrade** requires a new OS image. This process involves creating a new server from scratch
|
||||
with the latest code and restoring it from the last backup.
|
||||
|
||||
To upgrade follow these steps closely:
|
||||
|
||||
* Create a new backup - `cloudron machine backup create`
|
||||
|
||||
* List the latest backup - `cloudron machine backup list`
|
||||
|
||||
* Make the backup available for the new cloudron instance:
|
||||
|
||||
* `S3` - When storing backups in S3, make the latest box backup public - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console as seen here:
|
||||
|
||||
<img src="/docs/img/aws_backup_public.png" class="shadow haze"><br/>
|
||||
|
||||
Copy the new public URL of the latest backup for use as the `--restore-url` below.
|
||||
|
||||
<img src="/docs/img/aws_backup_link.png" class="shadow haze"><br/>
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the machine before installation, mounting the external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `--encryption-key` and `--restore-url` flags.
|
||||
The `--encryption-key` is the backup encryption key. It can be displayed with `cloudron machine info`
|
||||
|
||||
Similar to the initial installation, a Cloudron upgrade looks like:
|
||||
```
|
||||
$ ssh root@newserverip
|
||||
> wget https://cloudron.io/cloudron-setup
|
||||
> chmod +x cloudron-setup
|
||||
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --domain <example.com> --encryption-key <key> --restore-url <publicS3Url>
|
||||
```
|
||||
|
||||
Note: When upgrading an old version of Cloudron (<= 0.94.0), pass the `--version 0.94.1` flag and then continue updating
|
||||
from that.
|
||||
|
||||
* Finally, once you see the newest version being displayed in your Cloudron webinterface, you can safely delete the old server instance.
|
||||
|
||||
# Restore
|
||||
|
||||
To restore a Cloudron from a specific backup:
|
||||
|
||||
* Select the backup - `cloudron machine backup list`
|
||||
|
||||
* Make the backup public
|
||||
|
||||
* `S3` - Make the box backup publicly readable - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console. Once the box has restored, you can make it private again.
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the new machine before Cloudron installation OR mounting an external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `version`, `encryption-key`, `domain` and `restore-url` flags.
|
||||
The `version` field is the version of the Cloudron that the backup corresponds to (it is embedded
|
||||
in the backup file name).
|
||||
|
||||
* Make the box backup private, once the upgrade is complete.
|
||||
|
||||
# Debug
|
||||
|
||||
You can SSH into your Cloudron and collect logs:
|
||||
|
||||
* `journalctl -a -u box` to get debug output of box related code.
|
||||
* `docker ps` will give you the list of containers. The addon containers are named as `mail`, `postgresql`,
|
||||
`mysql` etc. If you want to get a specific container's log output, `journalctl -a CONTAINER_ID=<container_id>`.
|
||||
|
||||
# Alerts
|
||||
|
||||
The Cloudron will notify the Cloudron administrator via email if apps go down, run out of memory, have updates
|
||||
available etc.
|
||||
|
||||
You will have to setup a 3rd party service like [Cloud Watch](https://aws.amazon.com/cloudwatch/) or [UptimeRobot](http://uptimerobot.com/) to monitor the Cloudron itself. You can use `https://my.<domain>/api/v1/cloudron/status`
|
||||
as the health check URL.
|
||||
|
||||
# Help
|
||||
|
||||
If you run into any problems, join us at our [chat](https://chat.cloudron.io) or [email us](mailto:support@cloudron.io).
|
||||
@@ -1,354 +0,0 @@
|
||||
# Introduction
|
||||
|
||||
The Cloudron is the best platform for self-hosting web applications on your server. You
|
||||
can easily install apps on it, add users, manage access restriction and keep your
|
||||
server and apps updated with no effort.
|
||||
|
||||
With so many 1-click app solutions out there, you might wonder what is so special
|
||||
about Cloudron? As the name implies, 1-click installers simply install code into a server
|
||||
and leave it at that. There's so much more to do:
|
||||
|
||||
1. Configure a domain to point to your server
|
||||
2. Setup SSL certificates and renew them periodically
|
||||
3. Ensure apps are backed up correctly
|
||||
4. Ensure apps are up-to-date and secure
|
||||
5. Have a mechanism to quickly restore apps from a backup
|
||||
6. Manage users across all your apps
|
||||
7. Get alerts and notifications about the status of apps
|
||||
|
||||
... and so on ...
|
||||
|
||||
We made the Cloudron to dramatically lower the bar for people to run apps on servers. Just provide
|
||||
a domain name, install apps and add users. All the server management tasks listed above are
|
||||
completely automated.
|
||||
|
||||
If you want to learn more about the secret sauce that makes the Cloudron, please read our
|
||||
[architecture overview](/references/architecture.html).
|
||||
|
||||
# Use cases
|
||||
|
||||
Here are some of the apps you can run on a Cloudron:
|
||||
|
||||
* RSS Reader
|
||||
* Chat, IRC, Jabber servers
|
||||
* Public forum
|
||||
* Blog
|
||||
* File syncing and sharing
|
||||
* Code hosting
|
||||
* Email
|
||||
|
||||
Our list of apps is growing everyday, so be sure to [follow us on twitter](https://twitter.com/cloudron_io).
|
||||
|
||||
# Activation
|
||||
|
||||
When you first create the Cloudron, the setup wizard will ask you to setup an administrator
|
||||
account. Don't worry, a Cloudron administrator doesn't need to know anything about maintaining
|
||||
a server! It's the whole reason why we made the Cloudron. Being a Cloudron administrator is
|
||||
more analogous to being the owner of a smartphone. You can always add more administrators to
|
||||
the Cloudron from the `Users` menu item.
|
||||
|
||||
<img src="/docs/img/webadmin_domain.png" class="shadow">
|
||||
|
||||
The Cloudron administration page is located at the `my` subdomain. You might want to bookmark
|
||||
this link!
|
||||
|
||||
# Apps
|
||||
|
||||
## Installation
|
||||
|
||||
You can install apps on the Cloudron by choosing the `App Store` menu item. Use the 'Search' bar
|
||||
to search for apps.
|
||||
|
||||
Clicking on an app gives you information about the app.
|
||||
|
||||
<img src="/docs/img/app_info.png" class="shadow">
|
||||
|
||||
Clicking the `Install` button will show an install dialog like below:
|
||||
|
||||
<img src="/docs/img/app_install.png" class="shadow">
|
||||
|
||||
The `Location` field is the subdomain in which your app will be installed. For example, if you use the
|
||||
`mail` location for your web mail client, then it will be accessible at `mail.<domain>`.
|
||||
|
||||
Tip: You can access the apps directly on your browser using `mail.<domain>`. You don't have to
|
||||
visit the Cloudron administration panel.
|
||||
|
||||
`Access control` specifies who can access this app.
|
||||
|
||||
* `Every Cloudron user` - Any user in your Cloudron can access the app. Initially, you are the only
|
||||
user in your Cloudron. Unless you explicitly invite others, nobody else can access these apps.
|
||||
Note that the term 'access' depends on the app. For a blog, this means that nobody can post new
|
||||
blog posts (but anybody can view them). For a chat server, this might mean that nobody can access
|
||||
your chat server.
|
||||
|
||||
* `Restrict to groups` - Only users in the groups can access the app.
|
||||
|
||||
## Updates
|
||||
|
||||
All your apps automatically update as and when the application author releases an update. The Cloudron
|
||||
will attempt to update around midnight of your timezone.
|
||||
|
||||
Some app updates are not automatic. This can happen if a new version of the app has removed some features
|
||||
that you were relying on. In such a case, the update has to be manually approved. This is simply a matter
|
||||
of clicking the `Update` button (the green star) after you read about the changes.
|
||||
|
||||
<img src="/docs/img/app_update.png" class="shadow">
|
||||
|
||||
## Backups
|
||||
|
||||
<i>If you self-host, please refer to the [self-hosting documentation](/references/selfhosting.html#backups) for backups.</i>
|
||||
|
||||
All apps are automatically backed up every day. Backups are stored encrypted in Amazon S3. You don't have
|
||||
to do anything about it. The [Cloudron CLI](https://git.cloudron.io/cloudron/cloudron-cli) tool can be used
|
||||
to download application backups.
|
||||
|
||||
## Configuration
|
||||
|
||||
Apps can be reconfigured using the `Configure` button.
|
||||
|
||||
<img src="/docs/img/app_configure_button.png" class="shadow">
|
||||
|
||||
Clicking on the wrench button will bring up the configure dialog.
|
||||
|
||||
<img src="/docs/img/app_configure.png" class="shadow">
|
||||
|
||||
You can do the following:
|
||||
* Change the location to move the app to another subdomain. Say, you want to move your blog from `blog` to `about`.
|
||||
* Change who can access the app.
|
||||
|
||||
Changing an app's configuration has a small downtime (usually around a minute).
|
||||
|
||||
## Restore
|
||||
|
||||
Apps can be restored to a previous backup by clicking on the `Restore` button.
|
||||
|
||||
<img src="/docs/img/app_restore_button.png" class="shadow">
|
||||
|
||||
Note that restoring previous data might also restore the previous version of the software. For example, you might
|
||||
be currently using Version 5 of the app. If you restore to a backup that was made with Version 3 of the app, then the restore
|
||||
operation will install Version 3 of the app. This is because the latest version may not be able to handle old data.
|
||||
|
||||
## Uninstall
|
||||
|
||||
You can uninstall an app by clicking the `Uninstall` button.
|
||||
|
||||
<img src="/docs/img/app_uninstall_button.png" class="shadow">
|
||||
|
||||
Note that all data associated with the app will be immediately removed from the Cloudron. App data might still
|
||||
persist in your old backups and the [CLI tool](https://git.cloudron.io/cloudron/cloudron-cli) provides a way to
|
||||
restore from those old backups should it be required.
|
||||
|
||||
## Embedding Apps
|
||||
|
||||
It is possible to embed Cloudron apps into other websites. By default, this is disabled to prevent
|
||||
[Clickjacking](https://cloudron.io/blog/2016-07-15-site-embedding.html).
|
||||
|
||||
You can set a website that is allowed to embed your Cloudron app using the app's [Configure dialog](#configuration).
|
||||
Click on 'Show Advanced Settings...' and enter the embedder website name.
|
||||
|
||||
# Custom domain
|
||||
|
||||
When you create a Cloudron from cloudron.io, we provide a subdomain under `cloudron.me` like `girish.cloudron.me`.
|
||||
Apps are available under that subdomain using a hyphenated name like `blog-girish.cloudron.me`.
|
||||
|
||||
Domain names are a thing of pride and the Cloudron makes it easy to make your apps accessible from memorable locations like `blog.girish.in`.
|
||||
|
||||
## Single app on a custom domain
|
||||
|
||||
This approach is applicable if you desire that only a single app be accessible from a custom
|
||||
domain. For this, open the app's configure dialog and choose `External Domain` in the location dropdown.
|
||||
|
||||
<img src="/docs/img/app_external_domain.png" class="shadow">
|
||||
|
||||
This dialog will suggest you to add a `CNAME` record. Once you setup a CNAME record with your DNS provider,
|
||||
the app will be accessible from that external domain.
|
||||
|
||||
## Entire Cloudron on a custom domain
|
||||
|
||||
This approach is applicable if you want all your apps to be accessible from subdomains of your custom domain.
|
||||
For example, `blog.girish.in`, `notes.girish.in`, `owncloud.girish.in`, `mail.girish.in` and so on. This
|
||||
approach is also the only way that the Cloudron supports for sending and receiving emails from your domain.
|
||||
|
||||
For this, go to the 'Domains & Certs' menu item.
|
||||
|
||||
<img src="/docs/img/custom_domain_menu.png" class="shadow">
|
||||
|
||||
Change the domain name to your custom domain. Currently, we require that your domain be hosted on AWS Route53.
|
||||
|
||||
<img src="/docs/img/custom_domain_change.png" class="shadow">
|
||||
|
||||
Moving to a custom domain will retain all your apps and data and will take around 15 minutes. If you require assistance with another provider,
|
||||
<a href="mailto:support@cloudron.io">just let us know</a>.
|
||||
|
||||
# User management
|
||||
|
||||
## Users
|
||||
|
||||
You can invite new users (friends, family, colleagues) with their email address from the `Users` menu. They will
|
||||
receive an invite to sign up with your Cloudron. They can now access the apps that you have given them access
|
||||
to.
|
||||
|
||||
<img src="/docs/img/users.png" class="shadow">
|
||||
|
||||
To remove a user, simply remove them from the list. Note that the removed user cannot access any app anymore.
|
||||
|
||||
## Administrators
|
||||
|
||||
A Cloudron administrator is a special right given to an existing Cloudron user allowing them to manage
|
||||
apps and users. To make an existing user an administrator, click the edit (pencil) button corresponding to
|
||||
the user and check the `Allow this user to manage apps, groups and other users` checkbox.
|
||||
|
||||
<img src="/docs/img/administrator.png" class="shadow">
|
||||
|
||||
## Groups
|
||||
|
||||
Groups provide a convenient way to group users. Their purpose is two-fold:
|
||||
|
||||
* You can assign one or more groups to apps to restrict who can access an app.
|
||||
* Each group is a mailing list (forwarding address) consisting of its members.
|
||||
|
||||
You can create a group by using the `Groups` menu item.
|
||||
|
||||
<img src="/docs/img/groups.png" class="shadow">
|
||||
|
||||
To set the access restriction use the app's configure dialog.
|
||||
|
||||
<img src="/docs/img/app_access_control.png" class="shadow">
|
||||
|
||||
You can now send mails to `groupname@<domain>` to address all the group members.
|
||||
|
||||
# Login
|
||||
|
||||
## Cloudron admin
|
||||
|
||||
The Cloudron admin page is always located at the `my` subdomain of your Cloudron domain. For custom domains,
|
||||
this will be like `my.girish.in`. For domains from cloudron.io, this will be like `my-girish.cloudron.me`.
|
||||
|
||||
## Apps (single sign-on)
|
||||
|
||||
An important feature of the Cloudron is Single Sign-On. You use the same username & password for logging in
|
||||
to all your apps. No more having to manage separate set of credentials for each service!
|
||||
|
||||
## Single user apps
|
||||
|
||||
Some apps only work with a single user. For example, a notes app might allow only a single user to login and add
|
||||
notes. For such apps, you will be prompted during installation to select the single user who can access the app.
|
||||
|
||||
<img src="/docs/img/app_single_user.png" class="shadow">
|
||||
|
||||
If you want multiple users to use the app independently, simply install the app multiple times to different locations.
|
||||
|
||||
# Email
|
||||
|
||||
The Cloudron has a built-in email server. The primary email address is the same as the username. Emails can be sent
|
||||
and received from `<username>@<domain>`. The Cloudron does not allow masquerading - one user cannot send email
|
||||
pretending to be another user.
|
||||
|
||||
## Enabling Email
|
||||
|
||||
By default, Cloudron's email server only allows apps to send email. To enable users to send and receive email,
|
||||
turn on the option under `Settings`. Turning on this option also allows apps to _receive_ email.
|
||||
|
||||
Once email is enabled, the Cloudron will keep the `MX` DNS record updated.
|
||||
|
||||
<img src="/docs/img/enable_email.png" class="shadow">
|
||||
|
||||
## Receiving email using IMAP
|
||||
|
||||
Use the following settings to receive email.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 993
|
||||
* Connection Security - TLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
## Sending email using SMTP
|
||||
|
||||
Use the following settings to send email.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 587
|
||||
* Connection Security - STARTTLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
## Email filters using Sieve
|
||||
|
||||
Use the following settings to set up email filtering rules via Manage Sieve.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 4190
|
||||
* Connection Security - TLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
The [Rainloop](https://cloudron.io/appstore.html?app=net.rainloop.cloudronapp) and [Roundcube](https://cloudron.io/appstore.html?app=net.roundcube.cloudronapp)
|
||||
apps are already pre-configured to use the above settings.
|
||||
|
||||
## Aliases
|
||||
|
||||
You can configure one or more aliases alongside the primary email address of each user. You can set aliases by editing the
|
||||
user's settings, available behind the edit button in the user listing. Note that aliases cannot conflict with existing user names.
|
||||
|
||||
<img src="/docs/img/email_alias.png" class="shadow">
|
||||
|
||||
Currently, it is not possible to login using the alias for SMTP/IMAP/Sieve services. Instead, add the alias as an identity in
|
||||
your mail client but login using the Cloudron credentials.
|
||||
|
||||
## Subaddresses
|
||||
|
||||
Emails addressed to `<username>+tag@<domain>` will be delivered to the `username` mailbox. You can use this feature to give out emails of the form
|
||||
`username+kayak@<domain>`, `username+aws@<domain>` and so on and have them all delivered to your mailbox.
|
||||
|
||||
## Forwarding addresses
|
||||
|
||||
Each group on the Cloudron is also a forwarding address. Mails can be addressed to `group@<domain>` and the mail will
|
||||
be sent to each user who is part of the group.
|
||||
|
||||
## Marking Spam
|
||||
|
||||
The spam detection agent on the Cloudron requires training to identify spam. To do this, simply move your junk mails
|
||||
to a pre-created folder named `Spam`. Most mail clients have a Junk or Spam button which does this automatically.
|
||||
|
||||
# Graphs
|
||||
|
||||
The Graphs view shows an overview of the disk and memory usage on your Cloudron.
|
||||
|
||||
<img src="/docs/img/graphs.png" class="shadow">
|
||||
|
||||
The `Disk Usage` graph shows you how much disk space you have left. Note that the Cloudron will
|
||||
send the Cloudron admins an email notification when the disk is ~90% full.
|
||||
|
||||
The `Apps` Memory graph shows the memory consumed by each installed app. You can click on each segment
|
||||
on the graph to see the memory consumption over time in the chart below it.
|
||||
|
||||
The `System` Memory graph shows the overall memory consumption on the entire Cloudron. If you see
|
||||
the Free memory < 50MB frequently, you should consider upgrading to a Cloudron with more memory.
|
||||
|
||||
# Activity log
|
||||
|
||||
The `Activity` view shows the activity on your Cloudron. It includes information about who is using
|
||||
the apps on your Cloudron and also tracks configuration changes.
|
||||
|
||||
<img src="/docs/img/activity.png" class="shadow">
|
||||
|
||||
# Domains and SSL Certificates
|
||||
|
||||
All apps on the Cloudron can only be reached by `https`. The Cloudron automatically installs and
|
||||
renews certificates for your apps as needed. Should installation of certificate fail for reasons
|
||||
beyond its control, Cloudron admins will get a notification about it.
|
||||
|
||||
# API Access
|
||||
|
||||
All the operations listed in this manual, like installing apps, configuring users and groups, are
|
||||
completely programmable with a [REST API](/references/api.html).
|
||||
|
||||
# Moving to a larger Cloudron
|
||||
|
||||
When using a Cloudron from cloudron.io, it is easy to migrate your apps and data to a bigger server.
|
||||
In the `Settings` page, you can change the plan.
|
||||
|
||||
<insert picture>
|
||||
|
||||
# Command line tool
|
||||
|
||||
If you are a software developer or a sysadmin, the Cloudron comes with a CLI tool that can be
|
||||
used to develop custom apps for the Cloudron. Read more about it [here](https://git.cloudron.io/cloudron/cloudron-cli).
|
||||
@@ -1,621 +0,0 @@
|
||||
# Overview
|
||||
|
||||
This tutorial provides an introduction to developing applications
|
||||
for the Cloudron using node.js.
|
||||
|
||||
# Installation
|
||||
|
||||
## Install CLI tool
|
||||
|
||||
The Cloudron CLI tool allows you to install, configure and test apps on your Cloudron.
|
||||
|
||||
Installing the CLI tool requires [node.js](https://nodejs.org/) and
|
||||
[npm](https://www.npmjs.com/). You can then install the CLI tool using the following
|
||||
command:
|
||||
|
||||
```
|
||||
sudo npm install -g cloudron
|
||||
```
|
||||
|
||||
Note: Depending on your setup, you can run the above command without `sudo`.
|
||||
|
||||
## Testing your installation
|
||||
|
||||
The `cloudron` command should now be available in your path.
|
||||
|
||||
Let's log in to the Cloudron as follows:
|
||||
|
||||
```
|
||||
$ cloudron login
|
||||
Cloudron Hostname: craft.selfhost.io
|
||||
|
||||
Enter credentials for craft.selfhost.io:
|
||||
Username: girish
|
||||
Password:
|
||||
Login successful.
|
||||
```
|
||||
|
||||
## Your First Application
|
||||
|
||||
Creating an application for Cloudron can be summarized as follows:
|
||||
|
||||
1. Create a web application using any language/framework. This web application must run a HTTP server
|
||||
and can optionally provide other services using custom protocols (like git, ssh, TCP etc).
|
||||
|
||||
2. Create a [Dockerfile](http://docs.docker.com/engine/reference/builder/) that specifies how to create
|
||||
an application ```image```. An ```image``` is essentially a bundle of the application source code
|
||||
and its dependencies.
|
||||
|
||||
3. Create a [CloudronManifest.json](/references/manifest.html) file that provides essential information
|
||||
about the app. This includes information required for the Cloudron Store like title, version, icon and
|
||||
runtime requirements like `addons`.
|
||||
|
||||
## Simple Web application
|
||||
|
||||
To keep things simple, we will start by deploying a trivial node.js server running on port 8000.
|
||||
|
||||
Create a new project folder `tutorial/` and add a file named `tutorial/server.js` with the following content:
|
||||
```javascript
|
||||
var http = require("http");
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end("Hello World\n");
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
## Dockerfile
|
||||
|
||||
A Dockerfile contains commands to assemble an image.
|
||||
|
||||
Create a file named `tutorial/Dockerfile` with the following content:
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
All Cloudron apps **must** start from this base image.
|
||||
|
||||
The `ADD` command copies the source code of the app into the directory `/app/code`.
|
||||
While this example only copies a single file, the ADD command can be used to copy directory trees as well.
|
||||
See the [Dockerfile](https://docs.docker.com/reference/builder/#add) documentation for more details.
|
||||
|
||||
The `CMD` command specifies how to run the server. There are multiple versions of node available under `/usr/local`. We
|
||||
choose node v0.12.7 for our app.
|
||||
|
||||
## CloudronManifest.json
|
||||
|
||||
The `CloudronManifest.json` specifies
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example,
|
||||
the title, author information, description etc
|
||||
|
||||
* Information for installing the app on the Cloudron. This includes fields
|
||||
like httpPort, tcpPorts.
|
||||
|
||||
Create the CloudronManifest.json using the following command:
|
||||
|
||||
```
|
||||
$ cloudron init
|
||||
id: io.cloudron.tutorial # unique id for this app. use reverse domain name convention
|
||||
author: John Doe # developer or company name of the form "user <email>"
|
||||
title: Tutorial App # Cloudron Store title of this app
|
||||
description: App that uses node.js # A string or local file reference like file://DESCRIPTION.md
|
||||
tagline: Changing the world one app at a time # A tag line for this app for the Cloudron Store
|
||||
website: https://cloudron.io # A link to this app's website
|
||||
contactEmail: support@cloudron.io # Contact email of developer or company
|
||||
httpPort: 8000 # The http port on which this application listens
|
||||
```
|
||||
|
||||
The above command creates a CloudronManifest.json:
|
||||
|
||||
File ```tutorial/CloudronManifest.json```
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
You can read in more detail about each field in the [Manifest reference](/references/manifest.html).
|
||||
|
||||
# Installing
|
||||
|
||||
## Building
|
||||
|
||||
We now have all the necessary files in place to build and deploy the app to the Cloudron.
|
||||
Building creates an image of the app using the Dockerfile which can then be used to deploy
|
||||
to the Cloudron.
|
||||
|
||||
Building, pushing and pulling docker images is very bandwidth and CPU intensive. To alleviate this
|
||||
problem, apps are built using the `build service` which uses `cloudron.io` account credentials.
|
||||
|
||||
**Warning**: As of this writing, the build service uses the public Docker registry and the images that are built
|
||||
can be downloaded by anyone. This means that your source code will be viewable by others.
|
||||
|
||||
Initiate a build using ```cloudron build```:
|
||||
```
|
||||
$ cloudron build
|
||||
Building io.cloudron.tutorial@0.0.1
|
||||
|
||||
Appstore login:
|
||||
Email: ramakrishnan.girish@gmail.com # cloudron.io account
|
||||
Password: # Enter password
|
||||
Login successful.
|
||||
|
||||
Build scheduled with id 76cebfdd-7822-4f3d-af17-b3eb393ae604
|
||||
Downloading source
|
||||
Building
|
||||
Step 0 : FROM cloudron/base:0.10.0
|
||||
---> 97583855cc0c
|
||||
Step 1 : ADD server.js /app/code
|
||||
---> b09b97ecdfbc
|
||||
Removing intermediate container 03c1e1f77acb
|
||||
Step 2 : CMD /usr/local/node-0.12.7/bin/node /app/code/main.js
|
||||
---> Running in 370f59d87ab2
|
||||
---> 53b51eabcb89
|
||||
Removing intermediate container 370f59d87ab2
|
||||
Successfully built 53b51eabcb89
|
||||
The push refers to a repository [cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4] (len: 1)
|
||||
Sending image list
|
||||
Pushing repository cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4 (1 tags)
|
||||
Image already pushed, skipping 57f52d167bbb
|
||||
Image successfully pushed b09b97ecdfbc
|
||||
Image successfully pushed 53b51eabcb89
|
||||
Pushing tag for rev [53b51eabcb89] on {https://cdn-registry-1.docker.io/v1/repositories/cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4/tags/76cebfdd-7822-4f3d-af17-b3eb393ae604}
|
||||
Build succeeded
|
||||
```
|
||||
|
||||
## Installing
|
||||
|
||||
Now that we have built the image, we can install our latest build on the Cloudron
|
||||
using the following command:
|
||||
|
||||
```
|
||||
$ cloudron install
|
||||
Using cloudron craft.selfhost.io
|
||||
Using build 76cebfdd-7822-4f3d-af17-b3eb393ae604 from 1 hour ago
|
||||
Location: tutorial # This is the location into which the application installs
|
||||
App is being installed with id: 4dedd3bb-4bae-41ef-9f32-7f938995f85e
|
||||
|
||||
=> Waiting to start installation
|
||||
=> Registering subdomain .
|
||||
=> Verifying manifest .
|
||||
=> Downloading image ..............
|
||||
=> Creating volume .
|
||||
=> Creating container
|
||||
=> Setting up collectd profile ................
|
||||
=> Waiting for DNS propagation ...
|
||||
|
||||
App is installed.
|
||||
```
|
||||
|
||||
This makes the app available at https://tutorial-craft.selfhost.io.
|
||||
|
||||
Open the app in your default browser:
|
||||
```
|
||||
cloudron open
|
||||
```
|
||||
|
||||
You should see `Hello World`.
|
||||
|
||||
# Testing
|
||||
|
||||
The application testing cycle involves `cloudron build` and `cloudron install`.
|
||||
Note that `cloudron install` updates an existing app in place.
|
||||
|
||||
You can view the logs using `cloudron logs`. When the app is running you can follow the logs
|
||||
using `cloudron logs -f`.
|
||||
|
||||
For example, you can see the console.log output in our server.js with the command below:
|
||||
|
||||
```
|
||||
$ cloudron logs
|
||||
Using cloudron craft.selfhost.io
|
||||
2015-05-08T03:28:40.233940616Z Server running at port 8000
|
||||
```
|
||||
|
||||
It is also possible to run a *shell* and *execute* arbitrary commands in the context of the application
|
||||
process by using `cloudron exec`. By default, exec simply drops you into an interactive bash shell with
|
||||
which you can inspect the file system and the environment.
|
||||
|
||||
```
|
||||
$ cloudron exec
|
||||
```
|
||||
|
||||
You can also execute arbitrary commands:
|
||||
```
|
||||
$ cloudron exec env # display the env variables that your app is running with
|
||||
```
|
||||
|
||||
# Storing data
|
||||
|
||||
For file system storage, an app can use the `localstorage` addon to store data under `/app/data`.
|
||||
When the `localstorage` addon is active, any data under /app/data is automatically backed up. When an
|
||||
app is updated, /app/data already contains the data generated by the previous version.
|
||||
|
||||
*Note*: For convenience, the initial CloudronManifest.json generated by `cloudron init` already contains this
|
||||
addon.
|
||||
|
||||
Let us put this theory into action by saving a *visit counter* as a file.
|
||||
*server.js* has been modified to count the number of visitors on the site by storing a counter
|
||||
in a file named ```counter.dat```.
|
||||
|
||||
File ```tutorial/server.js```
|
||||
|
||||
```javascript
|
||||
var http = require('http'),
|
||||
fs = require('fs'),
|
||||
util = require('util');
|
||||
|
||||
var COUNTER_FILE = '/app/data/counter.dat';
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
var counter = 0;
|
||||
if (fs.existsSync(COUNTER_FILE)) {
|
||||
// read existing counter if it exists
|
||||
counter = parseInt(fs.readFileSync(COUNTER_FILE, 'utf8'), 10);
|
||||
}
|
||||
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end(util.format("Hello World. %s visitors have visited this page\n", counter));
|
||||
++counter; // bump the counter
|
||||
fs.writeFileSync(COUNTER_FILE, counter + '', 'utf8'); // save back counter
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Now every time you refresh the page you will notice that the counter bumps up. You will
|
||||
also notice that if you make changes to the app and do a `cloudron install`, the `counter.dat`
|
||||
is *retained* across updates.
|
||||
|
||||
# Database
|
||||
|
||||
Most web applications require a database of some form. In theory, it is possible to run any
|
||||
database you want as part of the application image. This is, however, a waste of server resources
|
||||
should every app run its own database server.
|
||||
|
||||
To solve this, the Cloudron provides shareable resources like databases in form of ```addons```.
|
||||
The database server is managed by the Cloudron and the application simply needs to request access to
|
||||
the database in the CloudronManifest.json. While the database server itself is a shared resource, the
|
||||
databases are exclusive to the application. Each database is password protected and accessible only
|
||||
to the application. Databases and tables can be configured without restriction as the application
|
||||
requires.
|
||||
|
||||
Cloudron currently provides `mysql`, `postgresql`, `mongodb`, `redis` database addons.
|
||||
|
||||
For this tutorial, let us try to save the counter in `redis` addon. For this, we make use of the
|
||||
[redis](https://www.npmjs.com/package/redis) module.
|
||||
|
||||
Since this is a node.js app, let's add a very basic `package.json` containing the `redis` module dependency.
|
||||
|
||||
File `tutorial/package.json`
|
||||
```json
|
||||
{
|
||||
"name": "tutorial",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"redis": "^0.12.1"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
and modify our Dockerfile to look like this:
|
||||
|
||||
File `tutorial/Dockerfile`
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
ADD package.json /app/code/package.json
|
||||
|
||||
WORKDIR /app/code
|
||||
RUN npm install --production
|
||||
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
Notice the new `RUN` command which installs the node module dependencies in package.json using `npm install`.
|
||||
|
||||
Since we want to use redis, we have to modify the CloudronManifest.json to make redis available for this app.
|
||||
|
||||
File `tutorial/CloudronManifest.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"redis": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
When the application runs, environment variables `REDIS_HOST`, `REDIS_PORT` and
|
||||
`REDIS_PASSWORD` are injected. You can read about the environment variables in the
|
||||
[Redis reference](/references/addons.html#redis).
|
||||
|
||||
Let's change `server.js` to use redis instead of file backed counting:
|
||||
|
||||
File ```tutorial/server.js```
|
||||
|
||||
```javascript
|
||||
var http = require('http'),
|
||||
fs = require('fs'),
|
||||
util = require('util'),
|
||||
redis = require('redis');
|
||||
|
||||
var redisClient = redis.createClient(process.env.REDIS_PORT, process.env.REDIS_HOST);
|
||||
redisClient.auth(process.env.REDIS_PASSWORD);
|
||||
redisClient.on("error", function (err) {
|
||||
console.log("Redis Client Error " + err);
|
||||
});
|
||||
|
||||
var COUNTER_KEY = 'counter';
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
redisClient.get(COUNTER_KEY, function (err, reply) {
|
||||
var counter = (!err && reply) ? parseInt(reply, 10) : 0;
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end(util.format("Hello World. %s visitors have visited this page\n", counter));
|
||||
redisClient.incr(COUNTER_KEY);
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Simply `cloudron build` and `cloudron install` to test your app!
|
||||
|
||||
# Authentication
|
||||
|
||||
The Cloudron has a centralized panel for managing users and groups. Apps can integrate Single Sign-On
|
||||
authentication using LDAP or OAuth.
|
||||
|
||||
Note that apps that are single user can skip Single Sign-On support. The Cloudron implements an `OAuth
|
||||
proxy` (accessed through the app configuration dialog) that optionally lets the Cloudron admin make the
|
||||
app visible only for logged in users.
|
||||
|
||||
## LDAP
|
||||
|
||||
Let's start out by adding the [ldap](/references/addons.html#ldap) addon to the manifest.
|
||||
|
||||
File `tutorial/CloudronManifest.json`
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"ldap": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
Building and installing the app shows that the app gets new LDAP specific environment variables.
|
||||
|
||||
```
|
||||
$ cloudron build
|
||||
$ cloudron install
|
||||
$ cloudron exec env | grep LDAP
|
||||
LDAP_SERVER=172.17.42.1
|
||||
LDAP_PORT=3002
|
||||
LDAP_URL=ldap://172.17.42.1:3002
|
||||
LDAP_USERS_BASE_DN=ou=users,dc=cloudron
|
||||
LDAP_GROUPS_BASE_DN=ou=groups,dc=cloudron
|
||||
```
|
||||
|
||||
Let's test the environment variables by using the [ldapjs](http://www.ldapjs.org) npm module.
|
||||
We start by adding ldapjs to package.json.
|
||||
|
||||
File `tutorial/package.json`
|
||||
```json
|
||||
{
|
||||
"name": "tutorial",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"ldapjs": "^0.7.1"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The server code has been modified to authenticate using the `X-Username` and `X-Password` headers for
|
||||
any path other than '/'.
|
||||
|
||||
File `tutorial/server.js`
|
||||
```javascript
|
||||
var http = require("http"),
|
||||
ldap = require('ldapjs');
|
||||
|
||||
var ldapClient = ldap.createClient({ url: process.env.LDAP_URL });
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
if (request.url === '/') {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
return response.end();
|
||||
}
|
||||
|
||||
var username = request.headers['x-username'] || '';
|
||||
var password = request.headers['x-password'] || '';
|
||||
var ldapDn = 'cn=' + username + ',' + process.env.LDAP_USERS_BASE_DN;
|
||||
|
||||
ldapClient.bind(ldapDn, password, function (error) {
|
||||
if (error) {
|
||||
response.writeHead(401, {"Content-Type": "text/plain"});
|
||||
response.end('Failed to authenticate: ' + error);
|
||||
} else {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end('Successfully authenticated');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Once we have used `cloudron build` and `cloudron install`, you can use `curl` to test
|
||||
credentials as follows:
|
||||
|
||||
```bash
|
||||
# Test with various credentials here. Your Cloudron admin username and password should succeed.
|
||||
curl -H 'X-Username: admin' -H 'X-Password: pass' https://tutorial-craft.selfhost.io/login
|
||||
```
|
||||
|
||||
## OAuth
|
||||
|
||||
An app can integrate with OAuth 2.0 Authorization code grant flow by adding
|
||||
[oauth](/references/addons.html#oauth) to CloudronManifest.json `addons` section.
|
||||
|
||||
Doing so will get the following environment variables:
|
||||
```
|
||||
$ cloudron exec env
|
||||
OAUTH_CLIENT_ID=cid-addon-4089f65a-2adb-49d2-a6d1-e519b7d85e8d
|
||||
OAUTH_CLIENT_SECRET=5af99a9633283aa15f5e6df4a108ff57f82064e4845de8bce8ad3af54dfa9dda
|
||||
OAUTH_ORIGIN=https://my-craft.selfhost.io
|
||||
API_ORIGIN=https://my-craft.selfhost.io
|
||||
HOSTNAME=tutorial-craft.selfhost.io
|
||||
```
|
||||
|
||||
OAuth Authorization code grant flow works as follows:
|
||||
* App starts the flow by redirecting the user to Cloudron authorization endpoint of the following format:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/oauth/dialog/authorize?response_type=code&client_id=OAUTH_CLIENT_ID&redirect_uri=CALLBACK_URL&scope=profile
|
||||
```
|
||||
|
||||
In the above URL, API_ORIGIN and OAUTH_CLIENT_ID are environment variables. CALLBACK_URL is a url of the app
|
||||
to which the user will be redirected back to after successful authentication. CALLBACK_URL has to have the
|
||||
same origin as the app.
|
||||
|
||||
* The Cloudron OAuth server authenticates the user (using a password form) at the above URL. It also establishes
|
||||
that the user grants the client's access request.
|
||||
|
||||
* If the user authenticated successfully, it will redirect the browser to CALLBACK_URL with a `code` query parameter.
|
||||
|
||||
* The app can exchange the `code` above for an `access token` by using the `OAUTH_CLIENT_SECRET`. It does so by making
|
||||
a _POST_ request to the following url:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/oauth/token?response_type=token&client_id=OAUTH_CLIENT_ID
|
||||
```
|
||||
with the following request body (json):
|
||||
```json
|
||||
{
|
||||
"grant_type": "authorization_code",
|
||||
"code": "<the code received in CALLBACK_URL query parameter>",
|
||||
"redirect_uri": "https://<HOSTNAME>",
|
||||
"client_id": "<OAUTH_CLIENT_ID>",
|
||||
"client_secret": "<OAUTH_CLIENT_SECRET>"
|
||||
}
|
||||
```
|
||||
|
||||
In the above URL, API_ORIGIN, OAUTH_CLIENT_ID and HOSTNAME are environment variables. The response contains
|
||||
the `access_token` in the body.
|
||||
|
||||
* The `access_token` can be used to get the [user's profile](/references/api.html#profile) using the following url:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/profile?access_token=ACCESS_TOKEN
|
||||
```
|
||||
|
||||
The `access_token` may also be provided in the `Authorization` header as `Bearer <token>`.
|
||||
|
||||
An implementation of the above OAuth logic is at [ircd-app](https://github.com/cloudron-io/ircd-app/blob/master/settings/app.js).
|
||||
|
||||
The following libraries implement Cloudron OAuth for Ruby and Javascript.
|
||||
|
||||
* [omniauth-cloudron](https://github.com/cloudron-io/omniauth-cloudron)
|
||||
* [passport-cloudron](https://github.com/cloudron-io/passport-cloudron)
|
||||
|
||||
# Beta Testing
|
||||
|
||||
Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
The app should now be visible in the Store view of your cloudron under
|
||||
the 'Testing' section. You can check if the icon, description and other details
|
||||
appear correctly.
|
||||
|
||||
Other Cloudron users can install your app on their Cloudrons using
|
||||
`cloudron install --appstore-id <appid@version>`. Note that this currently
|
||||
requires your beta testers to install the CLI tool and put their Cloudron in
|
||||
developer mode.
|
||||
|
||||
# Publishing
|
||||
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
# Next steps
|
||||
|
||||
Congratulations! You are now well equipped to build web applications for the Cloudron.
|
||||
|
||||
# Samples
|
||||
|
||||
* [Lets Chat](https://github.com/cloudron-io/letschat-app)
|
||||
* [Haste bin](https://github.com/cloudron-io/haste-app)
|
||||
* [Pasteboard](https://github.com/cloudron-io/pasteboard-app)
|
||||
@@ -1,497 +0,0 @@
|
||||
# Overview
|
||||
|
||||
This tutorial outlines how to package an existing web application for the Cloudron.
|
||||
|
||||
If you are aware of Docker and Heroku, you should feel at home packaging for the
|
||||
Cloudron. Roughly, the steps involved are:
|
||||
|
||||
* Create a Dockerfile for your application. If your application already has a Dockerfile, it
|
||||
is a good starting point for packaging for the Cloudron. By virtue of Docker, the Cloudron
|
||||
is able to run apps written in any language/framework.
|
||||
|
||||
* Create a CloudronManifest.json that provides information like title, author, description
|
||||
etc. You can also specify the addons (like database) required
|
||||
to run your app. When the app runs on the Cloudron, it will have environment
|
||||
variables set for connecting to the addon.
|
||||
|
||||
* Test the app on your Cloudron with the CLI tool.
|
||||
|
||||
* Optionally, submit the app to [Cloudron Store](/appstore.html).
|
||||
|
||||
# Prerequisites
|
||||
|
||||
## Install CLI tool
|
||||
|
||||
The Cloudron CLI tool allows you to install, configure and test apps on your Cloudron.
|
||||
|
||||
Installing the CLI tool requires [node.js](https://nodejs.org/) and
|
||||
[npm](https://www.npmjs.com/). You can then install the CLI tool using the following
|
||||
command:
|
||||
|
||||
```
|
||||
sudo npm install -g cloudron
|
||||
```
|
||||
|
||||
Note: Depending on your setup, you can run the above command without `sudo`.
|
||||
|
||||
## Login to Cloudron
|
||||
|
||||
The `cloudron` command should now be available in your path.
|
||||
|
||||
You can login to your Cloudron now:
|
||||
|
||||
```
|
||||
$ cloudron login
|
||||
Cloudron Hostname: craft.selfhost.io
|
||||
|
||||
Enter credentials for craft.selfhost.io:
|
||||
Username: girish
|
||||
Password:
|
||||
Login successful.
|
||||
```
|
||||
|
||||
# Basic app
|
||||
|
||||
We will first package a very simple app to understand how the packaging works.
|
||||
You can clone this app from https://git.cloudron.io/cloudron/tutorial-basic.
|
||||
|
||||
## The server
|
||||
|
||||
The basic app server is a very simple HTTP server that runs on port 8000.
|
||||
While the server in this tutorial uses node.js, you can write your server
|
||||
in any language you want.
|
||||
|
||||
```server.js
|
||||
var http = require("http");
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end("Hello World\n");
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
## Dockerfile
|
||||
|
||||
The Dockerfile contains instructions on how to create an image for your application.
|
||||
|
||||
```Dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-4.4.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
All Cloudron apps **must** start from this base image. This approach conserves space on the Cloudron since
|
||||
Docker images tend to be quite large and also helps us to do a security audit on apps more easily.
|
||||
|
||||
The `ADD` command copies the source code of the app into the directory `/app/code`. There is nothing special
|
||||
about the `/app/code` directory and it is merely a convention we use to store the application code.
|
||||
|
||||
The `CMD` command specifies how to run the server. The base image already contains many different versions of
|
||||
node.js. We use Node 4.4.7 here.
|
||||
|
||||
This Dockerfile can be built and run locally as:
|
||||
```
|
||||
docker build -t tutorial .
|
||||
docker run -p 8000:8000 -t tutorial
|
||||
```
|
||||
|
||||
## Manifest
|
||||
|
||||
The `CloudronManifest.json` specifies
|
||||
|
||||
* Information for installing and running the app on the Cloudron. This includes fields like addons, httpPort, tcpPorts.
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example, fields like title, author, description.
|
||||
|
||||
Create the CloudronManifest.json using `cloudron init` as follows:
|
||||
|
||||
```
|
||||
$ cloudron init
|
||||
id: io.cloudron.tutorial # unique id for this app. use reverse domain name convention
|
||||
author: John Doe # developer or company name of the form "user <email>"
|
||||
title: Tutorial App # Cloudron Store title of this app
|
||||
description: App that uses node.js # A string or local file reference like file://DESCRIPTION.md
|
||||
tagline: Changing the world one app at a time # A tag line for this app for the Cloudron Store
|
||||
website: https://cloudron.io # A link to this app's website
|
||||
contactEmail: support@cloudron.io # Contact email of developer or company
|
||||
httpPort: 8000 # The http port on which this application listens to
|
||||
```
|
||||
|
||||
The above command creates a CloudronManifest.json:
|
||||
|
||||
File ```tutorial/CloudronManifest.json```
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"title": "Tutorial App",
|
||||
"author": "John Doe",
|
||||
"description": "file://DESCRIPTION.md",
|
||||
"changelog": "file://CHANGELOG",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"tags": [
|
||||
    "changeme"
|
||||
],
|
||||
"mediaLinks": [ ]
|
||||
}
|
||||
```
|
||||
|
||||
You can read in more detail about each field in the [Manifest reference](/references/manifest.html). The
|
||||
`localstorage` addon allows the app to store files in `/app/data`. We will explore addons further
|
||||
down in this tutorial.
|
||||
|
||||
Additional files created by `init` are:
|
||||
* `DESCRIPTION.md` - A markdown file providing description of the app for the Cloudron Store.
|
||||
* `CHANGELOG` - A file containing change information for each version released to the Cloudron Store. This
|
||||
information is shown when the user updates the app.
|
||||
|
||||
# Installing
|
||||
|
||||
We now have all the necessary files in place to build and deploy the app to the Cloudron.
|
||||
|
||||
## Building
|
||||
|
||||
Building, pushing and pulling docker images can be very bandwidth and CPU intensive. To alleviate this
|
||||
problem, apps are built using the `build service` which uses `cloudron.io` account credentials.
|
||||
|
||||
**Warning**: As of this writing, the build service uses the public Docker registry and the images that are built
|
||||
can be downloaded by anyone. This means that your source code will be viewable by others.
|
||||
|
||||
Initiate a build using ```cloudron build```:
|
||||
```
|
||||
$ cloudron build
|
||||
Building io.cloudron.tutorial@0.0.1
|
||||
|
||||
Appstore login:
|
||||
Email: ramakrishnan.girish@gmail.com # cloudron.io account
|
||||
Password: # Enter password
|
||||
Login successful.
|
||||
|
||||
Build scheduled with id e7706847-f2e3-4ba2-9638-3f334a9453a5
|
||||
Waiting for build to begin, this may take a bit...
|
||||
Downloading source
|
||||
Building
|
||||
Step 1 : FROM cloudron/base:0.10.0
|
||||
---> be9fc6312b2d
|
||||
Step 2 : ADD server.js /app/code/server.js
|
||||
---> 10513e428d7a
|
||||
Removing intermediate container 574573f6ed1c
|
||||
Step 3 : CMD /usr/local/node-4.2.1/bin/node /app/code/server.js
|
||||
---> Running in b541d149b6b9
|
||||
---> 51aa796ea6e5
|
||||
Removing intermediate container b541d149b6b9
|
||||
Successfully built 51aa796ea6e5
|
||||
Pushing
|
||||
The push refers to a repository [docker.io/cloudron/img-062037096d69bbf3ffb5b9316ad89cb9] (len: 1)
|
||||
Pushed 51aa796ea6e5
|
||||
Pushed 10513e428d7a
|
||||
Image already exists be9fc6312b2d
|
||||
Image already exists a0261a2a7c75
|
||||
Image already exists f9d4f0f1eeed
|
||||
Image already exists 2b650158d5d8
|
||||
e7706847-f2e3-4ba2-9638-3f334a9453a5: digest: sha256:8241d68b65874496191106ecf2ee8f3df2e05a953cd90ff074a6f8815a49389c size: 26098
|
||||
Build succeeded
|
||||
Success
|
||||
```
|
||||
|
||||
## Installing
|
||||
|
||||
Now that we have built the image, we can install our latest build on the Cloudron
|
||||
using the following command:
|
||||
|
||||
```
|
||||
$ cloudron install
|
||||
Using cloudron craft.selfhost.io
|
||||
Using build 76cebfdd-7822-4f3d-af17-b3eb393ae604 from 1 hour ago
|
||||
Location: tutorial # This is the location into which the application installs
|
||||
App is being installed with id: 4dedd3bb-4bae-41ef-9f32-7f938995f85e
|
||||
|
||||
=> Waiting to start installation
|
||||
=> Registering subdomain .
|
||||
=> Verifying manifest .
|
||||
=> Downloading image ..............
|
||||
=> Creating volume .
|
||||
=> Creating container
|
||||
=> Setting up collectd profile ................
|
||||
=> Waiting for DNS propagation ...
|
||||
|
||||
App is installed.
|
||||
```
|
||||
|
||||
Open the app in your default browser:
|
||||
```
|
||||
cloudron open
|
||||
```
|
||||
|
||||
You should see `Hello World`.
|
||||
|
||||
# Testing
|
||||
|
||||
The application testing cycle involves `cloudron build` and `cloudron install`.
|
||||
Note that `cloudron install` updates an existing app in place.
|
||||
|
||||
You can view the logs using `cloudron logs`. When the app is running you can follow the logs
|
||||
using `cloudron logs -f`.
|
||||
|
||||
For example, you can see the console.log output in our server.js with the command below:
|
||||
|
||||
```
|
||||
$ cloudron logs
|
||||
Using cloudron craft.selfhost.io
|
||||
16:44:11 [main] Server running at port 8000
|
||||
```
|
||||
|
||||
It is also possible to run a *shell* and *execute* arbitrary commands in the context of the application
|
||||
process by using `cloudron exec`. By default, exec simply drops you into an interactive bash shell with
|
||||
which you can inspect the file system and the environment.
|
||||
|
||||
```
|
||||
$ cloudron exec
|
||||
```
|
||||
|
||||
You can also execute arbitrary commands:
|
||||
```
|
||||
$ cloudron exec env # display the env variables that your app is running with
|
||||
```
|
||||
|
||||
### Debugging
|
||||
|
||||
An app can be placed in `debug` mode by passing `--debug` to `cloudron install` or `cloudron configure`.
|
||||
Doing so runs the app in a non-readonly rootfs with unlimited memory. By default, this will also ignore
|
||||
the `RUN` command specified in the Dockerfile. The developer can then interactively test the app and
|
||||
startup scripts using `cloudron exec`.
|
||||
|
||||
This mode can be used to identify the files being modified by your application - often required to
|
||||
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
|
||||
and use `find / -mmin -30` to find files that have been changed or created in the last 30 minutes.
|
||||
|
||||
You can turn off debugging mode using `cloudron configure --no-debug`.
|
||||
|
||||
# Addons
|
||||
|
||||
## Filesystem
|
||||
|
||||
The application container created on the Cloudron has a `readonly` file system. Writing to any location
|
||||
other than the below will result in an error:
|
||||
|
||||
* `/tmp` - Use this location for temporary files. The Cloudron will cleanup any files in this directory
|
||||
periodically.
|
||||
|
||||
* `/run` - Use this location for runtime configuration and dynamic data. These files should not be expected
|
||||
to persist across application restarts (for example, after an update or a crash).
|
||||
|
||||
* `/app/data` - Use this location to store application data that is to be backed up. To use this location,
|
||||
you must use the [localstorage](/references/addons.html#localstorage) addon. For convenience, the initial CloudronManifest.json generated by
|
||||
`cloudron init` already contains this addon.
|
||||
|
||||
## Database
|
||||
|
||||
Most web applications require a database of some form. In theory, it is possible to run any
|
||||
database you want as part of the application image. This is, however, a waste of server resources
|
||||
should every app run its own database server.
|
||||
|
||||
Cloudron currently provides [mysql](/references/addons.html#mysql), [postgresql](/references/addons.html#postgresql),
|
||||
[mongodb](/references/addons.html#mongodb), [redis](/references/addons.html#redis) database addons. When choosing
|
||||
these addons, the Cloudron will inject environment variables that contain information on how to connect
|
||||
to the addon.
|
||||
|
||||
See https://git.cloudron.io/cloudron/tutorial-redis for a simple example of how redis can be used by
|
||||
an application. The server simply uses the environment variables to connect to redis.
|
||||
|
||||
## Email
|
||||
|
||||
Cloudron applications can send email using the `sendmail` addon. Using the `sendmail` addon provides
|
||||
the SMTP server and authentication credentials in environment variables.
|
||||
|
||||
Cloudron applications can also receive mail via IMAP using the `recvmail` addon.
|
||||
|
||||
## Authentication
|
||||
|
||||
The Cloudron has a centralized panel for managing users and groups. Apps can integrate Single Sign-On
|
||||
authentication using LDAP or OAuth.
|
||||
|
||||
Apps can integrate with the Cloudron authentication system using LDAP, OAuth or Simple Auth. See the
|
||||
[authentication](/references/authentication.html) reference page for more details.
|
||||
|
||||
See https://git.cloudron.io/cloudron/tutorial-ldap for a simple example of how to authenticate via LDAP.
|
||||
|
||||
Apps that are single user can skip Single Sign-On support by setting `"singleUser": true`
|
||||
in the manifest. By doing so, the Cloudron installer will show a dialog to choose a user.
|
||||
|
||||
For apps that have no user management at all, the Cloudron implements an `OAuth proxy` that
|
||||
optionally lets the Cloudron admin make the app visible only for logged in users.
|
||||
|
||||
# Best practices
|
||||
|
||||
## No Setup
|
||||
|
||||
A Cloudron app is meant to be instantly usable after installation. For this reason, Cloudron apps must not
|
||||
show any setup screen after installation and should simply choose reasonable defaults.
|
||||
|
||||
Databases, email configuration should be automatically picked up from the environment variables using
|
||||
addons.
|
||||
|
||||
## Dockerfile
|
||||
|
||||
The app is run as a read-only docker container. Because of this:
|
||||
* Install any required packages in the Dockerfile.
|
||||
* Create static configuration files in the Dockerfile.
|
||||
* Create symlinks to dynamic configuration files under /run in the Dockerfile.
|
||||
|
||||
## Process manager
|
||||
|
||||
Docker supports restarting processes natively. Should your application crash, it will be restarted
|
||||
automatically. If your application is a single process, you do not require any process manager.
|
||||
|
||||
Use supervisor, pm2 or any of the other process managers if your application has more than one component.
|
||||
This **excludes** web servers like apache, nginx which can already manage their children by themselves.
|
||||
Be sure to pick a process manager that forwards signals to child processes.
|
||||
|
||||
## Automatic updates
|
||||
|
||||
Some apps support automatic updates by overwriting themselves. A Cloudron app cannot overwrite itself
|
||||
because of the read-only file system. For this reason, disable auto updates for app and let updates be
|
||||
triggered through the Cloudron Store. This ties in better to the Cloudron's update and restore approach
|
||||
should something go wrong with the update.
|
||||
|
||||
## Logging
|
||||
|
||||
Cloudron applications stream their logs to stdout and stderr. In practice, this ideal is hard to achieve.
|
||||
Some programs like apache simply don't log to stdout. In those cases, simply log to `/tmp` or `/run`.
|
||||
|
||||
Logging to stdout has many advantages:
|
||||
* App does not need to rotate logs and the Cloudron takes care of managing logs.
|
||||
* App does not need special mechanism to release log file handles (on a log rotate).
|
||||
* Integrates better with tooling like cloudron cli.
|
||||
|
||||
## Memory
|
||||
|
||||
By default, applications get 256MB RAM (including swap). This can be changed using the `memoryLimit`
|
||||
field in the manifest.
|
||||
|
||||
Design your application runtime for concurrent use by 50 users. The Cloudron is not designed for
|
||||
concurrent access by 100s or 1000s of users.
|
||||
|
||||
An app can determine its memory limit by reading `/sys/fs/cgroup/memory/memory.limit_in_bytes`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
This saves the user from having to manage separate set of credentials for each app.
|
||||
|
||||
## Startup Script
|
||||
|
||||
Many apps do not launch the server directly, as we did in our basic example. Instead, they execute
|
||||
a `start.sh` script (named so by convention) which launches the server. Before starting the server,
|
||||
the `start.sh` script does the following:
|
||||
|
||||
* When using the `localstorage` addon, it changes the ownership of files in `/app/data` as desired using `chown`. This
|
||||
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
|
||||
updates.
|
||||
|
||||
* Addon information (mail, database) exposed as environment variables is subject to change across restarts and an application
|
||||
must use these values directly (i.e. not cache them across restarts). For this reason, it usually regenerates
|
||||
any config files with the current database settings on each invocation.
|
||||
|
||||
* Finally, it starts the server as a non-root user.
|
||||
|
||||
The app's main process must handle SIGTERM and forward it as required to child processes. bash does not
|
||||
automatically forward signals to child processes. For this reason, when using a startup shell script,
|
||||
remember to use `exec <app>` as the last line. Doing so will replace bash with your program and allows
|
||||
your program to handle signals as required.
|
||||
|
||||
# Beta Testing
|
||||
|
||||
## Metadata
|
||||
|
||||
Publishing to the Cloudron Store requires apps to have meta data specified in the `CloudronManifest.json`.
|
||||
|
||||
The `cloudron` tool will notify if any such information is missing, prior to uploading.
|
||||
See more information for each field [here](/references/manifest.html).
|
||||
|
||||
## Upload for Testing
|
||||
|
||||
Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
You should now be able to visit `/#/appstore/<appid>?version=<appversion>` on your
|
||||
Cloudron to check if the icon, description and other details appear correctly.
|
||||
|
||||
Other Cloudron users can install your app on their Cloudrons using
|
||||
`cloudron install --appstore-id <appid@version>`.
|
||||
|
||||
# Publishing
|
||||
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
# Updating the app
|
||||
|
||||
## Versioning
|
||||
|
||||
To create an update for an app, simply bump up the [semver version](/references/manifest.html#version) field in
|
||||
the manifest and publish a new version to the store.
|
||||
|
||||
The Cloudron chooses the next app version to update to based on the following algorithm:
|
||||
* Choose the maximum `patch` version matching the app's current `major` and `minor` version.
|
||||
* Failing the above, choose the maximum patch version of the next minor version matching the app's current `major` version.
|
||||
* Failing the above, choose the maximum patch and minor version of the next major version
|
||||
|
||||
For example, let's assume the versions 1.1.3, 1.1.4, 1.1.5, 1.2.4, 1.2.6, 1.3.0, 2.0.0 are published.
|
||||
|
||||
* If the app is running 1.1.3, then app will directly update to 1.1.5 (skipping 1.1.4)
|
||||
* Once in 1.1.5, the app will update to 1.2.6 (skipping 1.2.4)
|
||||
* Once in 1.2.6, the app will update to 1.3.0
|
||||
* Once in 1.3.0, the app will update to 2.0.0
|
||||
|
||||
The Cloudron admins get notified by email for any major or minor app releases.
|
||||
|
||||
## Failed updates
|
||||
|
||||
The Cloudron always makes a backup of the app before making an update. Should the
|
||||
update fail, the user can restore to the backup (which will also restore the app's
|
||||
code to the previous version).
|
||||
|
||||
# Cloudron Button
|
||||
|
||||
The [Cloudron Button](/references/button.html) allows anyone to install your application with the click of a button
|
||||
on their Cloudron.
|
||||
|
||||
The button can be added to just about any website including the application's website
|
||||
and README.md files in GitHub repositories.
|
||||
|
||||
# Next steps
|
||||
|
||||
Congratulations! You are now well equipped to build web applications for the Cloudron.
|
||||
|
||||
You can see some examples of how real apps are packaged here:
|
||||
|
||||
* [Lets Chat](https://git.cloudron.io/cloudron/letschat-app)
|
||||
* [Haste bin](https://git.cloudron.io/cloudron/haste-app)
|
||||
* [Pasteboard](https://git.cloudron.io/cloudron/pasteboard-app)
|
||||
@@ -2,17 +2,18 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
var ejs = require('gulp-ejs'),
|
||||
gulp = require('gulp'),
|
||||
del = require('del'),
|
||||
concat = require('gulp-concat'),
|
||||
uglify = require('gulp-uglify'),
|
||||
serve = require('gulp-serve'),
|
||||
sass = require('gulp-sass'),
|
||||
sourcemaps = require('gulp-sourcemaps'),
|
||||
cssnano = require('gulp-cssnano'),
|
||||
var argv = require('yargs').argv,
|
||||
autoprefixer = require('gulp-autoprefixer'),
|
||||
argv = require('yargs').argv;
|
||||
concat = require('gulp-concat'),
|
||||
cssnano = require('gulp-cssnano'),
|
||||
del = require('del'),
|
||||
ejs = require('gulp-ejs'),
|
||||
gulp = require('gulp'),
|
||||
sass = require('gulp-sass'),
|
||||
serve = require('gulp-serve'),
|
||||
sourcemaps = require('gulp-sourcemaps'),
|
||||
uglify = require('gulp-uglify'),
|
||||
url = require('url');
|
||||
|
||||
gulp.task('3rdparty', function () {
|
||||
gulp.src([
|
||||
@@ -54,14 +55,16 @@ gulp.task('js', ['js-index', 'js-setup', 'js-setupdns', 'js-update'], function (
|
||||
var oauth = {
|
||||
clientId: argv.clientId || 'cid-webadmin',
|
||||
clientSecret: argv.clientSecret || 'unused',
|
||||
apiOrigin: argv.apiOrigin || ''
|
||||
apiOrigin: argv.apiOrigin || '',
|
||||
apiOriginHostname: argv.apiOrigin ? url.parse(argv.apiOrigin).hostname : ''
|
||||
};
|
||||
|
||||
console.log();
|
||||
console.log('Using OAuth credentials:');
|
||||
console.log(' ClientId: %s', oauth.clientId);
|
||||
console.log(' ClientSecret: %s', oauth.clientSecret);
|
||||
console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
|
||||
console.log(' ClientId: %s', oauth.clientId);
|
||||
console.log(' ClientSecret: %s', oauth.clientSecret);
|
||||
console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
|
||||
console.log(' Cloudron Host: %s', oauth.apiOriginHostname);
|
||||
console.log();
|
||||
|
||||
|
||||
@@ -140,7 +143,7 @@ gulp.task('js-update', function () {
|
||||
// --------------
|
||||
|
||||
gulp.task('html', ['html-views', 'html-update', 'html-templates'], function () {
|
||||
return gulp.src('webadmin/src/*.html').pipe(gulp.dest('webadmin/dist'));
|
||||
return gulp.src('webadmin/src/*.html').pipe(ejs({ apiOriginHostname: oauth.apiOriginHostname }, { ext: '.html' })).pipe(gulp.dest('webadmin/dist'));
|
||||
});
|
||||
|
||||
gulp.task('html-update', function () {
|
||||
|
||||
@@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
'use strict';
|
||||
|
||||
var tar = require('tar-fs'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
zlib = require('zlib');
|
||||
|
||||
if (process.argv.length < 4) {
|
||||
console.error('Usage: tarjs <cwd> <dir>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
var dir = process.argv[3];
|
||||
var cwd = process.argv[2];
|
||||
|
||||
console.error('Packing directory "'+ dir +'" from within "' + cwd + '" and stream to stdout');
|
||||
|
||||
process.chdir(cwd);
|
||||
|
||||
var stat = fs.statSync(dir);
|
||||
if (!stat.isDirectory()) throw(dir + ' is not a directory');
|
||||
|
||||
var gzipStream = zlib.createGzip({});
|
||||
|
||||
tar.pack(path.resolve(dir), {
|
||||
ignore: function (name) {
|
||||
if (name === '.') return true;
|
||||
return false;
|
||||
}
|
||||
}).pipe(gzipStream).pipe(process.stdout);
|
||||
@@ -3,10 +3,10 @@
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE eventlog(" +
|
||||
"id VARCHAR(128) NOT NULL," +
|
||||
"source JSON," +
|
||||
"source TEXT," +
|
||||
"creationTime TIMESTAMP," +
|
||||
"action VARCHAR(128) NOT NULL," +
|
||||
"data JSON," +
|
||||
"data TEXT," +
|
||||
"PRIMARY KEY (id))";
|
||||
|
||||
db.runSql(cmd, function (error) {
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs ADD COLUMN name VARCHAR(128)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs DROP COLUMN name', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
'use strict';
|
||||
|
||||
var url = require('url');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var dbName = url.parse(process.env.DATABASE_URL).path.substr(1); // remove slash
|
||||
|
||||
// by default, mysql collates case insensitively. 'utf8_general_cs' is not available
|
||||
db.runSql('ALTER DATABASE ' + dbName + ' DEFAULT CHARACTER SET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,95 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
|
||||
function postProcess(result) {
|
||||
try {
|
||||
result.manifest = JSON.parse(result.manifestJson);
|
||||
delete result.manifestJson;
|
||||
|
||||
result.oldConfig = JSON.parse(result.oldConfigJson);
|
||||
delete result.oldConfigJson;
|
||||
|
||||
result.portBindings = { };
|
||||
var hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
|
||||
var environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');
|
||||
|
||||
delete result.hostPorts;
|
||||
delete result.environmentVariables;
|
||||
|
||||
for (var i = 0; i < environmentVariables.length; i++) {
|
||||
result.portBindings[environmentVariables[i]] = parseInt(hostPorts[i], 10);
|
||||
}
|
||||
|
||||
result.accessRestriction = JSON.parse(result.accessRestrictionJson);
|
||||
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
|
||||
delete result.accessRestrictionJson;
|
||||
|
||||
// TODO remove later once all apps have this attribute
|
||||
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
|
||||
|
||||
result.sso = !!result.sso; // make it bool
|
||||
|
||||
result.debugMode = JSON.parse(result.debugModeJson);
|
||||
delete result.debugModeJson;
|
||||
} catch (e) {
|
||||
console.error('Failed to get restoreConfig for app.', e);
|
||||
console.error('Falling back to empty values to make the update succeed.');
|
||||
result.manifest = null;
|
||||
}
|
||||
}
|
||||
|
||||
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
|
||||
// fill all the backups with restoreConfigs from current apps
|
||||
function addRestoreConfigs(callback) {
|
||||
console.log('Importing restoreConfigs');
|
||||
|
||||
var appQuery = 'SELECT ' + APPS_FIELDS_PREFIXED + ',' +
|
||||
'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables' +
|
||||
' FROM apps LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId' +
|
||||
' GROUP BY apps.id ORDER BY apps.id';
|
||||
|
||||
db.all(appQuery, function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
apps.forEach(postProcess);
|
||||
|
||||
async.eachSeries(apps, function (app, next) {
|
||||
if (app.manifest === null) return next();
|
||||
|
||||
db.all('SELECT * FROM backups WHERE type="app" AND id LIKE "%app%\\_' + app.id + '\\_%"', function (error, backups) {
|
||||
if (error) return next(error);
|
||||
|
||||
// from apps.js:getAppConfig()
|
||||
var restoreConfig = {
|
||||
manifest: app.manifest,
|
||||
location: app.location,
|
||||
accessRestriction: app.accessRestriction,
|
||||
portBindings: app.portBindings,
|
||||
memoryLimit: app.memoryLimit,
|
||||
xFrameOptions: app.xFrameOptions || 'SAMEORIGIN',
|
||||
altDomain: app.altDomain
|
||||
};
|
||||
|
||||
async.eachSeries(backups, function (backup, next) {
|
||||
db.runSql('UPDATE backups SET restoreConfigJson=?,creationTime=creationTime WHERE id=?', [ JSON.stringify(restoreConfig), backup.id ], next);
|
||||
}, next);
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN restoreConfigJson', callback);
|
||||
};
|
||||
@@ -0,0 +1,22 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.provider === 'filesystem') {
|
||||
backupConfig.retentionSecs = 2 * 24 * 60 * 60; // 2 days
|
||||
} else if (backupConfig.provider === 's3') { // S3
|
||||
backupConfig.retentionSecs = -1;
|
||||
} else if (backupConfig.provider === 'caas') {
|
||||
backupConfig.retentionSecs = 10 * 24 * 60 * 60; // 10 days
|
||||
}
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('INSERT settings (name, value) VALUES("mail_relay", ?)', [ JSON.stringify({ provider: 'cloudron-smtp' }) ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="mail_relay"', [ ], callback);
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN robotsTxt TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN robotsTxt', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
// we used to have JSON as the db type for those two, however mariadb does not support it
|
||||
// and we never used any JSON related features, but have the TEXT pattern everywhere
|
||||
// This ensures all old cloudrons will have the columns altered
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
@@ -68,10 +68,11 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
xFrameOptions VARCHAR(512),
|
||||
sso BOOLEAN DEFAULT 1, // whether user chose to enable SSO
|
||||
debugModeJson TEXT, // options for development mode
|
||||
robotsTxt TEXT,
|
||||
|
||||
lastBackupId VARCHAR(128), // tracks last valid backup, can be removed
|
||||
|
||||
oldConfigJson TEXT, // used to pass old config for apptask, can be removed when we use a queue
|
||||
// the following fields do not belong here, they can be removed when we use a queue for apptask
|
||||
lastBackupId VARCHAR(128), // used to pass backupId to restore from to apptask
|
||||
oldConfigJson TEXT, // used to pass old config for apptask
|
||||
|
||||
PRIMARY KEY(id));
|
||||
|
||||
@@ -97,24 +98,26 @@ CREATE TABLE IF NOT EXISTS settings(
|
||||
CREATE TABLE IF NOT EXISTS appAddonConfigs(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
addonId VARCHAR(32) NOT NULL,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
value VARCHAR(512) NOT NULL,
|
||||
FOREIGN KEY(appId) REFERENCES apps(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS backups(
|
||||
filename VARCHAR(128) NOT NULL,
|
||||
id VARCHAR(128) NOT NULL,
|
||||
creationTime TIMESTAMP,
|
||||
version VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
|
||||
dependsOn TEXT, /* comma separate list of objects this backup depends on */
|
||||
state VARCHAR(16) NOT NULL,
|
||||
restoreConfigJson TEXT, /* JSON including the manifest of the backed up app */
|
||||
|
||||
PRIMARY KEY (filename));
|
||||
PRIMARY KEY (id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS eventlog(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
action VARCHAR(128) NOT NULL,
|
||||
source JSON, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data JSON, /* free flowing json based on action */
|
||||
source TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data TEXT, /* free flowing json based on action */
|
||||
creationTime TIMESTAMP, /* FIXME: precision must be TIMESTAMP(2) */
|
||||
|
||||
PRIMARY KEY (id));
|
||||
|
||||
@@ -13,10 +13,10 @@
|
||||
"node >=4.0.0 <=4.1.1"
|
||||
],
|
||||
"dependencies": {
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"async": "^2.1.4",
|
||||
"aws-sdk": "^2.1.46",
|
||||
"aws-sdk": "^2.41.0",
|
||||
"body-parser": "^1.13.1",
|
||||
"checksum": "^0.1.1",
|
||||
"cloudron-manifestformat": "^2.8.0",
|
||||
"connect-ensure-login": "^0.1.1",
|
||||
"connect-lastmile": "^0.1.0",
|
||||
@@ -28,14 +28,14 @@
|
||||
"db-migrate": "^0.10.0-beta.20",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^2.2.0",
|
||||
"dockerode": "^2.2.10",
|
||||
"dockerode": "^2.4.3",
|
||||
"ejs": "^2.2.4",
|
||||
"ejs-cli": "^1.2.0",
|
||||
"express": "^4.12.4",
|
||||
"express-rate-limit": "^2.6.0",
|
||||
"express-session": "^1.11.3",
|
||||
"gulp-sass": "^3.0.0",
|
||||
"hat": "0.0.3",
|
||||
"hock": "https://registry.npmjs.org/hock/-/hock-1.3.2.tgz",
|
||||
"json": "^9.0.3",
|
||||
"ldapjs": "^1.0.0",
|
||||
"mime": "^1.3.4",
|
||||
@@ -43,11 +43,9 @@
|
||||
"morgan": "^1.7.0",
|
||||
"multiparty": "^4.1.2",
|
||||
"mysql": "^2.7.0",
|
||||
"native-dns": "^0.7.0",
|
||||
"node-df": "^0.1.1",
|
||||
"node-uuid": "^1.4.3",
|
||||
"nodemailer": "^1.3.0",
|
||||
"nodemailer-smtp-transport": "^1.0.3",
|
||||
"nodemailer": "^4.0.1",
|
||||
"nodemailer-smtp-transport": "^2.7.4",
|
||||
"oauth2orize": "^1.0.1",
|
||||
"once": "^1.3.2",
|
||||
"parse-links": "^0.1.0",
|
||||
@@ -57,13 +55,16 @@
|
||||
"passport-local": "^1.0.0",
|
||||
"passport-oauth2-client-password": "^0.1.2",
|
||||
"password-generator": "^2.0.2",
|
||||
"progress-stream": "^2.0.0",
|
||||
"proxy-middleware": "^0.13.0",
|
||||
"safetydance": "^0.1.1",
|
||||
"s3-block-read-stream": "^0.2.0",
|
||||
"safetydance": "^0.2.0",
|
||||
"semver": "^4.3.6",
|
||||
"showdown": "^1.6.0",
|
||||
"split": "^1.0.0",
|
||||
"superagent": "^1.8.3",
|
||||
"supererror": "^0.7.1",
|
||||
"tar-fs": "https://registry.npmjs.org/tar-fs/-/tar-fs-1.15.2.tgz",
|
||||
"tldjs": "^1.6.2",
|
||||
"underscore": "^1.7.0",
|
||||
"valid-url": "^1.0.9",
|
||||
@@ -87,15 +88,18 @@
|
||||
"istanbul": "*",
|
||||
"js2xmlparser": "^1.0.0",
|
||||
"mocha": "*",
|
||||
"mock-aws-s3": "^2.4.0",
|
||||
"nock": "^9.0.2",
|
||||
"node-sass": "^3.0.0-alpha.0",
|
||||
"readdirp": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz",
|
||||
"request": "^2.65.0",
|
||||
"yargs": "^3.15.0"
|
||||
},
|
||||
"scripts": {
|
||||
"migrate_local": "DATABASE_URL=mysql://root:@localhost/box node_modules/.bin/db-migrate up",
|
||||
"migrate_test": "BOX_ENV=test DATABASE_URL=mysql://root:@localhost/boxtest node_modules/.bin/db-migrate up",
|
||||
"test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test",
|
||||
"test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test/[^a]*",
|
||||
"test_all": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test",
|
||||
"postmerge": "/bin/true",
|
||||
"precommit": "/bin/true",
|
||||
"prepush": "npm test",
|
||||
|
||||
@@ -15,45 +15,55 @@ fi
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
# copied from cloudron-resize-fs.sh
|
||||
readonly rootfs_type=$(LC_ALL=C df --output=fstype / | tail -n1)
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }')
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024/1024))
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024))
|
||||
|
||||
# verify the system has minimum requirements met
|
||||
if [[ "${rootfs_type}" != "ext4" ]]; then
|
||||
echo "Error: Cloudron requires '/' to be ext4" # see #364
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
|
||||
echo "Error: Cloudron requires atleast 1GB physical memory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on ${disk_device} is ${disk_size_gb}GB)"
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on / is ${disk_size_gb}GB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
initBaseImage="true"
|
||||
# provisioning data
|
||||
domain=""
|
||||
zoneName=""
|
||||
provider=""
|
||||
encryptionKey=""
|
||||
restoreUrl=""
|
||||
dnsProvider="manual"
|
||||
tlsProvider="le-prod"
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
requestedVersion="latest"
|
||||
requestedVersion=""
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
dataJson=""
|
||||
prerelease="false"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
baseDataDir=""
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
# TODO this is still there for the restore case, see other occasions below
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,data-dir:,provider:,encryption-key:,restore-url:,tls-provider:,version:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -68,24 +78,25 @@ while true; do
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
webServerOrigin="https://dev.cloudron.io"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
fi
|
||||
shift 2;;
|
||||
--versions-url) versionsUrl="$2"; shift 2;;
|
||||
--api-server) apiServerOrigin="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--data) dataJson="$2"; shift 2;;
|
||||
--prerelease) prerelease="true"; shift;;
|
||||
--source-url) sourceTarballUrl="$2"; version="0.0.1+custom"; shift 2;;
|
||||
--data-dir) baseDataDir=$(realpath "$2"); shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -94,13 +105,14 @@ done
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, scaleway, vultr or generic)"
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "ami" && \
|
||||
"${provider}" != "azure" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "gce" && \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "ovh" && \
|
||||
@@ -109,7 +121,7 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
echo "--provider must be one of: azure, digitalocean, ec2, gce, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -125,11 +137,16 @@ if [[ -z "${dataJson}" ]]; then
|
||||
echo "--dns-provider must be one of : manual, noop"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -n "${baseDataDir}" && ! -d "${baseDataDir}" ]]; then
|
||||
echo "${baseDataDir} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "##############################################"
|
||||
echo " Cloudron Setup (${requestedVersion}) "
|
||||
echo " Cloudron Setup (${requestedVersion:-latest})"
|
||||
echo "##############################################"
|
||||
echo ""
|
||||
echo " Follow setup logs in a second terminal with:"
|
||||
@@ -153,28 +170,35 @@ fi
|
||||
|
||||
echo "=> Checking version"
|
||||
if [[ "${sourceTarballUrl}" == "" ]]; then
|
||||
releaseJson=$($curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
if ! releaseJson=$($curl -s "${apiServerOrigin}/api/v1/releases?prerelease=${prerelease}&boxVersion=${requestedVersion}"); then
|
||||
echo "Failed to get release information"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$requestedVersion" == "" ]]; then
|
||||
version=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["version"])')
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["info"]["sourceTarballUrl"])'); then
|
||||
echo "No source code for version '${requestedVersion:-latest}'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build data
|
||||
# TODO versionsUrl is still there for the cloudron restore case
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${restoreUrl}" ]]; then
|
||||
data=$(cat <<EOF
|
||||
{
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"zoneName": "${zoneName}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"tlsConfig": {
|
||||
"provider": "${tlsProvider}"
|
||||
},
|
||||
@@ -184,7 +208,8 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"backupConfig" : {
|
||||
"provider": "filesystem",
|
||||
"backupFolder": "/var/backups",
|
||||
"key": "${encryptionKey}"
|
||||
"key": "${encryptionKey}",
|
||||
"retentionSecs": 172800
|
||||
},
|
||||
"updateConfig": {
|
||||
"prerelease": ${prerelease}
|
||||
@@ -198,8 +223,10 @@ EOF
|
||||
{
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"zoneName": "${zoneName}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"restore": {
|
||||
"url": "${restoreUrl}",
|
||||
"key": "${encryptionKey}"
|
||||
@@ -232,9 +259,17 @@ fi
|
||||
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
# poor mans semver
|
||||
if [[ ${version} == "0.10"* ]]; then
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" --data-dir "${baseDataDir}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
rm "${DATA_FILE}"
|
||||
|
||||
@@ -256,5 +291,6 @@ fi
|
||||
|
||||
if [[ "${rebootServer}" == "true" ]]; then
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
systemctl stop mysql # sometimes mysql ends up having corrupt privilege tables
|
||||
systemctl reboot
|
||||
fi
|
||||
|
||||
@@ -31,8 +31,8 @@ if ! $(cd "${SOURCE_DIR}" && git diff --exit-code >/dev/null); then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(node --version)" != "v6.9.2" ]]; then
|
||||
echo "This script requires node 6.9.2"
|
||||
if [[ "$(node --version)" != "v6.11.1" ]]; then
|
||||
echo "This script requires node 6.11.1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -58,7 +58,7 @@ else
|
||||
fi
|
||||
|
||||
echo "Building webadmin assets"
|
||||
(cd "${bundle_dir}" && gulp)
|
||||
(cd "${bundle_dir}" && ./node_modules/.bin/gulp)
|
||||
|
||||
echo "Remove intermediate files required at build-time only"
|
||||
rm -rf "${bundle_dir}/node_modules/"
|
||||
@@ -84,4 +84,3 @@ echo "Cleaning up ${bundle_dir}"
|
||||
rm -rf "${bundle_dir}"
|
||||
|
||||
echo "Tarball saved at ${bundle_file}"
|
||||
|
||||
|
||||
@@ -9,27 +9,40 @@ fi
|
||||
|
||||
readonly USER=yellowtent
|
||||
readonly BOX_SRC_DIR=/home/${USER}/box
|
||||
readonly BASE_DATA_DIR=/home/${USER}
|
||||
readonly CLOUDRON_CONF=/home/yellowtent/configs/cloudron.conf
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
|
||||
|
||||
readonly is_update=$([[ -f "${CLOUDRON_CONF}" ]] && echo "yes" || echo "no")
|
||||
|
||||
arg_data=""
|
||||
arg_data_dir=""
|
||||
|
||||
args=$(getopt -o "" -l "data:,data-file:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "data:,data-file:,data-dir:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--data) arg_data="$2"; shift 2;;
|
||||
--data-file) arg_data=$(cat $2); shift 2;;
|
||||
--data-dir) arg_data_dir="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "==> installer: updating node"
|
||||
if [[ "$(node --version)" != "v6.11.1" ]]; then
|
||||
mkdir -p /usr/local/node-6.11.1
|
||||
$curl -sL https://nodejs.org/dist/v6.11.1/node-v6.11.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.1
|
||||
ln -sf /usr/local/node-6.11.1/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-6.11.1/bin/npm /usr/bin/npm
|
||||
rm -rf /usr/local/node-6.9.2
|
||||
fi
|
||||
|
||||
for try in `seq 1 10`; do
|
||||
# for reasons unknown, the dtrace package will fail. but rebuilding second time will work
|
||||
|
||||
@@ -56,9 +69,21 @@ if [[ "${is_update}" == "yes" ]]; then
|
||||
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
|
||||
fi
|
||||
|
||||
# setup links to data directory
|
||||
if [[ -n "${arg_data_dir}" ]]; then
|
||||
echo "==> installer: setting up links to data directory"
|
||||
mkdir "${arg_data_dir}/appsdata"
|
||||
ln -s "${arg_data_dir}/appsdata" "${BASE_DATA_DIR}/appsdata"
|
||||
mkdir "${arg_data_dir}/platformdata"
|
||||
ln -s "${arg_data_dir}/platformdata" "${BASE_DATA_DIR}/platformdata"
|
||||
fi
|
||||
|
||||
# ensure we are not inside the source directory, which we will remove now
|
||||
cd /root
|
||||
|
||||
echo "==> installer: updating packages"
|
||||
# add logic to update apt packages here
|
||||
|
||||
echo "==> installer: switching the box code"
|
||||
rm -rf "${BOX_SRC_DIR}"
|
||||
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
|
||||
|
||||
@@ -5,8 +5,8 @@ json="${source_dir}/../node_modules/.bin/json"
|
||||
|
||||
# IMPORTANT: Fix cloudron.js:doUpdate if you add/remove any arg. keep these sorted for readability
|
||||
arg_api_server_origin=""
|
||||
arg_box_versions_url=""
|
||||
arg_fqdn=""
|
||||
arg_zone_name=""
|
||||
arg_is_custom_domain="false"
|
||||
arg_restore_key=""
|
||||
arg_restore_url=""
|
||||
@@ -41,6 +41,7 @@ while true; do
|
||||
--data)
|
||||
# these params must be valid in all cases
|
||||
arg_fqdn=$(echo "$2" | $json fqdn)
|
||||
arg_zone_name=$(echo "$2" | $json zoneName)
|
||||
|
||||
arg_is_custom_domain=$(echo "$2" | $json isCustomDomain)
|
||||
[[ "${arg_is_custom_domain}" == "" ]] && arg_is_custom_domain="true"
|
||||
@@ -50,8 +51,6 @@ while true; do
|
||||
[[ "${arg_api_server_origin}" == "" ]] && arg_api_server_origin="https://api.cloudron.io"
|
||||
arg_web_server_origin=$(echo "$2" | $json webServerOrigin)
|
||||
[[ "${arg_web_server_origin}" == "" ]] && arg_web_server_origin="https://cloudron.io"
|
||||
arg_box_versions_url=$(echo "$2" | $json boxVersionsUrl)
|
||||
[[ "${arg_box_versions_url}" == "" ]] && arg_box_versions_url="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
|
||||
# TODO check if an where this is used
|
||||
arg_version=$(echo "$2" | $json version)
|
||||
@@ -64,7 +63,9 @@ while true; do
|
||||
[[ "${arg_is_demo}" == "" ]] && arg_is_demo="false"
|
||||
|
||||
arg_tls_cert=$(echo "$2" | $json tlsCert)
|
||||
[[ "${arg_tls_cert}" == "null" ]] && arg_tls_cert=""
|
||||
arg_tls_key=$(echo "$2" | $json tlsKey)
|
||||
[[ "${arg_tls_key}" == "null" ]] && arg_tls_key=""
|
||||
arg_token=$(echo "$2" | $json token)
|
||||
|
||||
arg_provider=$(echo "$2" | $json provider)
|
||||
@@ -97,14 +98,14 @@ done
|
||||
|
||||
echo "Parsed arguments:"
|
||||
echo "api server: ${arg_api_server_origin}"
|
||||
echo "box versions url: ${arg_box_versions_url}"
|
||||
echo "fqdn: ${arg_fqdn}"
|
||||
echo "custom domain: ${arg_is_custom_domain}"
|
||||
echo "restore key: ${arg_restore_key}"
|
||||
echo "restore url: ${arg_restore_url}"
|
||||
echo "tls cert: ${arg_tls_cert}"
|
||||
echo "tls key: ${arg_tls_key}"
|
||||
echo "token: ${arg_token}"
|
||||
# do not dump these as they might become available via logs API
|
||||
#echo "restore key: ${arg_restore_key}"
|
||||
#echo "tls key: ${arg_tls_key}"
|
||||
#echo "token: ${arg_token}"
|
||||
echo "tlsConfig: ${arg_tls_config}"
|
||||
echo "version: ${arg_version}"
|
||||
echo "web server: ${arg_web_server_origin}"
|
||||
|
||||
@@ -6,12 +6,12 @@ readonly SETUP_WEBSITE_DIR="/home/yellowtent/setup/website"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_dir="$(realpath ${script_dir}/..)"
|
||||
readonly DATA_DIR="/home/yellowtent/data"
|
||||
readonly PLATFORM_DATA_DIR="/home/yellowtent/platformdata"
|
||||
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
|
||||
|
||||
echo "Setting up nginx update page"
|
||||
|
||||
if [[ ! -f "${DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
if [[ ! -f "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
echo "No admin.conf found. This Cloudron has no domain yet. Skip splash setup"
|
||||
exit
|
||||
fi
|
||||
@@ -29,16 +29,16 @@ cp -r "${script_dir}/splash/website/"* "${SETUP_WEBSITE_DIR}"
|
||||
# create nginx config
|
||||
readonly current_infra=$(node -e "console.log(require('${script_dir}/../src/infra_version.js').version);")
|
||||
existing_infra="none"
|
||||
[[ -f "${DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
[[ -f "${PLATFORM_DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${PLATFORM_DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
if [[ "${arg_retire_reason}" != "" || "${existing_infra}" != "${current_infra}" ]]; then
|
||||
echo "Showing progress bar on all subdomains in retired mode or infra update. retire: ${arg_retire_reason} existing: ${existing_infra} current: ${current_infra}"
|
||||
rm -f ${DATA_DIR}/nginx/applications/*
|
||||
rm -f ${PLATFORM_DATA_DIR}/nginx/applications/*
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
else
|
||||
echo "Show progress bar only on admin domain for normal update"
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
fi
|
||||
|
||||
if [[ "${arg_retire_reason}" == "migrate" ]]; then
|
||||
|
||||
@@ -5,10 +5,11 @@ set -eu -o pipefail
|
||||
echo "==> Cloudron Start"
|
||||
|
||||
readonly USER="yellowtent"
|
||||
readonly DATA_FILE="/root/user_data.img"
|
||||
readonly HOME_DIR="/home/${USER}"
|
||||
readonly BOX_SRC_DIR="${HOME_DIR}/box"
|
||||
readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
|
||||
readonly OLD_DATA_DIR="${HOME_DIR}/data";
|
||||
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data
|
||||
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data
|
||||
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
|
||||
readonly CONFIG_DIR="${HOME_DIR}/configs"
|
||||
readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
|
||||
@@ -33,45 +34,19 @@ timedatectl set-ntp 1
|
||||
timedatectl set-timezone UTC
|
||||
hostnamectl set-hostname "${arg_fqdn}"
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
if [[ "${arg_provider}" == "caas" ]]; then
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,202,443,587,993,4190 -j ACCEPT
|
||||
else
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,22,443,587,993,4190 -j ACCEPT
|
||||
fi
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# so it gets restored across reboot
|
||||
mkdir -p /etc/iptables && iptables-save > /etc/iptables/rules.v4
|
||||
|
||||
echo "==> Configuring docker"
|
||||
cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
|
||||
systemctl enable apparmor
|
||||
systemctl restart apparmor
|
||||
|
||||
usermod ${USER} -a -G docker
|
||||
# preserve the existing storage driver (user might be using overlay2)
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
[[ -n "${storage_driver}" ]] || storage_driver="devicemapper" # if the above command fails
|
||||
|
||||
temp_file=$(mktemp)
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=${storage_driver}" > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
# restart docker if options changed
|
||||
@@ -96,46 +71,55 @@ if [[ "${arg_provider}" == "caas" ]]; then
|
||||
systemctl reload sshd
|
||||
fi
|
||||
|
||||
echo "==> Setup btrfs data"
|
||||
if [[ ! -d "${DATA_DIR}" ]]; then
|
||||
echo "==> Mounting loopback btrfs"
|
||||
truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service)
|
||||
mkfs.btrfs -L UserDataHome "${DATA_FILE}"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
mount -t btrfs -o loop,nosuid "${DATA_FILE}" ${DATA_DIR}
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
mkdir -p "${APPS_DATA_DIR}"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}"
|
||||
|
||||
# keep these in sync with paths.js
|
||||
echo "==> Ensuring directories"
|
||||
if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory
|
||||
btrfs subvolume create "${DATA_DIR}/mail"
|
||||
[[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
|
||||
rm -rf "${DATA_DIR}/box/mail"
|
||||
if [[ ! -d "${PLATFORM_DATA_DIR}/mail" ]]; then
|
||||
if [[ -d "${OLD_DATA_DIR}/mail" ]]; then
|
||||
echo "==> Migrate old mail data"
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail"
|
||||
# we can't move the whole folder as it is a btrfs subvolume mount
|
||||
mv -f "${OLD_DATA_DIR}/mail/"* "${PLATFORM_DATA_DIR}/mail/" # this used to be mail container's run directory
|
||||
else
|
||||
echo "==> Create new mail data dir"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail"
|
||||
fi
|
||||
fi
|
||||
mkdir -p "${DATA_DIR}/graphite"
|
||||
mkdir -p "${DATA_DIR}/mail/dkim"
|
||||
|
||||
mkdir -p "${DATA_DIR}/mysql"
|
||||
mkdir -p "${DATA_DIR}/postgresql"
|
||||
mkdir -p "${DATA_DIR}/mongodb"
|
||||
mkdir -p "${DATA_DIR}/snapshots"
|
||||
mkdir -p "${DATA_DIR}/addons/mail"
|
||||
mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${DATA_DIR}/acme"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/graphite"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail/dkim"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mysql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/postgresql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mongodb"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/snapshots"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/addons/mail"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/acme"
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
|
||||
# Migrate box data out of data volume
|
||||
mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}"
|
||||
btrfs subvolume delete "${DATA_DIR}/box"
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}/appicons"
|
||||
mkdir -p "${BOX_DATA_DIR}/certs"
|
||||
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
|
||||
|
||||
# ensure backups folder exists and is writeable
|
||||
mkdir -p /var/backups
|
||||
chmod 777 /var/backups
|
||||
|
||||
echo "==> Check for old btrfs volumes"
|
||||
if mountpoint -q "${OLD_DATA_DIR}"; then
|
||||
echo "==> Cleanup btrfs volumes"
|
||||
# First stop all container to be able to unmount
|
||||
docker ps -q | xargs docker stop
|
||||
umount "${OLD_DATA_DIR}"
|
||||
rm -rf "/root/user_data.img"
|
||||
else
|
||||
echo "==> No btrfs volumes found";
|
||||
fi
|
||||
|
||||
echo "==> Configuring journald"
|
||||
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
|
||||
-e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
|
||||
@@ -161,14 +145,17 @@ echo "==> Setting up unbound"
|
||||
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
|
||||
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30\n\tcache-max-ttl: 300" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
|
||||
echo "==> Adding systemd services"
|
||||
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
|
||||
systemctl daemon-reload
|
||||
systemctl enable unbound
|
||||
systemctl enable cloudron.target
|
||||
systemctl enable iptables-restore
|
||||
systemctl enable cloudron-firewall
|
||||
|
||||
# update firewall rules
|
||||
systemctl restart cloudron-firewall
|
||||
|
||||
# For logrotate
|
||||
systemctl enable --now cron
|
||||
@@ -182,18 +169,18 @@ cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
|
||||
|
||||
echo "==> Configuring collectd"
|
||||
rm -rf /etc/collectd
|
||||
ln -sfF "${DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd.conf" "${DATA_DIR}/collectd/collectd.conf"
|
||||
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
|
||||
systemctl restart collectd
|
||||
|
||||
echo "==> Configuring nginx"
|
||||
# link nginx config to system config
|
||||
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
|
||||
ln -s "${DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
|
||||
ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${PLATFORM_DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${PLATFORM_DATA_DIR}/nginx/mime.types"
|
||||
if ! grep -q "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then
|
||||
# default nginx service file does not restart on crash
|
||||
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
|
||||
@@ -202,12 +189,7 @@ fi
|
||||
systemctl start nginx
|
||||
|
||||
# bookkeep the version as part of data
|
||||
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
|
||||
# which currently fails because these are readonly fs
|
||||
echo "==> Cleaning up snapshots"
|
||||
find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete
|
||||
echo "{ \"version\": \"${arg_version}\", \"apiServerOrigin\": \"${arg_api_server_origin}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# restart mysql to make sure it has latest config
|
||||
if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then
|
||||
@@ -230,11 +212,18 @@ mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
if [[ -n "${arg_restore_url}" ]]; then
|
||||
set_progress "30" "Downloading restore data"
|
||||
|
||||
echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
decrypt=""
|
||||
if [[ "${arg_restore_url}" == *.tar.gz.enc || -n "${arg_restore_key}" ]]; then
|
||||
echo "==> Downloading encrypted backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
decrypt=(openssl aes-256-cbc -d -nosalt -pass "pass:${arg_restore_key}")
|
||||
else
|
||||
echo "==> Downloading backup: ${arg_restore_url}"
|
||||
decrypt=(cat -)
|
||||
fi
|
||||
|
||||
while true; do
|
||||
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
if $curl -L "${arg_restore_url}" | "${decrypt[@]}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,platformdata/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
echo "Failed to download data, trying again"
|
||||
done
|
||||
|
||||
@@ -260,8 +249,8 @@ cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
|
||||
"apiServerOrigin": "${arg_api_server_origin}",
|
||||
"webServerOrigin": "${arg_web_server_origin}",
|
||||
"fqdn": "${arg_fqdn}",
|
||||
"zoneName": "${arg_zone_name}",
|
||||
"isCustomDomain": ${arg_is_custom_domain},
|
||||
"boxVersionsUrl": "${arg_box_versions_url}",
|
||||
"provider": "${arg_provider}",
|
||||
"isDemo": ${arg_is_demo},
|
||||
"database": {
|
||||
@@ -289,11 +278,11 @@ CONF_END
|
||||
|
||||
echo "==> Changing ownership"
|
||||
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme"
|
||||
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
|
||||
|
||||
echo "==> Adding automated configs"
|
||||
if [[ ! -z "${arg_backup_config}" ]]; then
|
||||
|
||||
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443,587,993,4190 -j ACCEPT
|
||||
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# Setup rate limit chain (the recent info is at /proc/net/xt_recent)
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT # empty any existing rules
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT_LOG || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT_LOG # empty any existing rules
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -m limit --limit 2/min -j LOG --log-prefix "IPTables RateLimit: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -j DROP
|
||||
|
||||
# http https
|
||||
for port in 80 443; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# ssh smtp ssh msa imap sieve
|
||||
for port in 22 202; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# TODO: move docker platform rules to platform.js so it can be specialized to rate limit only when destination is the mail container
|
||||
|
||||
# docker translates (dnat) 25, 587, 993, 4190 in the PREROUTING step
|
||||
for port in 2525 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn ! -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 50 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# msa, ldap, imap, sieve
|
||||
for port in 2525 3002 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 500 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# cloudron docker network: mysql postgresql redis mongodb
|
||||
for port in 3306 5432 6379 27017; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# For ssh, http, https
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null; then
|
||||
iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
fi
|
||||
|
||||
# For smtp, imap etc routed via docker/nat
|
||||
# Workaroud issue where Docker insists on adding itself first in FORWARD table
|
||||
iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
|
||||
iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
|
||||
@@ -2,49 +2,42 @@
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
readonly USER_HOME="/home/yellowtent"
|
||||
readonly APPS_SWAP_FILE="/apps.swap"
|
||||
readonly USER_DATA_FILE="/root/user_data.img"
|
||||
readonly USER_DATA_DIR="/home/yellowtent/data"
|
||||
|
||||
# detect device of rootfs (http://forums.fedoraforum.org/showthread.php?t=270316)
|
||||
disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
|
||||
existing_swap=$(cat /proc/meminfo | grep SwapTotal | awk '{ printf "%.0f", $2/1024 }')
|
||||
|
||||
# all sizes are in mb
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly swap_size=$((${physical_memory} > 4096 ? 4096 : ${physical_memory})) # min(RAM, 4GB) if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly app_count=$((${physical_memory} / 200)) # estimated app count
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }') # can't rely on fdisk human readable units, using bytes instead
|
||||
readonly disk_size=$((${disk_size_bytes}/1024/1024))
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size=$((${disk_size_bytes}/1024))
|
||||
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
|
||||
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
|
||||
|
||||
echo "Disk device: ${disk_device}"
|
||||
echo "Physical memory: ${physical_memory}"
|
||||
echo "Estimated app count: ${app_count}"
|
||||
echo "Disk size: ${disk_size}M"
|
||||
|
||||
# Allocate swap for general app usage
|
||||
if [[ ! -f "${APPS_SWAP_FILE}" && ${swap_size} -gt 0 ]]; then
|
||||
echo "Creating Apps swap file of size ${swap_size}M"
|
||||
fallocate -l "${swap_size}m" "${APPS_SWAP_FILE}"
|
||||
readonly current_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
readonly needed_swap_size=$((swap_size - current_swap))
|
||||
if [[ ${needed_swap_size} -gt 0 ]]; then
|
||||
echo "Need more swap of ${needed_swap_size}M"
|
||||
# compute size of apps.swap ignoring what is already set
|
||||
without_apps_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{if ($1!="/apps.swap") s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
apps_swap_size=$((swap_size - without_apps_swap))
|
||||
echo "Creating Apps swap file of size ${apps_swap_size}M"
|
||||
if [[ -f "${APPS_SWAP_FILE}" ]]; then
|
||||
echo "Swapping off before resizing swap"
|
||||
swapoff "${APPS_SWAP_FILE}" || true
|
||||
fi
|
||||
fallocate -l "${apps_swap_size}m" "${APPS_SWAP_FILE}"
|
||||
chmod 600 "${APPS_SWAP_FILE}"
|
||||
mkswap "${APPS_SWAP_FILE}"
|
||||
swapon "${APPS_SWAP_FILE}"
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
if ! grep -q "${APPS_SWAP_FILE}" /etc/fstab; then
|
||||
echo "Adding swap to fstab"
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
fi
|
||||
else
|
||||
echo "Apps Swap file already exists"
|
||||
echo "Swap requirements already met"
|
||||
fi
|
||||
|
||||
# see start.sh for the initial default size of 8gb. On small disks the calculation might be lower than 8gb resulting in a failure to resize here.
|
||||
echo "Resizing data volume"
|
||||
home_data_size=$((disk_size - system_size - swap_size - ext4_reserved))
|
||||
echo "Resizing up btrfs user data to size ${home_data_size}M"
|
||||
umount "${USER_DATA_DIR}" || true
|
||||
# Do not preallocate (non-sparse). Doing so overallocates for data too much in advance and causes problems when using many apps with smaller data
|
||||
# fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data
|
||||
truncate -s "${home_data_size}m" "${USER_DATA_FILE}" # this will shrink it if the file had existed. this is useful when running this script on a live system
|
||||
mount -t btrfs -o loop,nosuid "${USER_DATA_FILE}" ${USER_DATA_DIR}
|
||||
btrfs filesystem resize max "${USER_DATA_DIR}"
|
||||
|
||||
@@ -194,7 +194,7 @@ LoadPlugin write_graphite
|
||||
|
||||
<Plugin df>
|
||||
FSType "ext4"
|
||||
FSType "btrfs"
|
||||
MountPoint "/"
|
||||
|
||||
ReportByDevice true
|
||||
IgnoreSelected false
|
||||
@@ -260,4 +260,3 @@ LoadPlugin write_graphite
|
||||
<Include "/etc/collectd/collectd.conf.d">
|
||||
Filter "*.conf"
|
||||
</Include>
|
||||
|
||||
|
||||
@@ -8,3 +8,19 @@ max_connections=50
|
||||
# on ec2, without this we get a sporadic connection drop when doing the initial migration
|
||||
max_allowed_packet=32M
|
||||
|
||||
# https://mathiasbynens.be/notes/mysql-utf8mb4
|
||||
character-set-server = utf8mb4
|
||||
collation-server = utf8mb4_unicode_ci
|
||||
|
||||
[mysqldump]
|
||||
quick
|
||||
quote-names
|
||||
max_allowed_packet = 16M
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[mysql]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[client]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
|
||||
@@ -6,10 +6,10 @@ map $http_upgrade $connection_upgrade {
|
||||
|
||||
server {
|
||||
<% if (vhost) { %>
|
||||
listen 443;
|
||||
listen 443 http2;
|
||||
server_name <%= vhost %>;
|
||||
<% } else { %>
|
||||
listen 443 default_server;
|
||||
listen 443 http2 default_server;
|
||||
<% } %>
|
||||
|
||||
ssl on;
|
||||
@@ -32,14 +32,21 @@ server {
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
proxy_hide_header X-Frame-Options;
|
||||
|
||||
# https://github.com/twitter/secureheaders
|
||||
# https://www.owasp.org/index.php/OWASP_Secure_Headers_Project#tab=Compatibility_Matrix
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
proxy_hide_header X-XSS-Protection;
|
||||
add_header X-Download-Options "noopen";
|
||||
proxy_hide_header X-Download-Options;
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
proxy_hide_header X-Content-Type-Options;
|
||||
add_header X-Permitted-Cross-Domain-Policies "none";
|
||||
proxy_hide_header X-Permitted-Cross-Domain-Policies;
|
||||
add_header Referrer-Policy "no-referrer-when-downgrade";
|
||||
proxy_hide_header Referrer-Policy;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_intercept_errors on;
|
||||
@@ -49,6 +56,7 @@ server {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header X-Forwarded-Ssl on;
|
||||
|
||||
@@ -69,22 +77,37 @@ server {
|
||||
proxy_buffers 4 256k;
|
||||
proxy_busy_buffers_size 256k;
|
||||
|
||||
# No buffering to temp files, it fails for large downloads
|
||||
proxy_max_temp_file_size 0;
|
||||
|
||||
# Disable check to allow unlimited body sizes
|
||||
client_max_body_size 0;
|
||||
|
||||
<% if (robotsTxtQuoted) { %>
|
||||
location = /robots.txt {
|
||||
return 200 <%- robotsTxtQuoted %>;
|
||||
}
|
||||
<% } %>
|
||||
|
||||
<% if ( endpoint === 'admin' ) { %>
|
||||
location /api/ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
}
|
||||
|
||||
location ~ ^/api/v1/(developer|session)/login$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
limit_req zone=admin_login burst=5;
|
||||
}
|
||||
|
||||
# the read timeout is between successive reads and not the whole connection
|
||||
location ~ ^/api/v1/apps/.*/exec$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
proxy_read_timeout 30m;
|
||||
}
|
||||
|
||||
# graphite paths
|
||||
# graphite paths (uncomment block below and visit /graphite/index.html)
|
||||
# location ~ ^/(graphite|content|metrics|dashboard|render|browser|composer)/ {
|
||||
# proxy_pass http://127.0.0.1:8000;
|
||||
# client_max_body_size 1m;
|
||||
@@ -94,7 +117,6 @@ server {
|
||||
root <%= sourceDir %>/webadmin/dist;
|
||||
index index.html index.htm;
|
||||
}
|
||||
|
||||
<% } else if ( endpoint === 'app' ) { %>
|
||||
proxy_pass http://127.0.0.1:<%= port %>;
|
||||
<% } else if ( endpoint === 'splash' ) { %>
|
||||
@@ -134,4 +156,3 @@ server {
|
||||
<% } %>
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,12 +15,12 @@ http {
|
||||
# the collectd config depends on this log format
|
||||
log_format combined2 '$remote_addr - [$time_local] '
|
||||
'"$request" $status $body_bytes_sent $request_time '
|
||||
'"$http_referer" "$http_user_agent"';
|
||||
'"$http_referer" "$host" "$http_user_agent"';
|
||||
|
||||
# required for long host names
|
||||
server_names_hash_bucket_size 128;
|
||||
|
||||
access_log access.log combined2;
|
||||
access_log /var/log/nginx/access.log combined2;
|
||||
|
||||
sendfile on;
|
||||
|
||||
@@ -33,6 +33,9 @@ http {
|
||||
# keep-alive connections timeout in 65s. this is because many browsers timeout in 60 seconds
|
||||
keepalive_timeout 65s;
|
||||
|
||||
# zones for rate limiting
|
||||
limit_req_zone $binary_remote_addr zone=admin_login:10m rate=10r/s; # 10 request a second
|
||||
|
||||
# HTTP server
|
||||
server {
|
||||
listen 80;
|
||||
@@ -48,7 +51,7 @@ http {
|
||||
# acme challenges
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type text/plain;
|
||||
alias /home/yellowtent/data/acme/;
|
||||
alias /home/yellowtent/platformdata/acme/;
|
||||
}
|
||||
|
||||
location / {
|
||||
@@ -59,4 +62,3 @@ http {
|
||||
|
||||
include applications/*.conf;
|
||||
}
|
||||
|
||||
|
||||
@@ -10,15 +10,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmappdir.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupbox.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupbox.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restoreapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restoreapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
|
||||
|
||||
@@ -31,11 +22,11 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/retire.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/retire.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/rmbackup.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmbackup.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/authorized_keys.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/authorized_keys.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/node.sh env_keep="HOME BOX_ENV NODE_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/node.sh
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Cloudron Firewall
|
||||
After=docker.service
|
||||
PartOf=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart="/home/yellowtent/box/setup/start/cloudron-firewall.sh"
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=IPTables Restore
|
||||
Before=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/sbin/iptables-restore /etc/iptables/rules.v4
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -20,6 +20,7 @@ var appdb = require('./appdb.js'),
|
||||
async = require('async'),
|
||||
clients = require('./clients.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
ClientsError = clients.ClientsError,
|
||||
debug = require('debug')('box:addons'),
|
||||
docker = require('./docker.js'),
|
||||
@@ -194,7 +195,11 @@ function getEnvironment(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
appdb.getAddonConfigByAppId(app.id, callback);
|
||||
appdb.getAddonConfigByAppId(app.id, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
return callback(null, result.map(function (e) { return e.name + '=' + e.value; }));
|
||||
});
|
||||
}
|
||||
|
||||
function getBindsSync(app, addons) {
|
||||
@@ -207,7 +212,7 @@ function getBindsSync(app, addons) {
|
||||
|
||||
for (var addon in addons) {
|
||||
switch (addon) {
|
||||
case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
case 'localstorage': binds.push(path.join(paths.APPS_DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
@@ -254,9 +259,9 @@ function setupOauth(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = [
|
||||
'OAUTH_CLIENT_ID=' + result.id,
|
||||
'OAUTH_CLIENT_SECRET=' + result.clientSecret,
|
||||
'OAUTH_ORIGIN=' + config.adminOrigin()
|
||||
{ name: 'OAUTH_CLIENT_ID', value: result.id },
|
||||
{ name: 'OAUTH_CLIENT_SECRET', value: result.clientSecret },
|
||||
{ name: 'OAUTH_ORIGIN', value: config.adminOrigin() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting oauth addon config to %j', env);
|
||||
@@ -287,13 +292,13 @@ function setupEmail(app, options, callback) {
|
||||
|
||||
// note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
|
||||
var env = [
|
||||
'MAIL_SMTP_SERVER=mail',
|
||||
'MAIL_SMTP_PORT=2525',
|
||||
'MAIL_IMAP_SERVER=mail',
|
||||
'MAIL_IMAP_PORT=9993',
|
||||
'MAIL_SIEVE_SERVER=mail',
|
||||
'MAIL_SIEVE_PORT=4190',
|
||||
'MAIL_DOMAIN=' + config.fqdn()
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_SIEVE_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SIEVE_PORT', value: '4190' },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up Email');
|
||||
@@ -319,13 +324,13 @@ function setupLdap(app, options, callback) {
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
var env = [
|
||||
'LDAP_SERVER=172.18.0.1',
|
||||
'LDAP_PORT=' + config.get('ldapPort'),
|
||||
'LDAP_URL=ldap://172.18.0.1:' + config.get('ldapPort'),
|
||||
'LDAP_USERS_BASE_DN=ou=users,dc=cloudron',
|
||||
'LDAP_GROUPS_BASE_DN=ou=groups,dc=cloudron',
|
||||
'LDAP_BIND_DN=cn='+ app.id + ',ou=apps,dc=cloudron',
|
||||
'LDAP_BIND_PASSWORD=' + hat(4 * 128) // this is ignored
|
||||
{ name: 'LDAP_SERVER', value: '172.18.0.1' },
|
||||
{ name: 'LDAP_PORT', value: '' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_URL', value: 'ldap://172.18.0.1:' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
|
||||
{ name: 'LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_DN', value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up LDAP');
|
||||
@@ -354,14 +359,15 @@ function setupSendMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
"MAIL_SMTP_SERVER=mail",
|
||||
"MAIL_SMTP_PORT=2525",
|
||||
"MAIL_SMTP_USERNAME=" + mailbox.name,
|
||||
"MAIL_SMTP_PASSWORD=" + app.id,
|
||||
"MAIL_FROM=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_SMTP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_SMTP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_FROM', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'sendmail', env, callback);
|
||||
@@ -389,14 +395,15 @@ function setupRecvMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
"MAIL_IMAP_SERVER=mail",
|
||||
"MAIL_IMAP_PORT=9993",
|
||||
"MAIL_IMAP_USERNAME=" + mailbox.name,
|
||||
"MAIL_IMAP_PASSWORD=" + app.id,
|
||||
"MAIL_TO=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_IMAP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_IMAP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_TO', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
@@ -426,7 +433,9 @@ function setupMySql(app, options, callback) {
|
||||
docker.execContainer('mysql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting mysql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mysql', env, callback);
|
||||
});
|
||||
@@ -453,7 +462,7 @@ function backupMySql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'backup-prefix' : 'backup', app.id ];
|
||||
@@ -469,7 +478,7 @@ function restoreMySql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMySql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'restore-prefix' : 'restore', app.id ];
|
||||
@@ -489,7 +498,9 @@ function setupPostgreSql(app, options, callback) {
|
||||
docker.execContainer('postgresql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting postgresql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'postgresql', env, callback);
|
||||
});
|
||||
@@ -516,7 +527,7 @@ function backupPostgreSql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'backup', app.id ];
|
||||
@@ -532,7 +543,7 @@ function restorePostgreSql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restorePostgreSql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'restore', app.id ];
|
||||
@@ -553,7 +564,9 @@ function setupMongoDb(app, options, callback) {
|
||||
docker.execContainer('mongodb', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting mongodb addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mongodb', env, callback);
|
||||
});
|
||||
@@ -580,7 +593,7 @@ function backupMongoDb(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'backup', app.id ];
|
||||
@@ -596,7 +609,7 @@ function restoreMongoDb(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMongoDb');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'restore', app.id ];
|
||||
@@ -610,9 +623,9 @@ function setupRedis(app, options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var redisPassword = generatePassword(64, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisPassword = generatePassword(128, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisVarsFile = path.join(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh');
|
||||
var redisDataDir = path.join(paths.DATA_DIR, app.id + '/redis');
|
||||
var redisDataDir = path.join(paths.APPS_DATA_DIR, app.id + '/redis');
|
||||
|
||||
if (!safe.fs.writeFileSync(redisVarsFile, 'REDIS_PASSWORD=' + redisPassword)) {
|
||||
return callback(new Error('Error writing redis config'));
|
||||
@@ -620,21 +633,34 @@ function setupRedis(app, options, callback) {
|
||||
|
||||
if (!safe.fs.mkdirSync(redisDataDir) && safe.error.code !== 'EEXIST') return callback(new Error('Error creating redis data dir:' + safe.error));
|
||||
|
||||
// Compute redis memory limit based on app's memory limit (this is arbitrary)
|
||||
var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;
|
||||
|
||||
if (memoryLimit === -1) { // unrestricted (debug mode)
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
|
||||
memoryLimit = 150 * 1024 * 1024; // 150m
|
||||
} else {
|
||||
memoryLimit = 600 * 1024 * 1024; // 600m
|
||||
}
|
||||
|
||||
const tag = infra.images.redis.tag, redisName = 'redis-' + app.id;
|
||||
const cmd = `docker run --restart=always -d --name=${redisName} \
|
||||
--net cloudron \
|
||||
--net-alias ${redisName} \
|
||||
-m 100m \
|
||||
--memory-swap 150m \
|
||||
-m ${memoryLimit/2} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-v ${redisVarsFile}:/etc/redis/redis_vars.sh:ro \
|
||||
-v ${redisDataDir}:/var/lib/redis:rw \
|
||||
--read-only -v /tmp -v /run ${tag}`;
|
||||
|
||||
var env = [
|
||||
'REDIS_URL=redis://redisuser:' + redisPassword + '@redis-' + app.id,
|
||||
'REDIS_PASSWORD=' + redisPassword,
|
||||
'REDIS_HOST=' + redisName,
|
||||
'REDIS_PORT=6379'
|
||||
{ name: 'REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
|
||||
{ name: 'REDIS_PASSWORD', value: redisPassword },
|
||||
{ name: 'REDIS_HOST', value: redisName },
|
||||
{ name: 'REDIS_PORT', value: '6379' }
|
||||
];
|
||||
|
||||
async.series([
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var crypto = require('crypto');
|
||||
|
||||
// This code is taken from https://github.com/fabdrol/node-aes-helper
|
||||
module.exports = {
|
||||
algorithm: 'AES-256-CBC',
|
||||
|
||||
key: function (password, salt) {
|
||||
var key = salt.toString('utf8') + password;
|
||||
var hash = crypto.createHash('sha1');
|
||||
|
||||
hash.update(key, 'utf8');
|
||||
return hash.digest('hex');
|
||||
},
|
||||
|
||||
encrypt: function (plain, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var cipher = crypto.createCipher(this.algorithm, key);
|
||||
var crypted;
|
||||
|
||||
try {
|
||||
crypted = cipher.update(plain, 'utf8', 'hex');
|
||||
crypted += cipher.final('hex');
|
||||
} catch (e) {
|
||||
console.error('Encryption error:', e);
|
||||
crypted = '';
|
||||
}
|
||||
|
||||
return crypted;
|
||||
},
|
||||
|
||||
decrypt: function (crypted, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var decipher = crypto.createDecipher(this.algorithm, key);
|
||||
var decoded;
|
||||
|
||||
try {
|
||||
decoded = decipher.update(crypted, 'hex', 'utf8');
|
||||
decoded += decipher.final('utf8');
|
||||
} catch (e) {
|
||||
console.error('Decryption error:', e);
|
||||
decoded = '';
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -14,6 +14,7 @@ exports = module.exports = {
|
||||
setAddonConfig: setAddonConfig,
|
||||
getAddonConfig: getAddonConfig,
|
||||
getAddonConfigByAppId: getAddonConfigByAppId,
|
||||
getAddonConfigByName: getAddonConfigByName,
|
||||
unsetAddonConfig: unsetAddonConfig,
|
||||
unsetAddonConfigByAppId: unsetAddonConfigByAppId,
|
||||
|
||||
@@ -59,7 +60,7 @@ var assert = require('assert'),
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson', 'apps.robotsTxt' ].join(',');
|
||||
|
||||
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'environmentVariable', 'appId' ].join(',');
|
||||
|
||||
@@ -413,11 +414,11 @@ function setAddonConfig(appId, addonId, env, callback) {
|
||||
|
||||
if (env.length === 0) return callback(null);
|
||||
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, value) VALUES ';
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, name, value) VALUES ';
|
||||
var args = [ ], queryArgs = [ ];
|
||||
for (var i = 0; i < env.length; i++) {
|
||||
args.push(appId, addonId, env[i]);
|
||||
queryArgs.push('(?, ?, ?)');
|
||||
args.push(appId, addonId, env[i].name, env[i].value);
|
||||
queryArgs.push('(?, ?, ?, ?)');
|
||||
}
|
||||
|
||||
database.query(query + queryArgs.join(','), args, function (error) {
|
||||
@@ -456,13 +457,10 @@ function getAddonConfig(appId, addonId, callback) {
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -470,13 +468,23 @@ function getAddonConfigByAppId(appId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function getAddonConfigByName(appId, addonId, name, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ? AND name = ?', [ appId, addonId, name ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null, results[0].value);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ exports = module.exports = {
|
||||
|
||||
checkManifestConstraints: checkManifestConstraints,
|
||||
|
||||
updateApps: updateApps,
|
||||
autoupdateApps: autoupdateApps,
|
||||
|
||||
restoreInstalledApps: restoreInstalledApps,
|
||||
configureInstalledApps: configureInstalledApps,
|
||||
@@ -45,6 +45,8 @@ exports = module.exports = {
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
appdb = require('./appdb.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
AppstoreError = require('./appstore.js').AppstoreError,
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
@@ -64,7 +66,6 @@ var addons = require('./addons.js'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
superagent = require('superagent'),
|
||||
@@ -242,6 +243,17 @@ function validateDebugMode(debugMode) {
|
||||
return null;
|
||||
}
|
||||
|
||||
function validateRobotsTxt(robotsTxt) {
|
||||
if (robotsTxt === null) return null;
|
||||
|
||||
// this is the nginx limit on inline strings. if we really hit this, we have to generate a file
|
||||
if (robotsTxt.length > 4096) return new AppsError(AppsError.BAD_FIELD, 'robotsTxt must be less than 4096');
|
||||
|
||||
// TODO: validate the robots file? we escape the string when templating the nginx config right now
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getDuplicateErrorDetails(location, portBindings, error) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
@@ -366,99 +378,6 @@ function getAllByUser(user, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function purchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function purchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
var data = { appstoreId: appstoreId };
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
purchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
purchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function unpurchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function unpurchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function downloadManifest(appStoreId, manifest, callback) {
|
||||
if (!appStoreId && !manifest) return callback(new AppsError(AppsError.BAD_FIELD, 'Neither manifest nor appStoreId provided'));
|
||||
|
||||
@@ -494,7 +413,9 @@ function install(data, auditSource, callback) {
|
||||
altDomain = data.altDomain || null,
|
||||
xFrameOptions = data.xFrameOptions || 'SAMEORIGIN',
|
||||
sso = 'sso' in data ? data.sso : null,
|
||||
debugMode = data.debugMode || null;
|
||||
debugMode = data.debugMode || null,
|
||||
robotsTxt = data.robotsTxt || null,
|
||||
backupId = data.backupId || null;
|
||||
|
||||
assert(data.appStoreId || data.manifest); // atleast one of them is required
|
||||
|
||||
@@ -525,11 +446,14 @@ function install(data, auditSource, callback) {
|
||||
error = validateDebugMode(debugMode);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateRobotsTxt(robotsTxt);
|
||||
if (error) return callback(error);
|
||||
|
||||
if ('sso' in data && !('optionalSso' in manifest)) return callback(new AppsError(AppsError.BAD_FIELD, 'sso can only be specified for apps with optionalSso'));
|
||||
// if sso was unspecified, enable it by default if possible
|
||||
if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
|
||||
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
|
||||
var appId = uuid.v4();
|
||||
|
||||
@@ -546,8 +470,11 @@ function install(data, auditSource, callback) {
|
||||
|
||||
debug('Will install app with id : ' + appId);
|
||||
|
||||
purchase(appId, appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
appstore.purchase(appId, appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
var data = {
|
||||
accessRestriction: accessRestriction,
|
||||
@@ -556,7 +483,8 @@ function install(data, auditSource, callback) {
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso,
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app',
|
||||
lastBackupId: backupId
|
||||
};
|
||||
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
@@ -571,7 +499,7 @@ function install(data, auditSource, callback) {
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest, backupId: backupId });
|
||||
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
@@ -606,7 +534,7 @@ function configure(appId, data, auditSource, callback) {
|
||||
|
||||
if ('altDomain' in data) {
|
||||
values.altDomain = data.altDomain;
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
}
|
||||
|
||||
if ('portBindings' in data) {
|
||||
@@ -635,6 +563,12 @@ function configure(appId, data, auditSource, callback) {
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
if ('robotsTxt' in data) {
|
||||
values.robotsTxt = data.robotsTxt || null;
|
||||
error = validateRobotsTxt(values.robotsTxt);
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
// save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
|
||||
if ('cert' in data && 'key' in data) {
|
||||
if (data.cert && data.key) {
|
||||
@@ -761,10 +695,9 @@ function appLogFilter(app) {
|
||||
return names.map(function (name) { return 'CONTAINER_NAME=' + name; });
|
||||
}
|
||||
|
||||
function getLogs(appId, lines, follow, callback) {
|
||||
function getLogs(appId, options, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof lines, 'number');
|
||||
assert.strictEqual(typeof follow, 'boolean');
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Getting logs for %s', appId);
|
||||
@@ -773,13 +706,21 @@ function getLogs(appId, lines, follow, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
var args = [ '--output=json', '--no-pager', '--lines=' + lines ];
|
||||
|
||||
var lines = options.lines || 100,
|
||||
follow = !!options.follow,
|
||||
format = options.format || 'json';
|
||||
|
||||
var args = [ '--no-pager', '--lines=' + lines ];
|
||||
if (follow) args.push('--follow');
|
||||
if (format == 'short') args.push('--output=short', '-a'); else args.push('--output=json');
|
||||
args = args.concat(appLogFilter(app));
|
||||
|
||||
var cp = spawn('/bin/journalctl', args);
|
||||
|
||||
var transformStream = split(function mapper(line) {
|
||||
if (format !== 'json') return line + '\n';
|
||||
|
||||
var obj = safe.JSON.parse(line);
|
||||
if (!obj) return undefined;
|
||||
|
||||
@@ -869,6 +810,7 @@ function clone(appId, data, auditSource, callback) {
|
||||
|
||||
backups.getRestoreConfig(backupId, function (error, restoreConfig) {
|
||||
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
|
||||
@@ -885,8 +827,11 @@ function clone(appId, data, auditSource, callback) {
|
||||
|
||||
var newAppId = uuid.v4(), appStoreId = app.appStoreId, manifest = restoreConfig.manifest;
|
||||
|
||||
purchase(newAppId, appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
appstore.purchase(newAppId, appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
var data = {
|
||||
installationState: appdb.ISTATE_PENDING_CLONE,
|
||||
@@ -923,8 +868,11 @@ function uninstall(appId, auditSource, callback) {
|
||||
get(appId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
appstore.unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
@@ -1047,12 +995,14 @@ function exec(appId, options, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function updateApps(updateInfo, auditSource, callback) { // updateInfo is { appId -> { manifest } }
|
||||
function autoupdateApps(updateInfo, auditSource, callback) { // updateInfo is { appId -> { manifest } }
|
||||
assert.strictEqual(typeof updateInfo, 'object');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
function canAutoupdateApp(app, newManifest) {
|
||||
if ((semver.major(app.manifest.version) !== 0) && (semver.major(app.manifest.version) !== semver.major(newManifest.version))) return new Error('Major version change'); // major changes are blocking
|
||||
|
||||
var newTcpPorts = newManifest.tcpPorts || { };
|
||||
var oldTcpPorts = app.manifest.tcpPorts || { };
|
||||
var portBindings = app.portBindings; // this is never null
|
||||
|
||||
@@ -0,0 +1,247 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
purchase: purchase,
|
||||
unpurchase: unpurchase,
|
||||
|
||||
getSubscription: getSubscription,
|
||||
|
||||
sendAliveStatus: sendAliveStatus,
|
||||
|
||||
getAppUpdate: getAppUpdate,
|
||||
getBoxUpdate: getBoxUpdate,
|
||||
|
||||
AppstoreError: AppstoreError
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
debug = require('debug')('box:appstore'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
os = require('os'),
|
||||
settings = require('./settings.js'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
function AppstoreError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(AppstoreError, Error);
|
||||
AppstoreError.INTERNAL_ERROR = 'Internal Error';
|
||||
AppstoreError.EXTERNAL_ERROR = 'External Error';
|
||||
AppstoreError.NOT_FOUND = 'Internal Error';
|
||||
AppstoreError.BILLING_REQUIRED = 'Billing Required';
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function getAppstoreConfig(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null, result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function getSubscription(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/subscription';
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'invalid appstore token'));
|
||||
if (result.statusCode === 403) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'wrong user'));
|
||||
if (result.statusCode === 502) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'stripe error'));
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'unknown error'));
|
||||
|
||||
callback(null, result.body.subscription);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function purchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
var data = { appstoreId: appstoreId };
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 402) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED, result.body.message));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function unpurchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function sendAliveStatus(data, callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getAll(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
|
||||
eventlog.getAllPaged(eventlog.ACTION_USER_LOGIN, null, 1, 1, function (error, loginEvents) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
|
||||
var backendSettings = {
|
||||
dnsConfig: {
|
||||
provider: result[settings.DNS_CONFIG_KEY].provider,
|
||||
wildcard: result[settings.DNS_CONFIG_KEY].provider === 'manual' ? result[settings.DNS_CONFIG_KEY].wildcard : undefined
|
||||
},
|
||||
tlsConfig: {
|
||||
provider: result[settings.TLS_CONFIG_KEY].provider
|
||||
},
|
||||
backupConfig: {
|
||||
provider: result[settings.BACKUP_CONFIG_KEY].provider
|
||||
},
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
mailRelay: {
|
||||
provider: result[settings.MAIL_RELAY_KEY].provider
|
||||
},
|
||||
mailCatchAll: {
|
||||
count: result[settings.CATCH_ALL_ADDRESS_KEY].length
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY],
|
||||
};
|
||||
|
||||
var data = {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
},
|
||||
events: {
|
||||
lastLogin: loginEvents[0] ? (new Date(loginEvents[0].creationTime).getTime()) : 0
|
||||
}
|
||||
};
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/alive';
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getBoxUpdate(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/boxupdate';
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version() }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
// { version, changelog, upgrade, sourceTarballUrl}
|
||||
callback(null, result.body);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getAppUpdate(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/appupdate';
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version(), appId: app.appStoreId, appVersion: app.manifest.version }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
// { id, creationDate, manifest }
|
||||
callback(null, result.body);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -50,6 +50,7 @@ var addons = require('./addons.js'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
@@ -222,7 +223,7 @@ function registerSubdomain(app, overwrite, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Registering subdomain location [%s]', app.location);
|
||||
debugApp(app, 'Registering subdomain location [%s] overwrite: %s', app.location, overwrite);
|
||||
|
||||
// get the current record before updating it
|
||||
subdomains.get(app.location, 'A', function (error, values) {
|
||||
@@ -307,7 +308,16 @@ function waitForAltDomainDnsPropagation(app, callback) {
|
||||
|
||||
// try for 10 minutes before giving up. this allows the user to "reconfigure" the app in the case where
|
||||
// an app has an external domain and cloudron is migrated to custom domain.
|
||||
subdomains.waitForDns(app.altDomain, config.appFqdn(app.location), 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
var isNakedDomain = tld.getDomain(app.altDomain) === app.altDomain;
|
||||
if (isNakedDomain) { // check naked domains with A record since CNAME records don't work there
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
subdomains.waitForDns(app.altDomain, ip, 'A', { interval: 10000, times: 60 }, callback);
|
||||
});
|
||||
} else {
|
||||
subdomains.waitForDns(app.altDomain, config.appFqdn(app.location) + '.', 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
}
|
||||
}
|
||||
|
||||
// updates the app object and the database
|
||||
@@ -343,6 +353,8 @@ function install(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const backupId = app.lastBackupId, isRestoring = app.installationState === appdb.ISTATE_PENDING_RESTORE;
|
||||
|
||||
async.series([
|
||||
verifyManifest.bind(null, app),
|
||||
|
||||
@@ -352,9 +364,16 @@ function install(app, callback) {
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
addons.teardownAddons.bind(null, app, app.manifest.addons),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : app.manifest.addons),
|
||||
deleteVolume.bind(null, app),
|
||||
unregisterSubdomain.bind(null, app, app.location),
|
||||
|
||||
// for restore case
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
@@ -362,7 +381,7 @@ function install(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '30, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app, false /* overwrite */),
|
||||
registerSubdomain.bind(null, app, isRestoring /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -370,8 +389,19 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '50, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
function restoreFromBackup(next) {
|
||||
if (!backupId) {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
], next);
|
||||
} else {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Download backup and restoring addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
|
||||
], next);
|
||||
}
|
||||
},
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
@@ -384,7 +414,7 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !lastBackupId
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configure nginx' }),
|
||||
@@ -408,9 +438,11 @@ function backup(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var prefix = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Backing up' }),
|
||||
backups.backupApp.bind(null, app, app.manifest, 'appbackups' /* tag */),
|
||||
backups.backupApp.bind(null, app, app.manifest, prefix),
|
||||
|
||||
// done!
|
||||
function (callback) {
|
||||
@@ -426,84 +458,6 @@ function backup(app, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// restore is also called for upgrades and infra updates. note that in those cases it is possible there is no backup
|
||||
function restore(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// we don't have a backup, same as re-install. this allows us to install from install failures (update failures always
|
||||
// have a backupId)
|
||||
if (!app.lastBackupId) {
|
||||
debugApp(app, 'No lastBackupId. reinstalling');
|
||||
return install(app, callback);
|
||||
}
|
||||
|
||||
var backupId = app.lastBackupId;
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
|
||||
unconfigureNginx.bind(null, app),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : null),
|
||||
deleteVolume.bind(null, app),
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading icon' }),
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '55, Registering subdomain' }), // ip might change during upgrades
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '65, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Download backup and restore addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '75, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Setting up collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
runApp.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configuring Nginx' }),
|
||||
configureNginx.bind(null, app),
|
||||
|
||||
// done!
|
||||
function (callback) {
|
||||
debugApp(app, 'restored');
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null }, callback);
|
||||
}
|
||||
], function seriesDone(error) {
|
||||
if (error) {
|
||||
debugApp(app, 'Error installing app: %s', error);
|
||||
return updateApp(app, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message }, callback.bind(null, error));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// note that configure is called after an infra update as well
|
||||
function configure(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
@@ -550,7 +504,7 @@ function configure(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '80, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain CNAME setup' }),
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Configuring Nginx' }),
|
||||
@@ -605,9 +559,11 @@ function update(app, callback) {
|
||||
function (next) {
|
||||
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
|
||||
|
||||
var prefix = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '30, Backing up app' }),
|
||||
backups.backupApp.bind(null, app, app.oldConfig.manifest, 'appbackups' /* tag */)
|
||||
backups.backupApp.bind(null, app, app.oldConfig.manifest, prefix)
|
||||
], next);
|
||||
},
|
||||
|
||||
@@ -740,13 +696,17 @@ function startTask(appId, callback) {
|
||||
switch (app.installationState) {
|
||||
case appdb.ISTATE_PENDING_UNINSTALL: return uninstall(app, callback);
|
||||
case appdb.ISTATE_PENDING_CONFIGURE: return configure(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_UPDATE: return update(app, callback);
|
||||
case appdb.ISTATE_PENDING_RESTORE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_RESTORE: return install(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_BACKUP: return backup(app, callback);
|
||||
case appdb.ISTATE_INSTALLED: return handleRunCommand(app, callback);
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
|
||||
case appdb.ISTATE_ERROR:
|
||||
debugApp(app, 'Internal error. apptask launched with error status.');
|
||||
return callback(null);
|
||||
|
||||
@@ -100,7 +100,7 @@ function initialize(callback) {
|
||||
var info = { scope: token.scope };
|
||||
|
||||
user.get(token.identifier, function (error, user) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, false);
|
||||
if (error && error.reason === UserError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, user, info);
|
||||
|
||||
@@ -3,15 +3,20 @@
|
||||
var assert = require('assert'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', ];
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'restoreConfigJson' ];
|
||||
|
||||
exports = module.exports = {
|
||||
add: add,
|
||||
getPaged: getPaged,
|
||||
|
||||
getByTypeAndStatePaged: getByTypeAndStatePaged,
|
||||
getByTypePaged: getByTypePaged,
|
||||
|
||||
get: get,
|
||||
del: del,
|
||||
update: update,
|
||||
getByAppIdPaged: getByAppIdPaged,
|
||||
|
||||
_clear: clear,
|
||||
@@ -20,21 +25,44 @@ exports = module.exports = {
|
||||
BACKUP_TYPE_BOX: 'box',
|
||||
|
||||
BACKUP_STATE_NORMAL: 'normal', // should rename to created to avoid listing in UI?
|
||||
BACKUP_STATE_CREATING: 'creating',
|
||||
BACKUP_STATE_ERROR: 'error'
|
||||
};
|
||||
|
||||
function postProcess(result) {
|
||||
assert.strictEqual(typeof result, 'object');
|
||||
|
||||
result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
|
||||
|
||||
result.restoreConfig = result.restoreConfigJson ? safe.JSON.parse(result.restoreConfigJson) : null;
|
||||
delete result.restoreConfigJson;
|
||||
}
|
||||
|
||||
function getPaged(page, perPage, callback) {
|
||||
function getByTypeAndStatePaged(type, state, page, perPage, callback) {
|
||||
assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
|
||||
assert.strictEqual(typeof state, 'string');
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? ORDER BY creationTime DESC LIMIT ?,?',
|
||||
[ exports.BACKUP_TYPE_BOX, exports.BACKUP_STATE_NORMAL, (page-1)*perPage, perPage ], function (error, results) {
|
||||
[ type, state, (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function getByTypePaged(type, page, perPage, callback) {
|
||||
assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? ORDER BY creationTime DESC LIMIT ?,?',
|
||||
[ type, (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
@@ -81,12 +109,14 @@ function add(backup, callback) {
|
||||
assert.strictEqual(typeof backup.version, 'string');
|
||||
assert(backup.type === exports.BACKUP_TYPE_APP || backup.type === exports.BACKUP_TYPE_BOX);
|
||||
assert(util.isArray(backup.dependsOn));
|
||||
assert.strictEqual(typeof backup.restoreConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var creationTime = backup.creationTime || new Date(); // allow tests to set the time
|
||||
var restoreConfig = backup.restoreConfig ? JSON.stringify(backup.restoreConfig) : '';
|
||||
|
||||
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn) VALUES (?, ?, ?, ?, ?, ?)',
|
||||
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(',') ],
|
||||
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, restoreConfigJson) VALUES (?, ?, ?, ?, ?, ?, ?)',
|
||||
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), restoreConfig ],
|
||||
function (error) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
@@ -95,6 +125,26 @@ function add(backup, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function update(id, backup, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof backup, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var fields = [ ], values = [ ];
|
||||
for (var p in backup) {
|
||||
fields.push(p + ' = ?');
|
||||
values.push(backup[p]);
|
||||
}
|
||||
values.push(id);
|
||||
|
||||
database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function clear(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -108,8 +158,19 @@ function del(id, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('DELETE FROM backups WHERE id=?', [ id ], function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
callback(null);
|
||||
get(id, function (error, result) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback();
|
||||
if (error) return callback(error);
|
||||
|
||||
var whereClause = [ 'id=?' ], whereArgs = [ result.id ];
|
||||
result.dependsOn.forEach(function (id) {
|
||||
whereClause.push('id=?');
|
||||
whereArgs.push(id);
|
||||
});
|
||||
|
||||
database.query('DELETE FROM backups WHERE ' + whereClause.join(' OR '), whereArgs, function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -5,10 +5,9 @@ exports = module.exports = {
|
||||
|
||||
testConfig: testConfig,
|
||||
|
||||
getPaged: getPaged,
|
||||
getByStatePaged: getByStatePaged,
|
||||
getByAppIdPaged: getByAppIdPaged,
|
||||
|
||||
getRestoreUrl: getRestoreUrl,
|
||||
getRestoreConfig: getRestoreConfig,
|
||||
|
||||
ensureBackup: ensureBackup,
|
||||
@@ -19,9 +18,7 @@ exports = module.exports = {
|
||||
|
||||
backupBoxAndApps: backupBoxAndApps,
|
||||
|
||||
getLocalDownloadPath: getLocalDownloadPath,
|
||||
|
||||
removeBackup: removeBackup
|
||||
cleanup: cleanup
|
||||
};
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
@@ -38,6 +35,7 @@ var addons = require('./addons.js'),
|
||||
filesystem = require('./storage/filesystem.js'),
|
||||
locker = require('./locker.js'),
|
||||
mailer = require('./mailer.js'),
|
||||
noop = require('./storage/noop.js'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
progress = require('./progress.js'),
|
||||
@@ -48,9 +46,8 @@ var addons = require('./addons.js'),
|
||||
SettingsError = require('./settings.js').SettingsError,
|
||||
util = require('util');
|
||||
|
||||
var BACKUP_BOX_CMD = path.join(__dirname, 'scripts/backupbox.sh'),
|
||||
BACKUP_APP_CMD = path.join(__dirname, 'scripts/backupapp.sh'),
|
||||
RESTORE_APP_CMD = path.join(__dirname, 'scripts/restoreapp.sh');
|
||||
var NODE_CMD = path.join(__dirname, './scripts/node.sh');
|
||||
var BACKUPTASK_CMD = path.join(__dirname, 'backuptask.js');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
@@ -83,6 +80,7 @@ util.inherits(BackupsError, Error);
|
||||
BackupsError.EXTERNAL_ERROR = 'external error';
|
||||
BackupsError.INTERNAL_ERROR = 'internal error';
|
||||
BackupsError.BAD_STATE = 'bad state';
|
||||
BackupsError.BAD_FIELD = 'bad field';
|
||||
BackupsError.NOT_FOUND = 'not found';
|
||||
BackupsError.MISSING_CREDENTIALS = 'missing credentials';
|
||||
|
||||
@@ -92,6 +90,9 @@ function api(provider) {
|
||||
case 'caas': return caas;
|
||||
case 's3': return s3;
|
||||
case 'filesystem': return filesystem;
|
||||
case 'minio': return s3;
|
||||
case 'exoscale-sos': return s3;
|
||||
case 'noop': return noop;
|
||||
default: return null;
|
||||
}
|
||||
}
|
||||
@@ -106,12 +107,13 @@ function testConfig(backupConfig, callback) {
|
||||
api(backupConfig.provider).testConfig(backupConfig, callback);
|
||||
}
|
||||
|
||||
function getPaged(page, perPage, callback) {
|
||||
function getByStatePaged(state, page, perPage, callback) {
|
||||
assert.strictEqual(typeof state, 'string');
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
backupdb.getPaged(page, perPage, function (error, results) {
|
||||
backupdb.getByTypeAndStatePaged(backupdb.BACKUP_TYPE_BOX, state, page, perPage, function (error, results) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, results);
|
||||
@@ -135,39 +137,12 @@ function getRestoreConfig(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
backupdb.get(backupId, function (error, result) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new BackupsError(BackupsError.NOT_FOUND, error));
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
if (!result.restoreConfig) return callback(new BackupsError(BackupsError.NOT_FOUND, error));
|
||||
|
||||
api(backupConfig.provider).getAppRestoreConfig(backupConfig, backupId, function (error, result) {
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(error);
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getRestoreUrl(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).getRestoreUrl(backupConfig, backupId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var obj = {
|
||||
id: backupId,
|
||||
url: result.url,
|
||||
backupKey: backupConfig.key,
|
||||
sha1: result.sha1 || null // not supported by all backends
|
||||
};
|
||||
|
||||
debug('getRestoreUrl: id:%s url:%s backupKey:%s sha1:%s', obj.id, obj.url, obj.backupKey, obj.sha1);
|
||||
|
||||
callback(null, obj);
|
||||
});
|
||||
callback(null, result.restoreConfig);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -179,58 +154,98 @@ function copyLastBackup(app, manifest, prefix, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var toFilenameArchive = util.format('%s/app_%s_%s_v%s.tar.gz', prefix, app.id, timestamp, manifest.version);
|
||||
var toFilenameConfig = util.format('%s/app_%s_%s_v%s.json', prefix, app.id, timestamp, manifest.version);
|
||||
var newBackupId = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
|
||||
var restoreConfig = apps.getAppConfig(app);
|
||||
restoreConfig.manifest = manifest;
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('copyLastBackup: copying archive %s to %s', app.lastBackupId, toFilenameArchive);
|
||||
debug('copyLastBackup: copying backup %s to %s', app.lastBackupId, newBackupId);
|
||||
|
||||
api(backupConfig.provider).copyObject(backupConfig, app.lastBackupId, toFilenameArchive, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
backupdb.add({ id: newBackupId, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], restoreConfig: restoreConfig }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
// TODO change that logic by adjusting app.lastBackupId to not contain the file type
|
||||
var configFileId = app.lastBackupId.slice(0, -'.tar.gz'.length) + '.json';
|
||||
api(backupConfig.provider).copyBackup(backupConfig, app.lastBackupId, newBackupId, function (copyBackupError) {
|
||||
const state = copyBackupError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
|
||||
debug('copyLastBackup: copying config %s to %s', configFileId, toFilenameConfig);
|
||||
debugApp(app, 'copyLastBackup: %s done with state %s', newBackupId, state);
|
||||
|
||||
api(backupConfig.provider).copyObject(backupConfig, configFileId, toFilenameConfig, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
backupdb.update(newBackupId, { state: state }, function (error) {
|
||||
if (copyBackupError) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, copyBackupError.message));
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
return callback(null, toFilenameArchive);
|
||||
callback(null, newBackupId);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function runBackupTask(backupId, appId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(appId === null || typeof backupId === 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var killTimerId = null;
|
||||
|
||||
var cp = shell.sudo('backup' + (appId ? 'App' : 'Box'), [ NODE_CMD, BACKUPTASK_CMD, backupId ].concat(appId ? [ appId ] : [ ]), function (error) {
|
||||
|
||||
clearTimeout(killTimerId);
|
||||
cp = null;
|
||||
|
||||
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
|
||||
return callback(new BackupsError(BackupsError.INTERNAL_ERROR, 'backuptask crashed'));
|
||||
} else if (error && error.code === 50) { // exited with error
|
||||
var result = safe.fs.readFileSync(paths.BACKUP_RESULT_FILE, 'utf8') || safe.error.message;
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result));
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
|
||||
killTimerId = setTimeout(function () {
|
||||
debug('runBackupTask: backup task taking too long. killing');
|
||||
cp.kill();
|
||||
}, 4 * 60 * 60 * 1000); // 4 hours
|
||||
}
|
||||
|
||||
function backupBoxWithAppBackupIds(appBackupIds, prefix, callback) {
|
||||
assert(util.isArray(appBackupIds));
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof prefix, 'string');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var filebase = util.format('%s/box_%s_v%s', prefix, timestamp, config.version());
|
||||
var filename = filebase + '.tar.gz';
|
||||
var backupId = util.format('%s/box_%s_v%s', prefix, timestamp, config.version());
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).getBoxBackupDetails(backupConfig, filename, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
var password = config.database().password ? '-p' + config.database().password : '--skip-password';
|
||||
var mysqlDumpArgs = [
|
||||
'-c',
|
||||
`/usr/bin/mysqldump -u root ${password} --single-transaction --routines \
|
||||
--triggers ${config.database().name} > "${paths.BOX_DATA_DIR}/box.mysqldump"`
|
||||
];
|
||||
shell.exec('backupBox', '/bin/bash', mysqlDumpArgs, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('backupBoxWithAppBackupIds: backup details %j', result);
|
||||
|
||||
shell.sudo('backupBox', [ BACKUP_BOX_CMD ].concat(result.backupScriptArguments), function (error) {
|
||||
backupdb.add({ id: backupId, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds, restoreConfig: null }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('backupBoxWithAppBackupIds: success');
|
||||
runBackupTask(backupId, null /* appId */, function (backupTaskError) {
|
||||
const state = backupTaskError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
debug('backupBoxWithAppBackupIds: %s', state);
|
||||
|
||||
backupdb.add({ id: filename, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
backupdb.update(backupId, { state: state }, function (error) {
|
||||
if (backupTaskError) return callback(backupTaskError);
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).backupDone(filename, null /* app */, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
callback(null, filename);
|
||||
// FIXME this is only needed for caas, hopefully we can remove that in the future
|
||||
api(backupConfig.provider).backupDone(backupId, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
callback(null, backupId);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -254,29 +269,31 @@ function createNewAppBackup(app, manifest, prefix, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var filebase = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
var configFilename = filebase + '.json', dataFilename = filebase + '.tar.gz';
|
||||
var backupId = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
var restoreConfig = apps.getAppConfig(app);
|
||||
restoreConfig.manifest = manifest;
|
||||
|
||||
api(backupConfig.provider).getAppBackupDetails(backupConfig, app.id, dataFilename, configFilename, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(restoreConfig))) {
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error creating config.json: ' + safe.error.message));
|
||||
}
|
||||
|
||||
debug('createNewAppBackup: backup details %j', result);
|
||||
addons.backupAddons(app, manifest.addons, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
async.series([
|
||||
addons.backupAddons.bind(null, app, manifest.addons),
|
||||
shell.sudo.bind(null, 'backupApp', [ BACKUP_APP_CMD ].concat(result.backupScriptArguments))
|
||||
], function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
backupdb.add({ id: backupId, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], restoreConfig: restoreConfig }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debugApp(app, 'createNewAppBackup: %s done', dataFilename);
|
||||
runBackupTask(backupId, app.id, function (backupTaskError) {
|
||||
const state = backupTaskError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
|
||||
backupdb.add({ id: dataFilename, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ] }, function (error) {
|
||||
debugApp(app, 'createNewAppBackup: %s done with state %s', backupId, state);
|
||||
|
||||
backupdb.update(backupId, { state: state }, function (error) {
|
||||
if (backupTaskError) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, backupTaskError.message));
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, dataFilename);
|
||||
callback(null, backupId);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -314,13 +331,7 @@ function backupApp(app, manifest, prefix, callback) {
|
||||
// s3 does not allow changing creation time, so copying the last backup is easy way out for now
|
||||
backupFunction = copyLastBackup.bind(null, app, manifest, prefix);
|
||||
} else {
|
||||
var appConfig = apps.getAppConfig(app);
|
||||
appConfig.manifest = manifest;
|
||||
backupFunction = createNewAppBackup.bind(null, app, manifest, prefix);
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.DATA_DIR, app.id + '/config.json'), JSON.stringify(appConfig), 'utf8')) {
|
||||
return callback(safe.error);
|
||||
}
|
||||
}
|
||||
|
||||
backupFunction(function (error, backupId) {
|
||||
@@ -416,7 +427,7 @@ function ensureBackup(auditSource, callback) {
|
||||
|
||||
debug('ensureBackup: %j', auditSource);
|
||||
|
||||
getPaged(1, 1, function (error, backups) {
|
||||
getByStatePaged(backupdb.BACKUP_STATE_NORMAL, 1, 1, function (error, backups) {
|
||||
if (error) {
|
||||
debug('Unable to list backups', error);
|
||||
return callback(error); // no point trying to backup if appstore is down
|
||||
@@ -438,56 +449,130 @@ function restoreApp(app, addonsToRestore, backupId, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
assert(app.lastBackupId);
|
||||
|
||||
getRestoreUrl(backupId, function (error, result) {
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
async.series([
|
||||
api(backupConfig.provider).restore.bind(null, backupConfig, backupId, path.join(paths.APPS_DATA_DIR, app.id)),
|
||||
addons.restoreAddons.bind(null, app, addonsToRestore)
|
||||
], callback);
|
||||
});
|
||||
}
|
||||
|
||||
function cleanupAppBackups(backupConfig, referencedAppBackups, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert(Array.isArray(referencedAppBackups));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const now = new Date();
|
||||
|
||||
// we clean app backups of any state because the ones to keep are determined by the box cleanup code
|
||||
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_APP, 1, 1000, function (error, appBackups) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
async.eachSeries(appBackups, function iterator(backup, iteratorDone) {
|
||||
if (referencedAppBackups.indexOf(backup.id) !== -1) return iteratorDone();
|
||||
if ((now - backup.creationTime) < (backupConfig.retentionSecs * 1000)) return iteratorDone();
|
||||
|
||||
debug('cleanup: removing %s', backup.id);
|
||||
|
||||
api(backupConfig.provider).removeBackups(backupConfig, [ backup.id ], function (error) {
|
||||
if (error) {
|
||||
debug('cleanup: error removing backup %j : %s', backup, error.message);
|
||||
iteratorDone();
|
||||
}
|
||||
|
||||
backupdb.del(backup.id, function (error) {
|
||||
if (error) debug('cleanup: error removing from database', error);
|
||||
else debug('cleanup: removed %s', backup.id);
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
});
|
||||
}, function () {
|
||||
debug('cleanup: done cleaning app backups');
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function cleanupBoxBackups(backupConfig, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const now = new Date();
|
||||
var referencedAppBackups = [];
|
||||
|
||||
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_BOX, 1, 1000, function (error, boxBackups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debugApp(app, 'restoreApp: restoreUrl:%s', result.url);
|
||||
if (boxBackups.length === 0) return callback(null, []);
|
||||
|
||||
shell.sudo('restoreApp', [ RESTORE_APP_CMD, app.id, result.url, result.backupKey, result.sessionToken ], function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
// search for the first valid backup
|
||||
var i;
|
||||
for (i = 0; i < boxBackups.length; i++) {
|
||||
if (boxBackups[i].state === backupdb.BACKUP_STATE_NORMAL) break;
|
||||
}
|
||||
|
||||
addons.restoreAddons(app, addonsToRestore, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
// keep the first valid backup
|
||||
if (i !== boxBackups.length) {
|
||||
debug('cleanup: preserving box backup %j', boxBackups[i]);
|
||||
referencedAppBackups = boxBackups[i].dependsOn;
|
||||
boxBackups.splice(i, 1);
|
||||
} else {
|
||||
debug('cleanup: no box backup to preserve');
|
||||
}
|
||||
|
||||
function getLocalDownloadPath(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
async.eachSeries(boxBackups, function iterator(backup, iteratorDone) {
|
||||
referencedAppBackups = referencedAppBackups.concat(backup.dependsOn);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
// TODO: errored backups should probably be cleaned up before retention time, but we will
|
||||
// have to be careful not to remove any backup currently being created
|
||||
if ((now - backup.creationTime) < (backupConfig.retentionSecs * 1000)) return iteratorDone();
|
||||
|
||||
api(backupConfig.provider).getLocalFilePath(backupConfig, backupId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
debug('cleanup: removing %s', backup.id);
|
||||
|
||||
debug('getLocalDownloadPath: id:%s path:%s', backupId, result.filePath);
|
||||
var backupIds = [].concat(backup.id, backup.dependsOn);
|
||||
|
||||
callback(null, result.filePath);
|
||||
});
|
||||
});
|
||||
}
|
||||
api(backupConfig.provider).removeBackups(backupConfig, backupIds, function (error) {
|
||||
if (error) {
|
||||
debug('cleanup: error removing backup %j : %s', backup, error.message);
|
||||
iteratorDone();
|
||||
}
|
||||
|
||||
function removeBackup(backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(util.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
backupdb.del(backup.id, function (error) {
|
||||
if (error) debug('cleanup: error removing from database', error);
|
||||
else debug('cleanup: removed %j', backupIds);
|
||||
|
||||
debug('removeBackup: %s', backupId);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).removeBackup(backupConfig, backupId, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
backupdb.del(backupId, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('removeBackup: %s done', backupId);
|
||||
|
||||
callback(null);
|
||||
iteratorDone();
|
||||
});
|
||||
});
|
||||
}, function () {
|
||||
return callback(null, referencedAppBackups);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function cleanup(callback) {
|
||||
assert(!callback || typeof callback === 'function'); // callback is null when called from cronjob
|
||||
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (backupConfig.retentionSecs < 0) {
|
||||
debug('cleanup: keeping all backups');
|
||||
return callback();
|
||||
}
|
||||
|
||||
cleanupBoxBackups(backupConfig, function (error, referencedAppBackups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('cleanup: done cleaning box backups');
|
||||
|
||||
cleanupAppBackups(backupConfig, referencedAppBackups, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -0,0 +1,111 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
'use strict';
|
||||
|
||||
require('supererror')({ splatchError: true });
|
||||
|
||||
// remove timestamp from debug() based output
|
||||
require('debug').formatArgs = function formatArgs(args) {
|
||||
args[0] = this.namespace + ' ' + args[0];
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
BackupsError = require('./backups.js').BackupsError,
|
||||
caas = require('./storage/caas.js'),
|
||||
database = require('./database.js'),
|
||||
debug = require('debug')('box:backuptask'),
|
||||
filesystem = require('./storage/filesystem.js'),
|
||||
noop = require('./storage/noop.js'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
s3 = require('./storage/s3.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js');
|
||||
|
||||
function api(provider) {
|
||||
switch (provider) {
|
||||
case 'caas': return caas;
|
||||
case 's3': return s3;
|
||||
case 'filesystem': return filesystem;
|
||||
case 'minio': return s3;
|
||||
case 'exoscale-sos': return s3;
|
||||
case 'noop': return noop;
|
||||
default: return null;
|
||||
}
|
||||
}
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.initialize(callback);
|
||||
}
|
||||
|
||||
function backupApp(backupId, appId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Start app backup with id %s for %s', backupId, appId);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
var backupMapping = [{
|
||||
source: path.join(paths.APPS_DATA_DIR, appId),
|
||||
destination: '.'
|
||||
}];
|
||||
|
||||
api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function backupBox(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Start box backup with id %s', backupId);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
var backupMapping = [{
|
||||
source: paths.BOX_DATA_DIR,
|
||||
destination: 'box'
|
||||
}, {
|
||||
source: path.join(paths.PLATFORM_DATA_DIR, 'mail'),
|
||||
destination: 'mail'
|
||||
}];
|
||||
|
||||
api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
|
||||
});
|
||||
}
|
||||
|
||||
// Main process starts here
|
||||
var backupId = process.argv[2];
|
||||
var appId = process.argv[3];
|
||||
|
||||
if (appId) debug('Backuptask for the app %s with id %s', appId, backupId);
|
||||
else debug('Backuptask for the whole Cloudron with id %s', backupId);
|
||||
|
||||
process.on('SIGTERM', function () {
|
||||
process.exit(0);
|
||||
});
|
||||
|
||||
initialize(function (error) {
|
||||
if (error) throw error;
|
||||
|
||||
function resultHandler(error) {
|
||||
if (error) debug('completed with error', error);
|
||||
|
||||
debug('completed');
|
||||
|
||||
safe.fs.writeFileSync(paths.BACKUP_RESULT_FILE, error ? error.message : '');
|
||||
|
||||
// https://nodejs.org/api/process.html are exit codes used by node. apps.js uses the value below
|
||||
// to check apptask crashes
|
||||
process.exit(error ? 50 : 0);
|
||||
}
|
||||
|
||||
if (appId) backupApp(backupId, appId, resultHandler);
|
||||
else backupBox(backupId, resultHandler);
|
||||
});
|
||||
@@ -32,7 +32,7 @@ var acme = require('./cert/acme.js'),
|
||||
caas = require('./cert/caas.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:src/certificates'),
|
||||
debug = require('debug')('box:certificates'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fallback = require('./cert/fallback.js'),
|
||||
fs = require('fs'),
|
||||
@@ -96,7 +96,7 @@ function getApi(app, callback) {
|
||||
|
||||
var options = { };
|
||||
if (tlsConfig.provider === 'caas') {
|
||||
options.prod = !config.isDev(); // with altDomain, we will choose acme setting based on this
|
||||
options.prod = true; // with altDomain, we will choose acme setting based on this
|
||||
} else { // acme
|
||||
options.prod = tlsConfig.provider.match(/.*-prod/) !== null;
|
||||
}
|
||||
@@ -263,10 +263,6 @@ function validateCertificate(cert, key, fqdn) {
|
||||
assert(key === null || typeof key === 'string');
|
||||
assert.strictEqual(typeof fqdn, 'string');
|
||||
|
||||
if (cert === null && key === null) return null;
|
||||
if (!cert && key) return new Error('missing cert');
|
||||
if (cert && !key) return new Error('missing key');
|
||||
|
||||
function matchesDomain(domain) {
|
||||
if (typeof domain !== 'string') return false;
|
||||
if (domain === fqdn) return true;
|
||||
@@ -275,23 +271,26 @@ function validateCertificate(cert, key, fqdn) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// get commonName (http://stackoverflow.com/questions/17353122/parsing-strings-crt-files)
|
||||
var result = safe.child_process.execSync('openssl x509 -noout -subject | sed -r "s|.*CN=(.*)|\\1|; s|/[^/]*=.*$||"', { encoding: 'utf8', input: cert });
|
||||
if (!result) return new Error(util.format('could not get CN'));
|
||||
var commonName = result.trim();
|
||||
debug('validateCertificate: detected commonName as %s', commonName);
|
||||
if (cert === null && key === null) return null;
|
||||
if (!cert && key) return new Error('missing cert');
|
||||
if (cert && !key) return new Error('missing key');
|
||||
|
||||
// https://github.com/drwetter/testssl.sh/pull/383
|
||||
var cmd = `openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | \
|
||||
grep "DNS:" | \
|
||||
sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"`;
|
||||
result = safe.child_process.execSync(cmd, { encoding: 'utf8', input: cert });
|
||||
var altNames = result ? [ ] : result.trim().split(' '); // might fail if cert has no SAN
|
||||
debug('validateCertificate: detected altNames as %j', altNames);
|
||||
var result = safe.child_process.execSync('openssl x509 -noout -checkhost "' + fqdn + '"', { encoding: 'utf8', input: cert });
|
||||
if (!result) return new Error(util.format('could not get cert subject'));
|
||||
|
||||
// check altNames
|
||||
var domains = altNames.concat(commonName);
|
||||
if (!domains.some(matchesDomain)) return new Error(util.format('cert is not valid for this domain. Expecting %s in %j', fqdn, domains));
|
||||
// if no match, check alt names
|
||||
if (result.indexOf('does match certificate') === -1) {
|
||||
// https://github.com/drwetter/testssl.sh/pull/383
|
||||
var cmd = `openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | \
|
||||
grep "DNS:" | \
|
||||
sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"`;
|
||||
result = safe.child_process.execSync(cmd, { encoding: 'utf8', input: cert });
|
||||
var altNames = result ? [ ] : result.trim().split(' '); // might fail if cert has no SAN
|
||||
debug('validateCertificate: detected altNames as %j', altNames);
|
||||
|
||||
// check altNames
|
||||
if (!altNames.some(matchesDomain)) return new Error(util.format('cert is not valid for this domain. Expecting %s in %j', fqdn, altNames));
|
||||
}
|
||||
|
||||
// http://httpd.apache.org/docs/2.0/ssl/ssl_faq.html#verify
|
||||
var certModulus = safe.child_process.execSync('openssl x509 -noout -modulus', { encoding: 'utf8', input: cert });
|
||||
|
||||
@@ -8,29 +8,26 @@ exports = module.exports = {
|
||||
activate: activate,
|
||||
getConfig: getConfig,
|
||||
getStatus: getStatus,
|
||||
getDisks: getDisks,
|
||||
dnsSetup: dnsSetup,
|
||||
getLogs: getLogs,
|
||||
|
||||
sendHeartbeat: sendHeartbeat,
|
||||
sendAliveStatus: sendAliveStatus,
|
||||
|
||||
updateToLatest: updateToLatest,
|
||||
reboot: reboot,
|
||||
retire: retire,
|
||||
migrate: migrate,
|
||||
|
||||
getConfigStateSync: getConfigStateSync,
|
||||
|
||||
checkDiskSpace: checkDiskSpace,
|
||||
|
||||
readDkimPublicKeySync: readDkimPublicKeySync,
|
||||
refreshDNS: refreshDNS,
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_ACTIVATED: 'activated'
|
||||
configureWebadmin: configureWebadmin
|
||||
};
|
||||
|
||||
var apps = require('./apps.js'),
|
||||
var appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
@@ -41,7 +38,7 @@ var apps = require('./apps.js'),
|
||||
constants = require('./constants.js'),
|
||||
cron = require('./cron.js'),
|
||||
debug = require('debug')('box:cloudron'),
|
||||
df = require('node-df'),
|
||||
df = require('@sindresorhus/df'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
locker = require('./locker.js'),
|
||||
@@ -56,10 +53,12 @@ var apps = require('./apps.js'),
|
||||
settings = require('./settings.js'),
|
||||
SettingsError = settings.SettingsError,
|
||||
shell = require('./shell.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
taskmanager = require('./taskmanager.js'),
|
||||
tld = require('tldjs'),
|
||||
tokendb = require('./tokendb.js'),
|
||||
updateChecker = require('./updatechecker.js'),
|
||||
user = require('./user.js'),
|
||||
@@ -87,9 +86,8 @@ const BOX_AND_USER_TEMPLATE = {
|
||||
}
|
||||
};
|
||||
|
||||
var gUpdatingDns = false, // flag for dns update reentrancy
|
||||
gBoxAndUserDetails = null, // cached cloudron details like region,size...
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
var gBoxAndUserDetails = null, // cached cloudron details like region,size...
|
||||
gWebadminStatus = { dns: false, tls: false, configuring: false };
|
||||
|
||||
function CloudronError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
@@ -123,107 +121,74 @@ CloudronError.SELF_UPGRADE_NOT_SUPPORTED = 'Self upgrade not supported';
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
gUpdatingDns = false;
|
||||
gWebadminStatus = { dns: false, tls: false, configuring: false };
|
||||
gBoxAndUserDetails = null;
|
||||
|
||||
async.series([
|
||||
certificates.initialize,
|
||||
settings.initialize,
|
||||
platform.initialize,
|
||||
installAppBundle,
|
||||
checkConfigState,
|
||||
configureDefaultServer
|
||||
], callback);
|
||||
configureDefaultServer,
|
||||
onDomainConfigured
|
||||
], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
configureWebadmin(NOOP_CALLBACK); // for restore() and caas initial setup. do not block
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
|
||||
platform.events.removeListener(platform.EVENT_READY, onPlatformReady);
|
||||
|
||||
async.series([
|
||||
cron.uninitialize,
|
||||
taskmanager.pauseTasks,
|
||||
mailer.stop,
|
||||
platform.uninitialize,
|
||||
platform.stop,
|
||||
certificates.uninitialize,
|
||||
settings.uninitialize
|
||||
], callback);
|
||||
}
|
||||
|
||||
function onConfigured(callback) {
|
||||
function onDomainConfigured(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
// if we hit here, the domain has to be set, this is a logic issue if it isn't
|
||||
assert(config.fqdn());
|
||||
|
||||
debug('onConfigured: current state: %j', gConfigState);
|
||||
|
||||
if (gConfigState.configured) return callback(); // re-entracy flag
|
||||
|
||||
gConfigState.configured = true;
|
||||
|
||||
platform.events.on(platform.EVENT_READY, onPlatformReady);
|
||||
settings.events.on(settings.DNS_CONFIG_KEY, function () { refreshDNS(); });
|
||||
if (!config.fqdn()) return callback();
|
||||
|
||||
async.series([
|
||||
clients.addDefaultClients,
|
||||
certificates.ensureFallbackCertificate,
|
||||
platform.start, // requires fallback certs for mail container
|
||||
ensureDkimKey,
|
||||
addDnsRecords,
|
||||
configureAdmin,
|
||||
mailer.start,
|
||||
cron.initialize // do not send heartbeats until we are "ready"
|
||||
platform.start, // requires fallback certs for mail container
|
||||
mailer.start, // this requires the "mail" container to be running
|
||||
cron.initialize
|
||||
], callback);
|
||||
}
|
||||
|
||||
function onPlatformReady(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('onPlatformReady');
|
||||
|
||||
async.series([
|
||||
taskmanager.resumeTasks
|
||||
], callback);
|
||||
}
|
||||
|
||||
function getConfigStateSync() {
|
||||
return gConfigState;
|
||||
}
|
||||
|
||||
function checkConfigState(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (!config.fqdn()) {
|
||||
settings.events.once(settings.DNS_CONFIG_KEY, function () { checkConfigState(); }); // check again later
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
debug('checkConfigState: configured');
|
||||
|
||||
onConfigured(callback);
|
||||
}
|
||||
|
||||
function dnsSetup(dnsConfig, domain, callback) {
|
||||
function dnsSetup(dnsConfig, domain, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (config.fqdn()) return callback(new CloudronError(CloudronError.ALREADY_SETUP));
|
||||
|
||||
settings.setDnsConfig(dnsConfig, domain, function (error) {
|
||||
if (!zoneName) zoneName = tld.getDomain(domain) || '';
|
||||
|
||||
debug('dnsSetup: Setting up Cloudron with domain %s and zone %s', domain, zoneName);
|
||||
|
||||
settings.setDnsConfig(dnsConfig, domain, zoneName, function (error) {
|
||||
if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
config.set('fqdn', domain); // set fqdn only after dns config is valid, otherwise cannot re-setup if we failed
|
||||
config.setFqdn(domain); // set fqdn only after dns config is valid, otherwise cannot re-setup if we failed
|
||||
config.setZoneName(zoneName);
|
||||
|
||||
onConfigured(); // do not block
|
||||
async.series([ // do not block
|
||||
onDomainConfigured,
|
||||
configureWebadmin
|
||||
], NOOP_CALLBACK);
|
||||
|
||||
callback();
|
||||
});
|
||||
@@ -247,8 +212,6 @@ function configureDefaultServer(callback) {
|
||||
safe.child_process.execSync(certCommand);
|
||||
}
|
||||
|
||||
safe.fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR,'ip_based_setup.conf'));
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, 'default.conf', '', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
@@ -258,30 +221,39 @@ function configureDefaultServer(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function configureAdmin(callback) {
|
||||
function configureWebadmin(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback();
|
||||
debug('configureWebadmin: fqdn:%s status:%j', config.fqdn(), gWebadminStatus);
|
||||
|
||||
debug('configureAdmin');
|
||||
if (process.env.BOX_ENV === 'test' || !config.fqdn() || gWebadminStatus.configuring) return callback();
|
||||
|
||||
gWebadminStatus.configuring = true; // re-entracy guard
|
||||
|
||||
function done(error) {
|
||||
gWebadminStatus.configuring = false;
|
||||
debug('configureWebadmin: done error:%j', error);
|
||||
callback(error);
|
||||
}
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
if (error) return done(error);
|
||||
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return callback(error);
|
||||
addDnsRecords(ip, function (error) {
|
||||
if (error) return done(error);
|
||||
|
||||
gConfigState.dns = true;
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return done(error);
|
||||
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) { // currently, this can never happen
|
||||
debug('Error obtaining certificate. Proceed anyway', error);
|
||||
return callback();
|
||||
}
|
||||
gWebadminStatus.dns = true;
|
||||
|
||||
gConfigState.tls = true;
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) return done(error);
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
|
||||
gWebadminStatus.tls = true;
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), done);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -293,26 +265,22 @@ function setTimeZone(ip, callback) {
|
||||
|
||||
debug('setTimeZone ip:%s', ip);
|
||||
|
||||
// https://github.com/bluesmoon/node-geoip
|
||||
// https://github.com/runk/node-maxmind
|
||||
// { url: 'http://freegeoip.net/json/%s', jpath: 'time_zone' },
|
||||
// { url: 'http://ip-api.com/json/%s', jpath: 'timezone' },
|
||||
// { url: 'http://geoip.nekudo.com/api/%s', jpath: 'time_zone }
|
||||
|
||||
superagent.get('http://ip-api.com/json/' + ip).timeout(10 * 1000).end(function (error, result) {
|
||||
superagent.get('https://geolocation.cloudron.io/json').query({ ip: ip }).timeout(10 * 1000).end(function (error, result) {
|
||||
if ((error && !error.response) || result.statusCode !== 200) {
|
||||
debug('Failed to get geo location: %s', error.message);
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
if (!result.body.timezone || typeof result.body.timezone !== 'string') {
|
||||
var timezone = safe.query(result.body, 'location.time_zone');
|
||||
|
||||
if (!timezone || typeof timezone !== 'string') {
|
||||
debug('No timezone in geoip response : %j', result.body);
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
debug('Setting timezone to ', result.body.timezone);
|
||||
debug('Setting timezone to ', timezone);
|
||||
|
||||
settings.setTimeZone(result.body.timezone, callback);
|
||||
settings.setTimeZone(timezone, callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -346,7 +314,7 @@ function activate(username, password, email, displayName, ip, auditSource, callb
|
||||
|
||||
eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, { });
|
||||
|
||||
exports.events.emit(exports.EVENT_ACTIVATED);
|
||||
platform.createMailConfig(NOOP_CALLBACK); // bounces can now be sent to the cloudron owner
|
||||
|
||||
callback(null, { token: token, expires: expires });
|
||||
});
|
||||
@@ -366,17 +334,42 @@ function getStatus(callback) {
|
||||
callback(null, {
|
||||
activated: count !== 0,
|
||||
version: config.version(),
|
||||
boxVersionsUrl: config.get('boxVersionsUrl'),
|
||||
apiServerOrigin: config.apiServerOrigin(), // used by CaaS tool
|
||||
provider: config.provider(),
|
||||
cloudronName: cloudronName,
|
||||
adminFqdn: config.fqdn() ? config.adminFqdn() : null,
|
||||
configState: gConfigState
|
||||
webadminStatus: gWebadminStatus
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getDisks(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var disks = {
|
||||
boxDataDisk: null,
|
||||
platformDataDisk: null,
|
||||
appsDataDisk: null
|
||||
};
|
||||
|
||||
df.file(paths.BOX_DATA_DIR).then(function (result) {
|
||||
disks.boxDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.PLATFORM_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.platformDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.APPS_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.appsDataDisk = result.filesystem;
|
||||
|
||||
callback(null, disks);
|
||||
}).catch(function (error) {
|
||||
callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
});
|
||||
}
|
||||
|
||||
function getBoxAndUserDetails(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -416,7 +409,6 @@ function getConfig(callback) {
|
||||
callback(null, {
|
||||
apiServerOrigin: config.apiServerOrigin(),
|
||||
webServerOrigin: config.webServerOrigin(),
|
||||
isDev: config.isDev(),
|
||||
fqdn: config.fqdn(),
|
||||
version: config.version(),
|
||||
update: updateChecker.getUpdateInfo(),
|
||||
@@ -449,87 +441,9 @@ function sendHeartbeat() {
|
||||
});
|
||||
}
|
||||
|
||||
function sendAliveStatus(callback) {
|
||||
if (typeof callback !== 'function') {
|
||||
callback = function (error) {
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) debug(error);
|
||||
else if (error) debug(error);
|
||||
};
|
||||
}
|
||||
|
||||
function sendAliveStatusWithAppstoreConfig(backendSettings, appstoreConfig) {
|
||||
assert.strictEqual(typeof backendSettings, 'object');
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId;
|
||||
var data = {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new CloudronError(CloudronError.NOT_FOUND));
|
||||
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
settings.getAll(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
var backendSettings = {
|
||||
dnsConfig: {
|
||||
provider: result[settings.DNS_CONFIG_KEY].provider,
|
||||
wildcard: result[settings.DNS_CONFIG_KEY].provider === 'manual' ? result[settings.DNS_CONFIG_KEY].wildcard : undefined
|
||||
},
|
||||
tlsConfig: {
|
||||
provider: result[settings.TLS_CONFIG_KEY].provider
|
||||
},
|
||||
backupConfig: {
|
||||
provider: result[settings.BACKUP_CONFIG_KEY].provider
|
||||
},
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY]
|
||||
};
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
sendAliveStatusWithAppstoreConfig(backendSettings, result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!result.token) {
|
||||
debug('sendAliveStatus: Cloudron not yet registered');
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
sendAliveStatusWithAppstoreConfig(backendSettings, result);
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function ensureDkimKey(callback) {
|
||||
assert(config.fqdn(), 'fqdn is not set');
|
||||
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPrivateKeyFile = path.join(dkimPath, 'private');
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
@@ -608,66 +522,55 @@ function txtRecordsWithSpf(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function addDnsRecords(callback) {
|
||||
function addDnsRecords(ip, callback) {
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback();
|
||||
|
||||
if (gUpdatingDns) {
|
||||
debug('addDnsRecords: dns update already in progress');
|
||||
return callback();
|
||||
}
|
||||
gUpdatingDns = true;
|
||||
|
||||
var dkimKey = readDkimPublicKeySync();
|
||||
if (!dkimKey) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, new Error('Failed to read dkim public key')));
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
|
||||
// t=s limits the domainkey to this domain and not it's subdomains
|
||||
var dkimRecord = { subdomain: constants.DKIM_SELECTOR + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
|
||||
|
||||
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
|
||||
// t=s limits the domainkey to this domain and not it's subdomains
|
||||
var dkimRecord = { subdomain: constants.DKIM_SELECTOR + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
|
||||
var records = [ ];
|
||||
if (config.isCustomDomain()) {
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
} else {
|
||||
// for non-custom domains, we show a noapp.html page
|
||||
var nakedDomainRecord = { subdomain: '', type: 'A', values: [ ip ] };
|
||||
|
||||
var records = [ ];
|
||||
if (config.isCustomDomain()) {
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
} else {
|
||||
// for non-custom domains, we show a noapp.html page
|
||||
var nakedDomainRecord = { subdomain: '', type: 'A', values: [ ip ] };
|
||||
records.push(nakedDomainRecord);
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
}
|
||||
|
||||
records.push(nakedDomainRecord);
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
}
|
||||
debug('addDnsRecords: %j', records);
|
||||
|
||||
debug('addDnsRecords: %j', records);
|
||||
async.retry({ times: 10, interval: 20000 }, function (retryCallback) {
|
||||
txtRecordsWithSpf(function (error, txtRecords) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
async.retry({ times: 10, interval: 20000 }, function (retryCallback) {
|
||||
txtRecordsWithSpf(function (error, txtRecords) {
|
||||
if (error) return retryCallback(error);
|
||||
if (txtRecords) records.push({ subdomain: '', type: 'TXT', values: txtRecords });
|
||||
|
||||
if (txtRecords) records.push({ subdomain: '', type: 'TXT', values: txtRecords });
|
||||
debug('addDnsRecords: will update %j', records);
|
||||
|
||||
debug('addDnsRecords: will update %j', records);
|
||||
async.mapSeries(records, function (record, iteratorCallback) {
|
||||
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
|
||||
}, function (error, changeIds) {
|
||||
if (error) debug('addDnsRecords: failed to update : %s. will retry', error);
|
||||
else debug('addDnsRecords: records %j added with changeIds %j', records, changeIds);
|
||||
|
||||
async.mapSeries(records, function (record, iteratorCallback) {
|
||||
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
|
||||
}, function (error, changeIds) {
|
||||
if (error) debug('addDnsRecords: failed to update : %s. will retry', error);
|
||||
else debug('addDnsRecords: records %j added with changeIds %j', records, changeIds);
|
||||
|
||||
retryCallback(error);
|
||||
});
|
||||
retryCallback(error);
|
||||
});
|
||||
}, function (error) {
|
||||
gUpdatingDns = false;
|
||||
|
||||
debug('addDnsRecords: done updating records with error:', error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
}, function (error) {
|
||||
debug('addDnsRecords: done updating records with error:', error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -719,6 +622,7 @@ function updateToLatest(auditSource, callback) {
|
||||
|
||||
var boxUpdateInfo = updateChecker.getUpdateInfo().box;
|
||||
if (!boxUpdateInfo) return callback(new CloudronError(CloudronError.ALREADY_UPTODATE, 'No update available'));
|
||||
if (!boxUpdateInfo.sourceTarballUrl) return callback(new CloudronError(CloudronError.BAD_STATE, 'No automatic update available'));
|
||||
|
||||
// check if this is just a version number change
|
||||
if (config.version().match(/[-+]/) !== null && config.version().replace(/[-+].*/, '') === boxUpdateInfo.version) {
|
||||
@@ -798,6 +702,7 @@ function doUpdate(boxUpdateInfo, callback) {
|
||||
tlsKey: config.tlsKey(),
|
||||
isCustomDomain: config.isCustomDomain(),
|
||||
isDemo: config.isDemo(),
|
||||
zoneName: config.zoneName(),
|
||||
|
||||
appstore: {
|
||||
token: config.token(),
|
||||
@@ -809,11 +714,10 @@ function doUpdate(boxUpdateInfo, callback) {
|
||||
webServerOrigin: config.webServerOrigin()
|
||||
},
|
||||
|
||||
version: boxUpdateInfo.version,
|
||||
boxVersionsUrl: config.get('boxVersionsUrl')
|
||||
version: boxUpdateInfo.version
|
||||
};
|
||||
|
||||
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, data);
|
||||
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, _.omit(data, 'tlsCert', 'tlsKey', 'token', 'appstore', 'caas'));
|
||||
|
||||
progress.set(progress.UPDATE, 5, 'Downloading and extracting new version');
|
||||
|
||||
@@ -860,23 +764,40 @@ function checkDiskSpace(callback) {
|
||||
|
||||
debug('Checking disk space');
|
||||
|
||||
df(function (error, entries) {
|
||||
getDisks(function (error, disks) {
|
||||
if (error) {
|
||||
debug('df error %s', error.message);
|
||||
mailer.outOfDiskSpace(error.message);
|
||||
return callback();
|
||||
}
|
||||
|
||||
var oos = entries.some(function (entry) {
|
||||
return (entry.mount === paths.DATA_DIR && entry.capacity >= 0.90) ||
|
||||
(entry.mount === '/' && entry.available <= (1.25 * 1024 * 1024)); // 1.5G
|
||||
df().then(function (entries) {
|
||||
/*
|
||||
[{
|
||||
filesystem: '/dev/disk1',
|
||||
size: 499046809600,
|
||||
used: 443222245376,
|
||||
available: 55562420224,
|
||||
capacity: 0.89,
|
||||
mountpoint: '/'
|
||||
}, ...]
|
||||
*/
|
||||
var oos = entries.some(function (entry) {
|
||||
// ignore other filesystems but where box, app and platform data is
|
||||
if (entry.filesystem !== disks.boxDataDisk && entry.filesystem !== disks.platformDataDisk && entry.filesystem !== disks.appsDataDisk) return false;
|
||||
|
||||
return (entry.available <= (1.25 * 1024 * 1024 * 1024)); // 1.5G
|
||||
});
|
||||
|
||||
debug('Disk space checked. ok: %s', !oos);
|
||||
|
||||
if (oos) mailer.outOfDiskSpace(JSON.stringify(entries, null, 4));
|
||||
|
||||
callback();
|
||||
}).catch(function (error) {
|
||||
debug('df error %s', error.message);
|
||||
mailer.outOfDiskSpace(error.message);
|
||||
return callback();
|
||||
});
|
||||
|
||||
debug('Disk space checked. ok: %s', !oos);
|
||||
|
||||
if (oos) mailer.outOfDiskSpace(JSON.stringify(entries, null, 4));
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -909,13 +830,11 @@ function doMigrate(options, callback) {
|
||||
progress.set(progress.MIGRATE, 10, 'Backing up for migration');
|
||||
|
||||
// initiate the migration in the background
|
||||
backups.backupBoxAndApps({ userId: null, username: 'migrator' }, function (error, backupId) {
|
||||
backups.backupBoxAndApps({ userId: null, username: 'migrator' }, function (error) {
|
||||
if (error) return unlock(error);
|
||||
|
||||
debug('migrate: domain: %s size %s region %s', options.domain, options.size, options.region);
|
||||
|
||||
options.restoreKey = backupId;
|
||||
|
||||
superagent
|
||||
.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/migrate')
|
||||
.query({ token: config.token() })
|
||||
@@ -944,9 +863,9 @@ function migrate(options, callback) {
|
||||
|
||||
if (!options.domain) return doMigrate(options, callback);
|
||||
|
||||
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint', 'token');
|
||||
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint', 'token', 'zoneName');
|
||||
|
||||
settings.setDnsConfig(dnsConfig, options.domain, function (error) {
|
||||
settings.setDnsConfig(dnsConfig, options.domain, options.zoneName || tld.getDomain(options.domain), function (error) {
|
||||
if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
@@ -955,6 +874,7 @@ function migrate(options, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// called for dynamic dns setups where we have to update the IP
|
||||
function refreshDNS(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
@@ -963,7 +883,7 @@ function refreshDNS(callback) {
|
||||
|
||||
debug('refreshDNS: current ip %s', ip);
|
||||
|
||||
addDnsRecords(function (error) {
|
||||
addDnsRecords(ip, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('refreshDNS: done for system records');
|
||||
@@ -972,6 +892,9 @@ function refreshDNS(callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.each(result, function (app, callback) {
|
||||
// do not change state of installing apps since apptask will error if dns record already exists
|
||||
if (app.installationState !== appdb.ISTATE_INSTALLED) return callback();
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
@@ -984,3 +907,49 @@ function refreshDNS(callback) {
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getLogs(options, callback) {
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var units = options.units || [],
|
||||
lines = options.lines || 100,
|
||||
format = options.format || 'json',
|
||||
follow = !!options.follow;
|
||||
|
||||
assert(Array.isArray(units));
|
||||
assert.strictEqual(typeof lines, 'number');
|
||||
assert.strictEqual(typeof format, 'string');
|
||||
|
||||
debug('Getting logs for %j', units);
|
||||
|
||||
var args = [ '--no-pager', '--lines=' + lines ];
|
||||
units.forEach(function (u) {
|
||||
if (u === 'box') args.push('--unit=box');
|
||||
else if (u === 'mail') args.push('CONTAINER_NAME=mail');
|
||||
});
|
||||
if (format === 'short') args.push('--output=short', '-a'); else args.push('--output=json');
|
||||
if (follow) args.push('--follow');
|
||||
|
||||
var cp = spawn('/bin/journalctl', args);
|
||||
|
||||
var transformStream = split(function mapper(line) {
|
||||
if (format !== 'json') return line + '\n';
|
||||
|
||||
var obj = safe.JSON.parse(line);
|
||||
if (!obj) return undefined;
|
||||
|
||||
return JSON.stringify({
|
||||
realtimeTimestamp: obj.__REALTIME_TIMESTAMP,
|
||||
monotonicTimestamp: obj.__MONOTONIC_TIMESTAMP,
|
||||
message: obj.MESSAGE,
|
||||
source: obj.SYSLOG_IDENTIFIER || ''
|
||||
}) + '\n';
|
||||
});
|
||||
|
||||
transformStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process
|
||||
|
||||
cp.stdout.pipe(transformStream);
|
||||
|
||||
return callback(null, transformStream);
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ exports = module.exports = {
|
||||
apiServerOrigin: apiServerOrigin,
|
||||
webServerOrigin: webServerOrigin,
|
||||
fqdn: fqdn,
|
||||
setFqdn: setFqdn,
|
||||
token: token,
|
||||
version: version,
|
||||
setVersion: setVersion,
|
||||
@@ -31,8 +32,8 @@ exports = module.exports = {
|
||||
mailFqdn: mailFqdn,
|
||||
appFqdn: appFqdn,
|
||||
zoneName: zoneName,
|
||||
setZoneName: setZoneName,
|
||||
|
||||
isDev: isDev,
|
||||
isDemo: isDemo,
|
||||
|
||||
tlsCert: tlsCert,
|
||||
@@ -47,6 +48,7 @@ var assert = require('assert'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
tld = require('tldjs'),
|
||||
_ = require('underscore');
|
||||
|
||||
var homeDir = process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
|
||||
@@ -75,9 +77,9 @@ function _reset(callback) {
|
||||
function initConfig() {
|
||||
// setup defaults
|
||||
data.fqdn = 'localhost';
|
||||
data.zoneName = '';
|
||||
|
||||
data.token = null;
|
||||
data.boxVersionsUrl = null;
|
||||
data.version = null;
|
||||
data.isCustomDomain = true;
|
||||
data.webServerOrigin = null;
|
||||
@@ -145,10 +147,26 @@ function webServerOrigin() {
|
||||
return get('webServerOrigin');
|
||||
}
|
||||
|
||||
function setFqdn(fqdn) {
|
||||
set('fqdn', fqdn);
|
||||
}
|
||||
|
||||
function fqdn() {
|
||||
return get('fqdn');
|
||||
}
|
||||
|
||||
function setZoneName(zone) {
|
||||
set('zoneName', zone);
|
||||
}
|
||||
|
||||
function zoneName() {
|
||||
var zone = get('zoneName');
|
||||
if (zone) return zone;
|
||||
|
||||
// TODO: move this to migration code path instead
|
||||
return tld.getDomain(fqdn()) || '';
|
||||
}
|
||||
|
||||
// keep this in sync with start.sh admin.conf generation code
|
||||
function appFqdn(location) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
@@ -193,21 +211,10 @@ function isCustomDomain() {
|
||||
return get('isCustomDomain');
|
||||
}
|
||||
|
||||
function zoneName() {
|
||||
if (isCustomDomain()) return fqdn(); // the appstore sets up the custom domain as a zone
|
||||
|
||||
// for shared domain name, strip out the hostname
|
||||
return fqdn().substr(fqdn().indexOf('.') + 1);
|
||||
}
|
||||
|
||||
function database() {
|
||||
return get('database');
|
||||
}
|
||||
|
||||
function isDev() {
|
||||
return /dev/i.test(get('boxVersionsUrl'));
|
||||
}
|
||||
|
||||
function isDemo() {
|
||||
return get('isDemo') === true;
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var apps = require('./apps.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
assert = require('assert'),
|
||||
backups = require('./backups.js'),
|
||||
certificates = require('./certificates.js'),
|
||||
@@ -14,26 +15,29 @@ var apps = require('./apps.js'),
|
||||
constants = require('./constants.js'),
|
||||
CronJob = require('cron').CronJob,
|
||||
debug = require('debug')('box:cron'),
|
||||
digest = require('./digest.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
janitor = require('./janitor.js'),
|
||||
scheduler = require('./scheduler.js'),
|
||||
settings = require('./settings.js'),
|
||||
semver = require('semver'),
|
||||
updateChecker = require('./updatechecker.js');
|
||||
|
||||
var gAutoupdaterJob = null,
|
||||
gBoxUpdateCheckerJob = null,
|
||||
var gAliveJob = null, // send periodic stats
|
||||
gAppUpdateCheckerJob = null,
|
||||
gHeartbeatJob = null, // for CaaS health check
|
||||
gAliveJob = null, // send periodic stats
|
||||
gAutoupdaterJob = null,
|
||||
gBackupJob = null,
|
||||
gCleanupTokensJob = null,
|
||||
gCleanupBackupsJob = null,
|
||||
gDockerVolumeCleanerJob = null,
|
||||
gSchedulerSyncJob = null,
|
||||
gBoxUpdateCheckerJob = null,
|
||||
gCertificateRenewJob = null,
|
||||
gCheckDiskSpaceJob = null,
|
||||
gCleanupBackupsJob = null,
|
||||
gCleanupEventlogJob = null,
|
||||
gDynamicDNSJob = null;
|
||||
gCleanupTokensJob = null,
|
||||
gDockerVolumeCleanerJob = null,
|
||||
gDynamicDNSJob = null,
|
||||
gHeartbeatJob = null, // for CaaS health check
|
||||
gSchedulerSyncJob = null,
|
||||
gDigestEmailJob = null;
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
|
||||
var AUDIT_SOURCE = { userId: null, username: 'cron' };
|
||||
@@ -65,7 +69,7 @@ function initialize(callback) {
|
||||
var randomHourMinute = Math.floor(60*Math.random());
|
||||
gAliveJob = new CronJob({
|
||||
cronTime: '00 ' + randomHourMinute + ' * * * *', // every hour on a random minute
|
||||
onTick: cloudron.sendAliveStatus,
|
||||
onTick: appstore.sendAliveStatus,
|
||||
start: true
|
||||
});
|
||||
|
||||
@@ -91,7 +95,7 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gBackupJob) gBackupJob.stop();
|
||||
gBackupJob = new CronJob({
|
||||
cronTime: '00 00 */4 * * *', // every 4 hours. backups.ensureBackup() will only trigger a backup once per day
|
||||
cronTime: '00 00 */6 * * *', // every 6 hours. backups.ensureBackup() will only trigger a backup once per day
|
||||
onTick: backups.ensureBackup.bind(null, AUDIT_SOURCE, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -105,13 +109,12 @@ function recreateJobs(tz) {
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
// randomized pattern per cloudron every 10 min
|
||||
var randomMinute = Math.floor(10*Math.random());
|
||||
var random10MinPattern = [0,1,2,3,4,5].map(function (n) { return n*10+randomMinute; }).join(',');
|
||||
// randomized pattern per cloudron every hour
|
||||
var randomMinute = Math.floor(60*Math.random());
|
||||
|
||||
if (gBoxUpdateCheckerJob) gBoxUpdateCheckerJob.stop();
|
||||
gBoxUpdateCheckerJob = new CronJob({
|
||||
cronTime: '00 ' + random10MinPattern + ' * * * *', // every 10 minutes
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
onTick: updateChecker.checkBoxUpdates,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -119,7 +122,7 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gAppUpdateCheckerJob) gAppUpdateCheckerJob.stop();
|
||||
gAppUpdateCheckerJob = new CronJob({
|
||||
cronTime: '00 ' + random10MinPattern + ' * * * *', // every 10 minutes
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
onTick: updateChecker.checkAppUpdates,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -135,8 +138,8 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gCleanupBackupsJob) gCleanupBackupsJob.stop();
|
||||
gCleanupBackupsJob = new CronJob({
|
||||
cronTime: '00 */30 * * * *', // every 30 minutes
|
||||
onTick: janitor.cleanupBackups,
|
||||
cronTime: '00 45 */6 * * *', // every 6 hours. try not to overlap with ensureBackup job
|
||||
onTick: backups.cleanup,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -172,6 +175,14 @@ function recreateJobs(tz) {
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
if (gDigestEmailJob) gDigestEmailJob.stop();
|
||||
gDigestEmailJob = new CronJob({
|
||||
cronTime: '00 00 00 * * 3', // every wednesday
|
||||
onTick: digest.maybeSend,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
}
|
||||
|
||||
function autoupdatePatternChanged(pattern) {
|
||||
@@ -189,11 +200,15 @@ function autoupdatePatternChanged(pattern) {
|
||||
onTick: function() {
|
||||
var updateInfo = updateChecker.getUpdateInfo();
|
||||
if (updateInfo.box) {
|
||||
debug('Starting autoupdate to %j', updateInfo.box);
|
||||
cloudron.updateToLatest(AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
if (semver.major(updateInfo.box.version) === semver.major(config.version())) {
|
||||
debug('Starting autoupdate to %j', updateInfo.box);
|
||||
cloudron.updateToLatest(AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
} else {
|
||||
debug('Block automatic update for major version');
|
||||
}
|
||||
} else if (updateInfo.apps) {
|
||||
debug('Starting app update to %j', updateInfo.apps);
|
||||
apps.updateApps(updateInfo.apps, AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
apps.autoupdateApps(updateInfo.apps, AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
} else {
|
||||
debug('No auto updates available');
|
||||
}
|
||||
@@ -267,5 +282,8 @@ function uninitialize(callback) {
|
||||
if (gDynamicDNSJob) gDynamicDNSJob.stop();
|
||||
gDynamicDNSJob = null;
|
||||
|
||||
if (gDigestEmailJob) gDigestEmailJob.stop();
|
||||
gDigestEmailJob = null;
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
@@ -0,0 +1,46 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
resolve: resolve
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
child_process = require('child_process'),
|
||||
debug = require('debug')('box:dig');
|
||||
|
||||
function resolve(domain, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// dig @server cloudron.io TXT +short
|
||||
var args = [ ];
|
||||
if (options.server) args.push('@' + options.server);
|
||||
if (type === 'PTR') {
|
||||
args.push('-x', domain);
|
||||
} else {
|
||||
args.push(domain, type);
|
||||
}
|
||||
args.push('+short');
|
||||
|
||||
child_process.execFile('/usr/bin/dig', args, { encoding: 'utf8', killSignal: 'SIGKILL', timeout: options.timeout || 0 }, function (error, stdout, stderr) {
|
||||
if (error && error.killed) error.code = 'ETIMEDOUT';
|
||||
|
||||
if (error || stderr) debug('resolve error (%j): %j %s %s', args, error, stdout, stderr);
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('resolve (%j): %s', args, stdout);
|
||||
|
||||
if (!stdout) return callback(); // timeout or no result
|
||||
|
||||
var lines = stdout.trim().split('\n');
|
||||
if (type === 'MX') {
|
||||
lines = lines.map(function (line) {
|
||||
var parts = line.split(' ');
|
||||
return { priority: parts[0], exchange: parts[1] };
|
||||
});
|
||||
}
|
||||
return callback(null, lines);
|
||||
});
|
||||
}
|
||||
@@ -0,0 +1,64 @@
|
||||
'use strict';
|
||||
|
||||
var appstore = require('./appstore.js'),
|
||||
debug = require('debug')('box:digest'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
updatechecker = require('./updatechecker.js'),
|
||||
mailer = require('./mailer.js'),
|
||||
settings = require('./settings.js');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
exports = module.exports = {
|
||||
maybeSend: maybeSend
|
||||
};
|
||||
|
||||
function maybeSend(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getEmailDigest(function (error, enabled) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!enabled) {
|
||||
debug('Email digest is disabled');
|
||||
return callback();
|
||||
}
|
||||
|
||||
var updateInfo = updatechecker.getUpdateInfo();
|
||||
var pendingAppUpdates = updateInfo.apps || {};
|
||||
pendingAppUpdates = Object.keys(pendingAppUpdates).map(function (key) { return pendingAppUpdates[key]; });
|
||||
|
||||
appstore.getSubscription(function (error, result) {
|
||||
if (error) debug('Error getting subscription:', error);
|
||||
|
||||
var hasSubscription = result && result.plan.id !== 'free' && result.plan.id !== 'undecided';
|
||||
|
||||
eventlog.getByActionLastWeek(eventlog.ACTION_APP_UPDATE, function (error, appUpdates) {
|
||||
if (error) return callback(error);
|
||||
|
||||
eventlog.getByActionLastWeek(eventlog.ACTION_UPDATE, function (error, boxUpdates) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var info = {
|
||||
hasSubscription: hasSubscription,
|
||||
|
||||
pendingAppUpdates: pendingAppUpdates,
|
||||
pendingBoxUpdate: updateInfo.box || null,
|
||||
|
||||
finishedAppUpdates: (appUpdates || []).map(function (e) { return e.data; }),
|
||||
finishedBoxUpdates: (boxUpdates || []).map(function (e) { return e.data; })
|
||||
};
|
||||
|
||||
if (info.pendingAppUpdates.length || info.pendingBoxUpdate || info.finishedAppUpdates.length || info.finishedBoxUpdates.length) {
|
||||
debug('maybeSend: sending digest email', info);
|
||||
mailer.sendDigest(info);
|
||||
} else {
|
||||
debug('maybeSend: nothing happened, NOT sending digest email');
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -112,9 +112,10 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, domain, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
@@ -0,0 +1,260 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
upsert: upsert,
|
||||
get: get,
|
||||
del: del,
|
||||
waitForDns: require('./waitfordns.js'),
|
||||
verifyDnsConfig: verifyDnsConfig
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
dns = require('dns'),
|
||||
_ = require('underscore'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
superagent = require('superagent'),
|
||||
debug = require('debug')('box:dns/cloudflare'),
|
||||
util = require('util');
|
||||
|
||||
// we are using latest v4 stable API https://api.cloudflare.com/#getting-started-endpoints
|
||||
var CLOUDFLARE_ENDPOINT = 'https://api.cloudflare.com/client/v4';
|
||||
|
||||
function translateRequestError(result, callback) {
|
||||
assert.strictEqual(typeof result, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, util.format('%s %j', result.statusCode, 'API does not exist')));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if ((result.statusCode === 400 || result.statusCode === 401 || result.statusCode === 403) && result.body.errors.length > 0) {
|
||||
let error = result.body.errors[0];
|
||||
let message = error.message;
|
||||
if (error.code === 6003) {
|
||||
if (error.error_chain[0] && error.error_chain[0].code === 6103) message = 'Invalid API Key';
|
||||
else message = 'Invalid credentials';
|
||||
}
|
||||
|
||||
return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, message));
|
||||
}
|
||||
|
||||
callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
}
|
||||
|
||||
function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
superagent.get(CLOUDFLARE_ENDPOINT + '/zones?name=' + zoneName + '&status=active')
|
||||
.set('X-Auth-Key', dnsConfig.token)
|
||||
.set('X-Auth-Email', dnsConfig.email)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
if (!result.body.result.length) return callback(new SubdomainError(SubdomainError.NOT_FOUND, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
callback(null, result.body.result[0]);
|
||||
});
|
||||
}
|
||||
|
||||
function getDNSRecordsByZoneId(dnsConfig, zoneId, zoneName, subdomain, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneId, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
superagent.get(CLOUDFLARE_ENDPOINT + '/zones/' + zoneId + '/dns_records')
|
||||
.set('X-Auth-Key',dnsConfig.token)
|
||||
.set('X-Auth-Email',dnsConfig.email)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
|
||||
var fqdn = subdomain === '' ? zoneName : subdomain + '.' + zoneName;
|
||||
var tmp = result.body.result.filter(function (record) {
|
||||
return (record.type === type && record.name === fqdn);
|
||||
});
|
||||
|
||||
return callback(null, tmp);
|
||||
});
|
||||
}
|
||||
|
||||
function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(util.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var fqdn = subdomain === '' ? zoneName : subdomain + '.' + zoneName;
|
||||
|
||||
debug('upsert: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result){
|
||||
if (error) return callback(error);
|
||||
|
||||
var zoneId = result.id;
|
||||
|
||||
getDNSRecordsByZoneId(dnsConfig, zoneId, zoneName, subdomain, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var dnsRecords = result;
|
||||
|
||||
// used to track available records to update instead of create
|
||||
var i = 0;
|
||||
|
||||
async.eachSeries(values, function (value, callback) {
|
||||
var data = {
|
||||
type: type,
|
||||
name: fqdn,
|
||||
content: value,
|
||||
ttl: 120 // 1 means "automatic" (meaning 300ms) and 120 is the lowest supported
|
||||
};
|
||||
|
||||
if (i >= dnsRecords.length) {
|
||||
superagent.post(CLOUDFLARE_ENDPOINT + '/zones/'+ zoneId + '/dns_records')
|
||||
.set('X-Auth-Key',dnsConfig.token)
|
||||
.set('X-Auth-Email',dnsConfig.email)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
} else {
|
||||
superagent.put(CLOUDFLARE_ENDPOINT + '/zones/'+ zoneId + '/dns_records/' + dnsRecords[i].id)
|
||||
.set('X-Auth-Key',dnsConfig.token)
|
||||
.set('X-Auth-Email',dnsConfig.email)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
// increment, as we have consumed the record
|
||||
++i;
|
||||
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, 'unused');
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function get(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result){
|
||||
if (error) return callback(error);
|
||||
|
||||
getDNSRecordsByZoneId(dnsConfig, result.id, zoneName, subdomain, type, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var tmp = result.map(function (record) { return record.content; });
|
||||
debug('get: %j', tmp);
|
||||
|
||||
callback(null, tmp);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof subdomain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert(util.isArray(values));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result){
|
||||
if (error) return callback(error);
|
||||
|
||||
getDNSRecordsByZoneId(dnsConfig, result.id, zoneName, subdomain, type, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
if (result.length === 0) return callback(null);
|
||||
|
||||
var zoneId = result[0].zone_id;
|
||||
|
||||
var tmp = result.filter(function (record) { return values.some(function (value) { return value === record.content; }); });
|
||||
debug('del: %j', tmp);
|
||||
|
||||
if (tmp.length === 0) return callback(null);
|
||||
|
||||
async.eachSeries(tmp, function (record, callback) {
|
||||
superagent.del(CLOUDFLARE_ENDPOINT + '/zones/'+ zoneId + '/dns_records/' + record.id)
|
||||
.set('X-Auth-Key',dnsConfig.token)
|
||||
.set('X-Auth-Email',dnsConfig.email)
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 204 || result.body.success !== true) return translateRequestError(result, callback);
|
||||
|
||||
debug('del: done');
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, 'unused');
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof fqdn, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (!dnsConfig.token || typeof dnsConfig.token !== 'string') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'token must be a non-empty string'));
|
||||
if (!dnsConfig.email || typeof dnsConfig.email !== 'string') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'email must be a non-empty string'));
|
||||
|
||||
var credentials = {
|
||||
provider: dnsConfig.provider,
|
||||
token: dnsConfig.token,
|
||||
email: dnsConfig.email
|
||||
};
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));
|
||||
|
||||
getZoneByName(dnsConfig, zoneName, function(error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!_.isEqual(result.name_servers.sort(), nameservers.sort())) {
|
||||
debug('verifyDnsConfig: %j and %j do not match', nameservers, result.name_servers);
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Cloudflare'));
|
||||
}
|
||||
|
||||
upsert(credentials, zoneName, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
callback(null, credentials);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -10,14 +10,19 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/digitalocean'),
|
||||
dns = require('native-dns'),
|
||||
dns = require('dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
var DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
|
||||
|
||||
function formatError(response) {
|
||||
return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
|
||||
}
|
||||
|
||||
function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
@@ -30,9 +35,9 @@ function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
var tmp = result.body.domain_records.filter(function (record) {
|
||||
return (record.type === type && record.name === subdomain);
|
||||
@@ -74,7 +79,8 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
type: type,
|
||||
name: subdomain,
|
||||
data: value,
|
||||
priority: priority
|
||||
priority: priority,
|
||||
ttl: 1
|
||||
};
|
||||
|
||||
if (i >= result.length) {
|
||||
@@ -84,9 +90,9 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -100,9 +106,9 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
++i;
|
||||
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -165,8 +171,8 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
@@ -175,9 +181,10 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof fqdn, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -188,7 +195,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));
|
||||
|
||||
@@ -197,7 +204,9 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
const name = constants.ADMIN_LOCATION + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
|
||||
|
||||
upsert(credentials, zoneName, name, 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
@@ -56,9 +56,10 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
callback(new Error('not implemented'));
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, domain, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
@@ -10,8 +10,10 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dns = require('native-dns'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util');
|
||||
|
||||
@@ -49,15 +51,16 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
return callback();
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, domain, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var adminDomain = 'my.' + domain;
|
||||
var adminDomain = constants.ADMIN_LOCATION + '.' + domain;
|
||||
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));
|
||||
|
||||
async.every(nameservers, function (nameserver, everyNsCallback) {
|
||||
@@ -68,42 +71,30 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, everyIpCallback) {
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: adminDomain, type: 'A' }),
|
||||
server: { address: nsIp },
|
||||
timeout: 5000
|
||||
});
|
||||
dig.resolve(adminDomain, 'A', { server: nsIp, timeout: 5000 }, function (error, answer) {
|
||||
if (error && error.code === 'ETIMEDOUT') {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
}
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, adminDomain, error);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', message);
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', answer);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
debug('verifyDnsConfig: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, adminDomain, answer, ip);
|
||||
|
||||
var match = answer.some(function (a) {
|
||||
return a.address === ip;
|
||||
});
|
||||
var match = answer.some(function (a) { return a === ip; });
|
||||
|
||||
if (match) return everyIpCallback(null, true); // done!
|
||||
|
||||
everyIpCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
}, everyNsCallback);
|
||||
});
|
||||
}, function (error, success) {
|
||||
|
||||
@@ -46,8 +46,9 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
return callback();
|
||||
}
|
||||
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
function waitForDns(domain, zoneName, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
@@ -56,9 +57,10 @@ function waitForDns(domain, value, type, options, callback) {
|
||||
callback();
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, domain, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
@@ -13,8 +13,9 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
AWS = require('aws-sdk'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/route53'),
|
||||
dns = require('native-dns'),
|
||||
dns = require('dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
@@ -41,6 +42,7 @@ function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.listHostedZones({}, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
var zone = result.HostedZones.filter(function (zone) {
|
||||
@@ -64,6 +66,7 @@ function getHostedZone(dnsConfig, zoneName, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.getHostedZone({ Id: zone.Id }, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
callback(null, result);
|
||||
@@ -105,6 +108,7 @@ function add(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'PriorRequestNotComplete') return callback(new SubdomainError(SubdomainError.STILL_BUSY, error.message));
|
||||
if (error && error.code === 'InvalidChangeBatch') return callback(new SubdomainError(SubdomainError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
@@ -145,6 +149,7 @@ function get(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.listResourceRecordSets(params, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
if (result.ResourceRecordSets.length === 0) return callback(null, [ ]);
|
||||
if (result.ResourceRecordSets[0].Name !== params.StartRecordName || result.ResourceRecordSets[0].Type !== params.StartRecordType) return callback(null, [ ]);
|
||||
@@ -190,6 +195,7 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.message && error.message.indexOf('it was not found') !== -1) {
|
||||
debug('del: resource record set not found.', error);
|
||||
return callback(new SubdomainError(SubdomainError.NOT_FOUND, error.message));
|
||||
@@ -212,9 +218,10 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof fqdn, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -228,11 +235,11 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here
|
||||
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));
|
||||
|
||||
getHostedZone(credentials, domain, function (error, zone) {
|
||||
getHostedZone(credentials, zoneName, function (error, zone) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!_.isEqual(zone.DelegationSet.NameServers.sort(), nameservers.sort())) {
|
||||
@@ -240,7 +247,9 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Route53'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
const name = constants.ADMIN_LOCATION + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
|
||||
|
||||
upsert(credentials, zoneName, name, 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
@@ -5,9 +5,9 @@ exports = module.exports = waitForDns;
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
debug = require('debug')('box:dns/waitfordns'),
|
||||
dns = require('native-dns'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
tld = require('tldjs'),
|
||||
util = require('util');
|
||||
|
||||
function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
@@ -25,61 +25,53 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, iteratorCallback) {
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: domain, type: type }),
|
||||
server: { address: nsIp },
|
||||
timeout: 5000
|
||||
});
|
||||
dig.resolve(domain, type, { server: nsIp, timeout: 5000 }, function (error, answer) {
|
||||
if (error && error.code === 'ETIMEDOUT') {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
}
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, domain, error);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, domain, type, message);
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s)', nameserver, nsIp, domain, type);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
debug('isChangeSynced: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, domain, answer, value);
|
||||
|
||||
var match = answer.some(function (a) {
|
||||
return ((type === 'A' && value.test(a.address)) ||
|
||||
(type === 'CNAME' && value.test(a.data)) ||
|
||||
(type === 'TXT' && value.test(a.data.join(''))));
|
||||
return ((type === 'A' && value.test(a)) ||
|
||||
(type === 'CNAME' && value.test(a)) ||
|
||||
(type === 'TXT' && value.test(a)));
|
||||
});
|
||||
|
||||
if (match) return iteratorCallback(null, true); // done!
|
||||
|
||||
iteratorCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
}, callback);
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
// check if IP change has propagated to every nameserver
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
function waitForDns(domain, zoneName, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var zoneName = tld.getDomain(domain);
|
||||
if (typeof value === 'string') {
|
||||
// http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
|
||||
value = new RegExp('^' + value.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '$');
|
||||
}
|
||||
|
||||
debug('waitForIp: domain %s to be %s in zone %s.', domain, value, zoneName);
|
||||
|
||||
var attempt = 1;
|
||||
|
||||
@@ -40,7 +40,7 @@ var addons = require('./addons.js'),
|
||||
child_process = require('child_process'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:src/docker.js'),
|
||||
debug = require('debug')('box:docker.js'),
|
||||
once = require('once'),
|
||||
safe = require('safetydance'),
|
||||
spawn = child_process.spawn,
|
||||
@@ -202,8 +202,10 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
},
|
||||
CpuShares: 512, // relative to 1024 for system processes
|
||||
VolumesFrom: isAppContainer ? null : [ app.containerId + ":rw" ],
|
||||
NetworkMode: isAppContainer ? 'cloudron' : ('container:' + app.containerId), // share network namespace with parent
|
||||
SecurityOpt: enableSecurityOpt ? [ "apparmor:docker-cloudron-app" ] : null // profile available only on cloudron
|
||||
NetworkMode: 'cloudron',
|
||||
Dns: ['172.18.0.1'], // use internal dns
|
||||
DnsSearch: ['.'], // use internal dns
|
||||
SecurityOpt: enableSecurityOpt ? [ "apparmor=docker-cloudron-app" ] : null // profile available only on cloudron
|
||||
}
|
||||
};
|
||||
containerOptions = _.extend(containerOptions, options);
|
||||
|
||||
@@ -0,0 +1,303 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
verifyRelay: verifyRelay,
|
||||
getStatus: getStatus,
|
||||
|
||||
EmailError: EmailError
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
cloudron = require('./cloudron.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:email'),
|
||||
dig = require('./dig.js'),
|
||||
net = require('net'),
|
||||
nodemailer = require('nodemailer'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
smtpTransport = require('nodemailer-smtp-transport'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
const digOptions = { server: '127.0.0.1', port: 53, timeout: 5000 };
|
||||
|
||||
function EmailError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(EmailError, Error);
|
||||
EmailError.INTERNAL_ERROR = 'Internal Error';
|
||||
EmailError.BAD_FIELD = 'Bad Field';
|
||||
|
||||
function checkOutboundPort25(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var smtpServer = _.sample([
|
||||
'smtp.gmail.com',
|
||||
'smtp.live.com',
|
||||
'smtp.mail.yahoo.com',
|
||||
'smtp.o2.ie',
|
||||
'smtp.comcast.net',
|
||||
'outgoing.verizon.net'
|
||||
]);
|
||||
|
||||
var relay = {
|
||||
value: 'OK',
|
||||
status: false
|
||||
};
|
||||
|
||||
var client = new net.Socket();
|
||||
client.setTimeout(5000);
|
||||
client.connect(25, smtpServer);
|
||||
client.on('connect', function () {
|
||||
relay.status = true;
|
||||
relay.value = 'OK';
|
||||
client.destroy(); // do not use end() because it still triggers timeout
|
||||
callback(null, relay);
|
||||
});
|
||||
client.on('timeout', function () {
|
||||
relay.status = false;
|
||||
relay.value = 'Connect to ' + smtpServer + ' timed out';
|
||||
client.destroy();
|
||||
callback(new Error('Timeout'), relay);
|
||||
});
|
||||
client.on('error', function (error) {
|
||||
relay.status = false;
|
||||
relay.value = 'Connect to ' + smtpServer + ' failed: ' + error.message;
|
||||
client.destroy();
|
||||
callback(error, relay);
|
||||
});
|
||||
}
|
||||
|
||||
function checkSmtpRelay(relay, callback) {
|
||||
var result = {
|
||||
value: 'OK',
|
||||
status: false
|
||||
};
|
||||
|
||||
var transporter = nodemailer.createTransport(smtpTransport({
|
||||
host: relay.host,
|
||||
port: relay.port,
|
||||
auth: {
|
||||
user: relay.username,
|
||||
pass: relay.password
|
||||
}
|
||||
}));
|
||||
|
||||
transporter.verify(function(error) {
|
||||
result.status = !error;
|
||||
if (error) {
|
||||
result.value = error.message;
|
||||
return callback(error, result);
|
||||
}
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
|
||||
function verifyRelay(relay, callback) {
|
||||
assert.strictEqual(typeof relay, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var verifier = relay.provider === 'cloudron-smtp' ? checkOutboundPort25 : checkSmtpRelay.bind(null, relay);
|
||||
|
||||
verifier(function (error) {
|
||||
if (error) return callback(new EmailError(EmailError.BAD_FIELD, error.message));
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function checkDkim(callback) {
|
||||
var dkim = {
|
||||
domain: constants.DKIM_SELECTOR + '._domainkey.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
expected: null,
|
||||
value: null,
|
||||
status: false
|
||||
};
|
||||
|
||||
var dkimKey = cloudron.readDkimPublicKeySync();
|
||||
if (!dkimKey) return callback(new Error('Failed to read dkim public key'), dkim);
|
||||
|
||||
dkim.expected = '"v=DKIM1; t=s; p=' + dkimKey + '"';
|
||||
|
||||
dig.resolve(dkim.domain, dkim.type, digOptions, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, dkim); // not setup
|
||||
if (error) return callback(error, dkim);
|
||||
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
dkim.value = txtRecords[0];
|
||||
dkim.status = (dkim.value === dkim.expected);
|
||||
}
|
||||
|
||||
callback(null, dkim);
|
||||
});
|
||||
}
|
||||
|
||||
function checkSpf(callback) {
|
||||
var spf = {
|
||||
domain: config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: '"v=spf1 a:' + config.adminFqdn() + ' ~all"',
|
||||
status: false
|
||||
};
|
||||
|
||||
// https://agari.zendesk.com/hc/en-us/articles/202952749-How-long-can-my-SPF-record-be-
|
||||
dig.resolve(spf.domain, spf.type, digOptions, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, spf); // not setup
|
||||
if (error) return callback(error, spf);
|
||||
|
||||
if (!Array.isArray(txtRecords)) return callback(null, spf);
|
||||
|
||||
var i;
|
||||
for (i = 0; i < txtRecords.length; i++) {
|
||||
if (txtRecords[i].indexOf('"v=spf1 ') !== 0) continue; // not SPF
|
||||
spf.value = txtRecords[i];
|
||||
spf.status = spf.value.indexOf(' a:' + config.adminFqdn()) !== -1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (spf.status) {
|
||||
spf.expected = spf.value;
|
||||
} else if (i !== txtRecords.length) {
|
||||
spf.expected = '"v=spf1 a:' + config.adminFqdn() + ' ' + spf.value.slice('"v=spf1 '.length);
|
||||
}
|
||||
|
||||
callback(null, spf);
|
||||
});
|
||||
}
|
||||
|
||||
function checkMx(callback) {
|
||||
var mx = {
|
||||
domain: config.fqdn(),
|
||||
type: 'MX',
|
||||
value: null,
|
||||
expected: '10 ' + config.mailFqdn() + '.',
|
||||
status: false
|
||||
};
|
||||
|
||||
dig.resolve(mx.domain, mx.type, digOptions, function (error, mxRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, mx); // not setup
|
||||
if (error) return callback(error, mx);
|
||||
|
||||
if (Array.isArray(mxRecords) && mxRecords.length !== 0) {
|
||||
mx.status = mxRecords.length == 1 && mxRecords[0].exchange === (config.mailFqdn() + '.');
|
||||
mx.value = mxRecords.map(function (r) { return r.priority + ' ' + r.exchange; }).join(' ');
|
||||
}
|
||||
|
||||
callback(null, mx);
|
||||
});
|
||||
}
|
||||
|
||||
function checkDmarc(callback) {
|
||||
var dmarc = {
|
||||
domain: '_dmarc.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: '"v=DMARC1; p=reject; pct=100"',
|
||||
status: false
|
||||
};
|
||||
|
||||
dig.resolve(dmarc.domain, dmarc.type, digOptions, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, dmarc); // not setup
|
||||
if (error) return callback(error, dmarc);
|
||||
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
dmarc.value = txtRecords[0];
|
||||
dmarc.status = (dmarc.value === dmarc.expected);
|
||||
}
|
||||
|
||||
callback(null, dmarc);
|
||||
});
|
||||
}
|
||||
|
||||
function checkPtr(callback) {
|
||||
var ptr = {
|
||||
domain: null,
|
||||
type: 'PTR',
|
||||
value: null,
|
||||
expected: config.mailFqdn() + '.',
|
||||
status: false
|
||||
};
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error, ptr);
|
||||
|
||||
ptr.domain = ip.split('.').reverse().join('.') + '.in-addr.arpa';
|
||||
|
||||
dig.resolve(ip, 'PTR', digOptions, function (error, ptrRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, ptr); // not setup
|
||||
if (error) return callback(error, ptr);
|
||||
|
||||
if (Array.isArray(ptrRecords) && ptrRecords.length !== 0) {
|
||||
ptr.value = ptrRecords.join(' ');
|
||||
ptr.status = ptrRecords.some(function (v) { return v === ptr.expected; });
|
||||
}
|
||||
|
||||
return callback(null, ptr);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getStatus(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var results = {};
|
||||
|
||||
function recordResult(what, func) {
|
||||
return function (callback) {
|
||||
func(function (error, result) {
|
||||
if (error) debug('Ignored error - ' + what + ':', error);
|
||||
|
||||
safe.set(results, what, result);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
settings.getMailRelay(function (error, relay) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var checks = [
|
||||
recordResult('dns.mx', checkMx),
|
||||
recordResult('dns.dmarc', checkDmarc)
|
||||
];
|
||||
|
||||
if (relay.provider === 'cloudron-smtp') {
|
||||
// these tests currently only make sense when using Cloudron's SMTP server at this point
|
||||
checks.push(
|
||||
recordResult('dns.spf', checkSpf),
|
||||
recordResult('dns.dkim', checkDkim),
|
||||
recordResult('dns.ptr', checkPtr),
|
||||
recordResult('relay', checkOutboundPort25)
|
||||
);
|
||||
} else {
|
||||
checks.push(recordResult('relay', checkSmtpRelay.bind(null, relay)));
|
||||
}
|
||||
|
||||
async.parallel(checks, function () {
|
||||
callback(null, results);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -6,6 +6,7 @@ exports = module.exports = {
|
||||
add: add,
|
||||
get: get,
|
||||
getAllPaged: getAllPaged,
|
||||
getByActionLastWeek: getByActionLastWeek,
|
||||
cleanup: cleanup,
|
||||
|
||||
// keep in sync with webadmin index.js filter and CLI tool
|
||||
@@ -103,6 +104,17 @@ function getAllPaged(action, search, page, perPage, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getByActionLastWeek(action, callback) {
|
||||
assert(typeof action === 'string' || action === null);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
eventlogdb.getByActionLastWeek(action, function (error, boxes) {
|
||||
if (error) return callback(new EventLogError(EventLogError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, boxes);
|
||||
});
|
||||
}
|
||||
|
||||
function cleanup(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
exports = module.exports = {
|
||||
get: get,
|
||||
getAllPaged: getAllPaged,
|
||||
getByActionLastWeek: getByActionLastWeek,
|
||||
add: add,
|
||||
count: count,
|
||||
delByCreationTime: delByCreationTime,
|
||||
@@ -19,10 +20,11 @@ var assert = require('assert'),
|
||||
|
||||
var EVENTLOGS_FIELDS = [ 'id', 'action', 'source', 'data', 'creationTime' ].join(',');
|
||||
|
||||
// until mysql module supports automatic type coercion
|
||||
function postProcess(eventLog) {
|
||||
// usually we have sourceJson and dataJson, however since this used to be the JSON data type, we don't
|
||||
eventLog.source = safe.JSON.parse(eventLog.source);
|
||||
eventLog.data = safe.JSON.parse(eventLog.data);
|
||||
|
||||
return eventLog;
|
||||
}
|
||||
|
||||
@@ -71,6 +73,20 @@ function getAllPaged(action, search, page, perPage, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getByActionLastWeek(action, callback) {
|
||||
assert(typeof action === 'string' || action === null);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var query = 'SELECT ' + EVENTLOGS_FIELDS + ' FROM eventlog WHERE action=? AND creationTime >= DATE_SUB(NOW(), INTERVAL 1 WEEK) ORDER BY creationTime DESC';
|
||||
database.query(query, [ action ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(postProcess);
|
||||
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function add(id, action, source, data, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof action, 'string');
|
||||
|
||||
@@ -5,20 +5,20 @@
|
||||
// Do not require anything here!
|
||||
|
||||
exports = module.exports = {
|
||||
// a version bump means that all app containers are recreated
|
||||
'version': 46,
|
||||
// a major version makes all apps restore from backup
|
||||
// a minor version makes all apps re-configure themselves
|
||||
'version': '48.5.0',
|
||||
|
||||
'baseImages': [ 'cloudron/base:0.10.0' ],
|
||||
|
||||
// Note that if any of the databases include an upgrade, bump the infra version above
|
||||
// This is because we upgrade using dumps instead of mysql_upgrade, pg_upgrade etc
|
||||
'images': {
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.14.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.16.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.12.0' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.18.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.17.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.13.0' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.11.0' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.30.3' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.36.2' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.11.0' }
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -3,16 +3,13 @@
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
authcodedb = require('./authcodedb.js'),
|
||||
backups = require('./backups.js'),
|
||||
debug = require('debug')('box:src/janitor'),
|
||||
debug = require('debug')('box:janitor'),
|
||||
docker = require('./docker.js').connection,
|
||||
settings = require('./settings.js'),
|
||||
tokendb = require('./tokendb.js');
|
||||
|
||||
exports = module.exports = {
|
||||
cleanupTokens: cleanupTokens,
|
||||
cleanupDockerVolumes: cleanupDockerVolumes,
|
||||
cleanupBackups: cleanupBackups
|
||||
cleanupDockerVolumes: cleanupDockerVolumes
|
||||
};
|
||||
|
||||
var NOOP_CALLBACK = function () { };
|
||||
@@ -104,37 +101,3 @@ function cleanupDockerVolumes(callback) {
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function cleanupBackups(callback) {
|
||||
assert(!callback || typeof callback === 'function'); // callback is null when called from cronjob
|
||||
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('Cleaning backups');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// nothing to do here
|
||||
if (backupConfig.provider !== 'filesystem') return callback();
|
||||
|
||||
backups.getPaged(1, 1000, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// sort with latest backups first in the array and slice 2
|
||||
var toCleanup = result.sort(function (a, b) { return b.creationTime.getTime() - a.creationTime.getTime(); }).slice(2);
|
||||
|
||||
debug('cleanupBackups: about to clean: ', toCleanup);
|
||||
|
||||
async.each(toCleanup, function (backup, callback) {
|
||||
backups.removeBackup(backup.id, backup.dependsOn, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
debug('cleanupBackups: %s, %s done', backup.id, backup.dependsOn.join(', '));
|
||||
|
||||
callback();
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
@@ -318,18 +319,28 @@ function authenticateMailbox(req, res, next) {
|
||||
if (error) return next(new ldap.OperationsError(error.message));
|
||||
|
||||
if (mailbox.ownerType === mailboxdb.TYPE_APP) {
|
||||
if (req.credentials !== mailbox.ownerId) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
eventlog.add(eventlog.ACTION_APP_LOGIN, { authType: 'ldap', mailboxId: name }, { appId: mailbox.ownerId });
|
||||
return res.end();
|
||||
var addonId = req.dn.rdns[1].attrs.ou.value.toLowerCase(); // 'sendmail' or 'recvmail'
|
||||
var name;
|
||||
if (addonId === 'sendmail') name = 'MAIL_SMTP_PASSWORD';
|
||||
else if (addonId === 'recvmail') name = 'MAIL_IMAP_PASSWORD';
|
||||
else return next(new ldap.OperationsError('Invalid DN'));
|
||||
|
||||
appdb.getAddonConfigByName(mailbox.ownerId, addonId, name, function (error, value) {
|
||||
if (error) return next(new ldap.OperationsError(error.message));
|
||||
if (req.credentials !== value) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_LOGIN, { authType: 'ldap', mailboxId: name }, { appId: mailbox.ownerId, addonId: addonId });
|
||||
return res.end();
|
||||
});
|
||||
} else if (mailbox.ownerType === mailboxdb.TYPE_USER) {
|
||||
authenticateUser(req, res, function (error) {
|
||||
if (error) return next(error);
|
||||
eventlog.add(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: name }, { userId: req.user.username });
|
||||
res.end();
|
||||
});
|
||||
} else {
|
||||
return next(new ldap.OperationsError('Unknown ownerType for mailbox'));
|
||||
}
|
||||
|
||||
assert.strictEqual(mailbox.ownerType, mailboxdb.TYPE_USER);
|
||||
|
||||
authenticateUser(req, res, function (error) {
|
||||
if (error) return next(error);
|
||||
eventlog.add(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: name }, { userId: req.user.username });
|
||||
res.end();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -356,7 +367,8 @@ function start(callback) {
|
||||
gServer.search('ou=mailaliases,dc=cloudron', mailAliasSearch);
|
||||
gServer.search('ou=mailinglists,dc=cloudron', mailingListSearch);
|
||||
|
||||
gServer.bind('ou=mailboxes,dc=cloudron', authenticateMailbox);
|
||||
gServer.bind('ou=recvmail,dc=cloudron', authenticateMailbox);
|
||||
gServer.bind('ou=sendmail,dc=cloudron', authenticateMailbox);
|
||||
|
||||
// this is the bind for addons (after bind, they might search and authenticate)
|
||||
gServer.bind('ou=addons,dc=cloudron', function(req, res, next) {
|
||||
|
||||
@@ -0,0 +1,136 @@
|
||||
<% if (format === 'text') { -%>
|
||||
|
||||
Dear <%= cloudronName %> Admin,
|
||||
|
||||
This is the weekly summary of activities on your Cloudron <%= fqdn %>.
|
||||
<% if (info.pendingBoxUpdate) { -%>
|
||||
|
||||
Cloudron v<%- info.pendingBoxUpdate.version %> is available:
|
||||
<% for (var i = 0; i < info.pendingBoxUpdate.changelog.length; i++) { -%>
|
||||
* <%- info.pendingBoxUpdate.changelog[i] %>
|
||||
<% }} -%>
|
||||
<% if (info.pendingAppUpdates.length) { -%>
|
||||
|
||||
One or more app updates are available:
|
||||
<% for (var i = 0; i < info.pendingAppUpdates.length; i++) { -%>
|
||||
- <%= info.pendingAppUpdates[i].manifest.title %> package v<%= info.pendingAppUpdates[i].manifest.version %>
|
||||
<% for (var j = 0; j < info.pendingAppUpdates[i].manifest.changelog.trim().split('\n').length; j++) { -%>
|
||||
<%= info.pendingAppUpdates[i].manifest.changelog.trim().split('\n')[j] %>
|
||||
<% }}} -%>
|
||||
<% if (info.finishedBoxUpdates.length) { -%>
|
||||
|
||||
Cloudron was updated with the following releases:
|
||||
<% for (var i = 0; i < info.finishedBoxUpdates.length; i++) { -%>
|
||||
- Version <%= info.finishedBoxUpdates[i].boxUpdateInfo.version %>
|
||||
<% for (var j = 0; j < info.finishedBoxUpdates[i].boxUpdateInfo.changelog.length; j++) { -%>
|
||||
* <%= info.finishedBoxUpdates[i].boxUpdateInfo.changelog[j] %>
|
||||
<% }}} -%>
|
||||
<% if (info.finishedAppUpdates.length) { -%>
|
||||
|
||||
The following apps were updated:
|
||||
<% for (var i = 0; i < info.finishedAppUpdates.length; i++) { -%>
|
||||
- <%= info.finishedAppUpdates[i].toManifest.title %> package v<%= info.finishedAppUpdates[i].toManifest.version %>
|
||||
<% for (var j = 0; j < info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n').length; j++) { -%>
|
||||
<%= info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n')[j] %>
|
||||
<% }}} -%>
|
||||
<% if (!info.hasSubscription) { -%>
|
||||
|
||||
*Keep your Cloudron automatically up-to-date and secure by upgrading to a paid plan at* <%= webadminUrl %>/#/settings
|
||||
<% } -%>
|
||||
|
||||
Powered by https://cloudron.io
|
||||
|
||||
Sent at: <%= new Date().toUTCString() %>
|
||||
|
||||
<% } else { %>
|
||||
|
||||
<center>
|
||||
<div style="max-width: 800px; text-align: left; border: 1px solid lightgray; padding: 20px;">
|
||||
<center>
|
||||
<img src="<%= cloudronAvatarUrl %>" width="128px" height="128px"/>
|
||||
</center>
|
||||
|
||||
<br/>
|
||||
|
||||
<p>Weekly summary of activities on your Cloudron <a href="<%= webadminUrl %>"><%= cloudronName %></a>:</p>
|
||||
|
||||
<br/>
|
||||
|
||||
<% if (info.pendingBoxUpdate) { -%>
|
||||
<p><b>Cloudron v<%- info.pendingBoxUpdate.version %> is available:</b></p>
|
||||
<ul>
|
||||
<% for (var i = 0; i < info.pendingBoxUpdate.changelog.length; i++) { %>
|
||||
<li><%- info.pendingBoxUpdate.changelog[i].replace(/^[\*,-] /, '') %></li>
|
||||
<% } %>
|
||||
</ul>
|
||||
<% } %>
|
||||
|
||||
<% if (info.pendingAppUpdates.length) { %>
|
||||
<p><b>Available app updates:</b></p>
|
||||
<ul>
|
||||
<% for (var i = 0; i < info.pendingAppUpdates.length; i++) { %>
|
||||
<li>
|
||||
<b><%= info.pendingAppUpdates[i].manifest.title %></b>
|
||||
<ul>
|
||||
<% for (var j = 0; j < info.pendingAppUpdates[i].manifest.changelog.trim().split('\n').length; j++) { %>
|
||||
<li><%= info.pendingAppUpdates[i].manifest.changelog.trim().split('\n')[j].replace(/^[\*,-] /, '') %></li>
|
||||
<% } %>
|
||||
</ul>
|
||||
</li>
|
||||
<% } %>
|
||||
</ul>
|
||||
<% } %>
|
||||
|
||||
<% if (info.finishedBoxUpdates.length) { %>
|
||||
<p><b>Your Cloudron was updated with the following releases:</b></p>
|
||||
<ul>
|
||||
<% for (var i = 0; i < info.finishedBoxUpdates.length; i++) { %>
|
||||
<li>
|
||||
<b><%= info.finishedBoxUpdates[i].boxUpdateInfo.version %></b>
|
||||
<ul>
|
||||
<% for (var j = 0; j < info.finishedBoxUpdates[i].boxUpdateInfo.changelog.length; j++) { %>
|
||||
<li><%= info.finishedBoxUpdates[i].boxUpdateInfo.changelog[j].replace(/^[\*,-] /, '') %></li>
|
||||
<% } %>
|
||||
</ul>
|
||||
</li>
|
||||
<% } %>
|
||||
</ul>
|
||||
<% } %>
|
||||
|
||||
<% if (info.finishedAppUpdates.length) { %>
|
||||
<p><b>The following apps were updated:</b></p>
|
||||
<ul>
|
||||
<% for (var i = 0; i < info.finishedAppUpdates.length; i++) { %>
|
||||
<li>
|
||||
<b><%= info.finishedAppUpdates[i].toManifest.title %></b> (package v<%= info.finishedAppUpdates[i].toManifest.version %>)
|
||||
<ul>
|
||||
<% for (var j = 0; j < info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n').length; j++) { -%>
|
||||
<li><%= info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n')[j].replace(/^[\*,-] /, '') %></li>
|
||||
<% } %>
|
||||
</ul>
|
||||
</li>
|
||||
<% } %>
|
||||
</ul>
|
||||
<% } %>
|
||||
|
||||
<br/>
|
||||
|
||||
<% if (!info.hasSubscription) { %>
|
||||
Keep your Cloudron automatically up-to-date and secure by upgrading to a <a href="<%= webadminUrl %>/#/settings">paid plan</a>.
|
||||
<% } %>
|
||||
|
||||
<br/>
|
||||
<br/>
|
||||
<br/>
|
||||
|
||||
<p style="text-align: right;">
|
||||
<small>
|
||||
Powered by <a href="https://cloudron.io">Cloudron</a><br/>
|
||||
Sent on <%= new Date().toUTCString() %>
|
||||
</small>
|
||||
</p>
|
||||
</div>
|
||||
</center>
|
||||
|
||||
<img src="https://analytics.cloudron.io/piwik.php?idsite=2&rec=1&e_c=CloudronEmail&e_a=digest" style="border:0" alt="" />
|
||||
<% } %>
|
||||
@@ -0,0 +1,55 @@
|
||||
{
|
||||
"format": "html",
|
||||
"webadminUrl": "https://my.cloudron.io",
|
||||
"fqdn": "my.cloudron.io",
|
||||
"cloudronName": "Smartserver",
|
||||
"cloudronAvatarUrl": "https://cloudron.io/img/logo.png",
|
||||
"info": {
|
||||
"pendingBoxUpdate": {
|
||||
"version": "1.3.7",
|
||||
"changelog": [
|
||||
"Feature one",
|
||||
"Feature two"
|
||||
]
|
||||
},
|
||||
"pendingAppUpdates": [{
|
||||
"manifest": {
|
||||
"title": "Wordpress",
|
||||
"version": "1.2.3",
|
||||
"changelog": "* This has changed\n * and that as well"
|
||||
}
|
||||
}],
|
||||
"finishedBoxUpdates": [{
|
||||
"boxUpdateInfo": {
|
||||
"version": "1.0.1",
|
||||
"changelog": [
|
||||
"Feature one",
|
||||
"Feature two"
|
||||
]
|
||||
}
|
||||
}, {
|
||||
"boxUpdateInfo": {
|
||||
"version": "1.0.2",
|
||||
"changelog": [
|
||||
"Feature one",
|
||||
"Feature two",
|
||||
"Feature three"
|
||||
]
|
||||
}
|
||||
}],
|
||||
"finishedAppUpdates": [{
|
||||
"toManifest": {
|
||||
"title": "Rocket.Chat",
|
||||
"version": "0.2.1",
|
||||
"changelog": "* This has changed\n * and that as well\n * some more"
|
||||
}
|
||||
}, {
|
||||
"toManifest": {
|
||||
"title": "Redmine",
|
||||
"version": "1.2.1",
|
||||
"changelog": "* This has changed\n * and that as well\n * some more"
|
||||
}
|
||||
}],
|
||||
"hasSubscription": false
|
||||
}
|
||||
}
|
||||