Compare commits
752 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 0c904af927 | |||
| 9cd025972c | |||
| 21111eccc4 | |||
| 917079f341 | |||
| 4d6d768be1 | |||
| c54cd992ca | |||
| d5ec599dd1 | |||
| 0542ab16d4 | |||
| 7e75ef7685 | |||
| f296265461 | |||
| fb4eade215 | |||
| 8b3e85907c | |||
| ca4876649d | |||
| 7ebc2abe5d | |||
| 37e132319b | |||
| b2728118e9 | |||
| c428f649aa | |||
| 7baf979a59 | |||
| ccecaca047 | |||
| c7ee684f25 | |||
| 52156c9a35 | |||
| 4fba216af9 | |||
| 1d00c788d1 | |||
| d891d39587 | |||
| cfde6e31ad | |||
| 243772d1f5 | |||
| 1c36b8eaf7 | |||
| 120fa4924a | |||
| c3c9c2f39a | |||
| fc90829ba2 | |||
| ce9224c690 | |||
| 18a2107247 | |||
| f13d05dad7 | |||
| 86586444a9 | |||
| 4e47d0595d | |||
| 45e85e4d53 | |||
| a3420f885d | |||
| a266fe13d0 | |||
| 44aba5d6e1 | |||
| 3fe5307ae3 | |||
| d03fb0e71f | |||
| d9723b72e4 | |||
| 6ba61f1bda | |||
| d1df647ddd | |||
| 95c4a1f90c | |||
| e00325e694 | |||
| 85c13cae58 | |||
| 00fd9e5b7f | |||
| dde81ee847 | |||
| c46fc96500 | |||
| 1914a9a703 | |||
| 1a061e4446 | |||
| 29ce80cebe | |||
| 4b6ac538ac | |||
| 70b9000b0e | |||
| 24dcb1b79c | |||
| 384915883f | |||
| 4cfc75f1d1 | |||
| c49cbb524d | |||
| b401c3d930 | |||
| 890a7cfb37 | |||
| 70a1ef1af3 | |||
| 38a0cdc0be | |||
| 93344a5a4a | |||
| 9f792fc04b | |||
| 7cb95faacb | |||
| bf122f0f56 | |||
| 78e9446a05 | |||
| 138e1595fa | |||
| 37b02ad36a | |||
| 02f0055594 | |||
| ec1f0f9320 | |||
| bfe6389f62 | |||
| 30db3e8973 | |||
| 5b67f2cf29 | |||
| a007b74b1c | |||
| a89482d4fa | |||
| 0cd4f133aa | |||
| e5ba4ff973 | |||
| ce133b997d | |||
| 217632354f | |||
| 9841351190 | |||
| f3341f4b7f | |||
| ff1f448860 | |||
| 37f28746fc | |||
| 9a22ba3af7 | |||
| 2942da78de | |||
| 89ff6be971 | |||
| be0d7bcce1 | |||
| 851b257678 | |||
| 579eacb644 | |||
| f52c5b584e | |||
| 8980c18deb | |||
| b05a9ce064 | |||
| 1974314c1f | |||
| 2bde023d4d | |||
| 3a10003246 | |||
| 1b08710b7e | |||
| 101d09eeb3 | |||
| 00f949f156 | |||
| adbe46d369 | |||
| 3198926cd6 | |||
| 957a6a20fe | |||
| 94f75bb0d7 | |||
| 0f442755e5 | |||
| cd2e782d48 | |||
| e97606ca87 | |||
| 00ada80230 | |||
| 34db98c489 | |||
| 110695355c | |||
| 021fb4bb94 | |||
| dea033e4b0 | |||
| 7dfe40739e | |||
| 9f0d1b515c | |||
| 2691d46d50 | |||
| 78c8f1de71 | |||
| d27ee4bfbc | |||
| cc5daa428d | |||
| 3e2189aeed | |||
| 79f9963792 | |||
| 6f53723169 | |||
| d8cb100fc0 | |||
| 5f9b2f1159 | |||
| 801ca7eda1 | |||
| 45a2d3745c | |||
| 551fe4d846 | |||
| 791981c2f2 | |||
| a18a620847 | |||
| 99e63ffc3f | |||
| e10a6d9de5 | |||
| 147f16571a | |||
| bd1fbc4a05 | |||
| 0843f78ec8 | |||
| 9769fbfcf2 | |||
| 7e73197eb9 | |||
| e3964fd710 | |||
| e66961b814 | |||
| 4176e5a98e | |||
| 45cf8a62d1 | |||
| b1380819ba | |||
| 57fa457596 | |||
| de1e218ce9 | |||
| e117ee2bef | |||
| a9e101d9f4 | |||
| a2f8203a42 | |||
| b9ee127775 | |||
| 6668bb3e8a | |||
| 5fd129e509 | |||
| d59c1f53b9 | |||
| d2f38c1abc | |||
| c0a1db6941 | |||
| fc10b4a79b | |||
| 9da2117e99 | |||
| 7e030b149b | |||
| bd23abd265 | |||
| dd0fb8292c | |||
| b4cbf63519 | |||
| 4fd04fa349 | |||
| c22cdb8d81 | |||
| eb963b2eb4 | |||
| 7d299908c9 | |||
| 2585282f86 | |||
| f25d5b3304 | |||
| 6e878faa8b | |||
| 15a6cbe62b | |||
| 76b0b214ec | |||
| f5c643c960 | |||
| ca8e0613fb | |||
| 0c9334d0d2 | |||
| 712dc97e9b | |||
| 4df48c97ec | |||
| fe3ea53cda | |||
| d385c80882 | |||
| b823213c94 | |||
| 4b86311ab9 | |||
| b9efa8f445 | |||
| f8db12346d | |||
| 4d3948f81f | |||
| 5431d50206 | |||
| 6db078c26a | |||
| f61e9c7f27 | |||
| 567d92ce00 | |||
| 7a6d26c5da | |||
| 046ac85177 | |||
| f0fd088247 | |||
| 5ec0d1e691 | |||
| 9391a934c3 | |||
| bb62e6a318 | |||
| 0da6539c48 | |||
| 9cf833dab2 | |||
| ed57260fcf | |||
| c98f625c4c | |||
| f3008064e4 | |||
| 1faee00764 | |||
| a40505e2ee | |||
| 484202b4c6 | |||
| 6a7fc17c60 | |||
| 05d3897ae2 | |||
| 9f1210202a | |||
| be6b172d6f | |||
| fef9e0a5c1 | |||
| b84b033bf3 | |||
| b30ff1f55a | |||
| c6be0b290b | |||
| 33cfd7a629 | |||
| 5952a5c69d | |||
| 20de563925 | |||
| 7da80b4c62 | |||
| 15d765be6d | |||
| bfe2f116a7 | |||
| f535b3de2f | |||
| e560c18b57 | |||
| aecb99b6a3 | |||
| 7da17f8190 | |||
| 1964270a4f | |||
| f45b61d95c | |||
| ff11c38169 | |||
| 3e67067431 | |||
| 824f00d1e8 | |||
| 96d19f59a4 | |||
| 42c6fe50d2 | |||
| 9242f7095a | |||
| 99c9fbc38f | |||
| 0d31207ad7 | |||
| 8af7dbc35a | |||
| d0a373cb15 | |||
| 3dc87bbca8 | |||
| a55c399585 | |||
| f74aa24dd2 | |||
| 1aa7eb4478 | |||
| 0c7002ba59 | |||
| fd6dd1ea18 | |||
| aa74d5cd82 | |||
| 8fc10a0bdd | |||
| 809ed0f0dc | |||
| b8a4e1c4a3 | |||
| d9e45f732b | |||
| ca025b36f7 | |||
| bfb719d35e | |||
| 2a1b61107f | |||
| 969cee7c90 | |||
| 7a3f579d3e | |||
| 288d5efa88 | |||
| 7be821963c | |||
| a236f8992a | |||
| a5c2257f39 | |||
| 9d3b4ba816 | |||
| 43bf0767f1 | |||
| b301e5b151 | |||
| 2b484c0382 | |||
| f40ab4e2d5 | |||
| c0a27380e9 | |||
| 0d7a3f43c4 | |||
| 8195e439f3 | |||
| b5edbf716c | |||
| 466265fde1 | |||
| 40033e09cd | |||
| 573663412c | |||
| 17599417f7 | |||
| 0ece6d8b0e | |||
| e0ac0393fe | |||
| 6d38b3255c | |||
| 477ff424d6 | |||
| a843104348 | |||
| 0f4bc0981a | |||
| 07f6351465 | |||
| 1b26e86365 | |||
| 94b4bf94c0 | |||
| d5de05b633 | |||
| 0ab6cad048 | |||
| 9833ad548b | |||
| aa1ba3b226 | |||
| 3774d4de28 | |||
| e4961726bc | |||
| 77cf7d0da6 | |||
| a993e0b228 | |||
| 43671a9fd6 | |||
| 49cfd1e9b7 | |||
| 58d4a4f54f | |||
| e4e328ba6a | |||
| fd6bc955ff | |||
| 511a18e0ed | |||
| e29d224a92 | |||
| bb48ffb01f | |||
| 31fd3411f7 | |||
| a737d2675e | |||
| fd462659cd | |||
| cb10d0d465 | |||
| 61f1c4884c | |||
| 2cd00de6e3 | |||
| d3c5d53eae | |||
| 6dfafae342 | |||
| 2f861c3309 | |||
| af388f0f16 | |||
| c36cc86c5f | |||
| 02f195b25c | |||
| 18623fd9b7 | |||
| 9b74bb73aa | |||
| ee9636b496 | |||
| 5c2cbd7840 | |||
| 7fbac6cc17 | |||
| 9e7e9d66bf | |||
| 7fe66aa7fa | |||
| 2dda0efe83 | |||
| 59620ca473 | |||
| 12eae1eff2 | |||
| b03bf87b7d | |||
| c32718b164 | |||
| a6ea12fedc | |||
| 2d260eb0d5 | |||
| d7dd069ae0 | |||
| 6a77a58489 | |||
| c30ac5f927 | |||
| 437f7ef890 | |||
| 1f7347e8de | |||
| 96f59d7cfe | |||
| d55f65c7c9 | |||
| 9a0d5b918f | |||
| 3553fbc7b6 | |||
| 55d53f13d9 | |||
| 27369a650c | |||
| 913f0d5d97 | |||
| ada63ec697 | |||
| 117f06e971 | |||
| 9f03a9a6e2 | |||
| ce406c7088 | |||
| e7127df30d | |||
| 10e2817257 | |||
| 337a47c62b | |||
| 14bdac20ef | |||
| 88e2b3f9aa | |||
| 22d731f06d | |||
| e3d288ef7d | |||
| 455f597543 | |||
| 8c9e626920 | |||
| 5a000c1ff4 | |||
| ddf634bfb2 | |||
| 89d3b8cc6a | |||
| 49af6d09a2 | |||
| e5b0cac284 | |||
| 6f33900f85 | |||
| 514823af7d | |||
| 65b058f563 | |||
| 7c8560deff | |||
| 6bbe2613b4 | |||
| 5771478e4b | |||
| e13030bc89 | |||
| 0a0ac93a55 | |||
| 214fb50e74 | |||
| 959f8ee31e | |||
| cb0d75be37 | |||
| 11353e9e3a | |||
| 8cd5c15c2b | |||
| b86b8b8ee1 | |||
| c5f6e6b028 | |||
| 592d8abc58 | |||
| d93068fc62 | |||
| a864af52df | |||
| 1eedd4b185 | |||
| 9d38edfe95 | |||
| f895ebba73 | |||
| 511287b16e | |||
| 530e06ec66 | |||
| 9cab383b43 | |||
| 9785ab82ed | |||
| 9d237e7bd6 | |||
| 7e9885012d | |||
| 1de785d97c | |||
| 2bd6566537 | |||
| 88fa4cf188 | |||
| b26167481e | |||
| 1b6af9bd12 | |||
| 0159963cb0 | |||
| 996041cabc | |||
| cb0352e33c | |||
| 3169f032c8 | |||
| 5ff8ee1a8f | |||
| d3f31a3ace | |||
| ac7e7f0db9 | |||
| 4c1e967dad | |||
| f3ccd5c074 | |||
| 8369c0e2c0 | |||
| 122a966e72 | |||
| 9c2ff2f862 | |||
| 0ba45e746b | |||
| 54c06cdabb | |||
| 5a2e10317c | |||
| 8292d52acf | |||
| 7d21470fc7 | |||
| eb0530bcba | |||
| 8855092faa | |||
| 2e02a3c71e | |||
| 5b5303ba7f | |||
| 022a54278e | |||
| 19b50dc428 | |||
| e7eac003a9 | |||
| cc17c6b2cd | |||
| 23d16b07aa | |||
| 7ecb3dd771 | |||
| e43f974d34 | |||
| e16cd38722 | |||
| 9d2f81d6b9 | |||
| 3fe539436b | |||
| 76f94eb559 | |||
| 7630ef921d | |||
| 625127d298 | |||
| f24c4d2805 | |||
| 194340afa0 | |||
| fdc9639aba | |||
| f95ec53a85 | |||
| 3d425b7030 | |||
| 37c6c24e0e | |||
| 50bdd7ec7b | |||
| 769cb3e251 | |||
| 9447c45406 | |||
| 66a3962cfe | |||
| d145eacbaf | |||
| ed03ed7bad | |||
| 953b463799 | |||
| 6d28bb0489 | |||
| c2f464ea75 | |||
| 4c56ffc767 | |||
| 885aa8833c | |||
| 63310c44c0 | |||
| 05dd65718f | |||
| 05d3f8a667 | |||
| 3fa45ea728 | |||
| a7d2098f09 | |||
| e1ecb49d59 | |||
| 6facfac4c5 | |||
| 97d2494fe3 | |||
| a54be69c96 | |||
| 800e25a7a7 | |||
| c1ce2977fa | |||
| 7177f82041 | |||
| ecc9415679 | |||
| 23280f1635 | |||
| af062631c5 | |||
| 99b866736c | |||
| 9e74b666fb | |||
| 1cc54f9a71 | |||
| a2d8ef7c63 | |||
| 0835a6d690 | |||
| 67e0a19e13 | |||
| 6fa13f1bcc | |||
| 25604774b9 | |||
| b0fb989be0 | |||
| 61c70b9922 | |||
| 8b5bdf4e88 | |||
| 84d8d4a745 | |||
| 04f93fb90f | |||
| d559af15b8 | |||
| 1510e13066 | |||
| d7cb8842d3 | |||
| 7596411d70 | |||
| 876cef5970 | |||
| 1ebc077721 | |||
| f5acdc0c4b | |||
| 866321da4d | |||
| 58696c5132 | |||
| 3d11dfaae3 | |||
| 3cc320b1d0 | |||
| 2efeab3493 | |||
| a78cab36ab | |||
| 41e525e037 | |||
| 33d5cb3061 | |||
| 0cf523c285 | |||
| 9e346e0780 | |||
| 9cd25e548f | |||
| d221e40602 | |||
| a6ce5ce796 | |||
| 23bfb7b8e6 | |||
| b536c8196b | |||
| 8f677177f2 | |||
| bb7cac4855 | |||
| e62fa01ab3 | |||
| 5be48c5486 | |||
| 6265efd5e7 | |||
| 222d2840cf | |||
| 0e8fe8cd07 | |||
| f96a8bc269 | |||
| bbced03f3a | |||
| ffe0e6dbbe | |||
| 7bb6016f7b | |||
| d5f58eea34 | |||
| 204c659af2 | |||
| ae5eb00bd0 | |||
| 61c059babe | |||
| a4e4d30d5d | |||
| c4fbfd32ee | |||
| 4f5af1f7be | |||
| 22efb96f66 | |||
| b7d3014aab | |||
| fc7e72835e | |||
| d824ced70f | |||
| 2b9147bbc0 | |||
| 1ea9a0af50 | |||
| 7825cf06c0 | |||
| 20a2f56184 | |||
| 22be3ad948 | |||
| dfa5a0f5fe | |||
| b7d046533c | |||
| c74cf59e3b | |||
| 92941260a5 | |||
| 74fdcb0781 | |||
| 6ab38633c2 | |||
| e553ade936 | |||
| b4b3fd9ab6 | |||
| 4fc6f5a094 | |||
| 0f521e2521 | |||
| 088b016083 | |||
| ccb2fcf39b | |||
| f6e82e93e4 | |||
| 02126caacd | |||
| 8f55565dbf | |||
| 3246edd5a8 | |||
| 7e7e067b5b | |||
| fd0bdbce7e | |||
| 5ba8a8f7da | |||
| a799b0931f | |||
| d895786f55 | |||
| 4d89b4a8df | |||
| e544e77fd9 | |||
| 7a5a94614e | |||
| c935811e08 | |||
| cd6d97c3eb | |||
| 292f0624fb | |||
| 9fb0d1f3bf | |||
| d82b722887 | |||
| 3bcab14de1 | |||
| 3ded411c15 | |||
| 706c970314 | |||
| 35dfd540b5 | |||
| 7c4394d8cb | |||
| 69972e6460 | |||
| dd19f11e29 | |||
| b312414957 | |||
| f3910f03ca | |||
| 080c667d9c | |||
| 2bf0614da1 | |||
| bd5bdfa8b5 | |||
| d1582a435d | |||
| d27aa8821a | |||
| f5f08cf9ff | |||
| 91877f7b2d | |||
| 7f9b078430 | |||
| e9a01fa0c8 | |||
| 1f33674f30 | |||
| badf58db3e | |||
| 58c98be9a7 | |||
| 71a5555816 | |||
| aee20c0423 | |||
| 95faccd8d9 | |||
| 85ec4a3de7 | |||
| e79b6ade51 | |||
| 79ccfa087d | |||
| 9a349eff32 | |||
| e756a442f6 | |||
| 332c860e80 | |||
| d934671202 | |||
| 747359478d | |||
| f0f18db5a8 | |||
| 2f068c66d4 | |||
| fa5415b1eb | |||
| e23fd5e3c5 | |||
| 996c838320 | |||
| 083b8a6d7b | |||
| 58625c82a0 | |||
| 74211dd2c9 | |||
| cd4808e5e2 | |||
| e44ad19bda | |||
| e31d413551 | |||
| d170a3e5e1 | |||
| 10c3872902 | |||
| a3c658d6b8 | |||
| 0c8cf9cc0d | |||
| b92a79eb9d | |||
| ba2c6c1219 | |||
| f0a5cc6116 | |||
| 9fbe209c04 | |||
| b8bbd73e07 | |||
| 2e40325f2a | |||
| bdbd336db5 | |||
| 5c645ea224 | |||
| 5c2985cac7 | |||
| a0d1016c01 | |||
| 816cf44117 | |||
| 1119a62576 | |||
| 152cb48340 | |||
| 5a4c2a4974 | |||
| 9063e8f4a4 | |||
| 519552083b | |||
| f6ad66bd30 | |||
| 65affb5146 | |||
| 38d9378e48 | |||
| cd8011e858 | |||
| e352562c51 | |||
| 3dd2a19915 | |||
| 384286cf0a | |||
| 162281e753 | |||
| 9a3603fea3 | |||
| 121dc85e40 | |||
| 823bad2ace | |||
| c9ed8543c8 | |||
| d752c68790 | |||
| 39c0409d42 | |||
| 7937738258 | |||
| ea092bda99 | |||
| 3b908a5857 | |||
| 536e14dfe8 | |||
| edd2c5f779 | |||
| ee76c2c06e | |||
| 0190a92c26 | |||
| 730da103b1 | |||
| ffe30289ee | |||
| cfe9af5c3c | |||
| 9647ce153d | |||
| bec63c1ad0 | |||
| 3dec0ff85b | |||
| 8868d8e99e | |||
| 9a0d9506f8 | |||
| ec57b813da | |||
| 57c4d96467 | |||
| ebaf0a08b5 | |||
| abae4b5106 | |||
| 81b721be2b | |||
| e9108ae3f8 | |||
| acc7b65649 | |||
| 77ed177855 | |||
| 71cb5c579d | |||
| f22e4c261a | |||
| 4a046ca70e | |||
| 62ee3fa0f1 | |||
| ef0403af0e | |||
| 044b27967e | |||
| 4942a2480b | |||
| cde852f0f9 | |||
| a435e88b25 | |||
| add954db80 | |||
| 76ec639fea | |||
| 155decfca1 | |||
| 3ab131786b | |||
| a273dea755 | |||
| 9a6d8e3a33 | |||
| 4d9ecef6fd | |||
| 4df72a4015 | |||
| c3e2dc4ce8 | |||
| 5a1bcd53a3 | |||
| 4c8865ed5b | |||
| cb6bd50db0 | |||
| 614dbb3674 | |||
| 8ed8bb2dad | |||
| a1f6985534 | |||
| 6bdf2e38cc | |||
| 19ae55ebd4 | |||
| 61cef46a95 | |||
| 172738f7b7 | |||
| 5d0d60d89e | |||
| 3cfdc64735 | |||
| b47069ccad | |||
| 2051b3921b | |||
| 46f46483f8 | |||
| 66b02b58b6 | |||
| 4428c3d7d8 | |||
| 2d4b9786fa | |||
| d2d9c4be6f | |||
| a9d6ac29f1 | |||
| 4d50bd5c78 | |||
| fdd651b9cc | |||
| 7b56f102cc | |||
| e329360daa | |||
| 5e8a431a92 | |||
| cd3f21a92e | |||
| 03d3ae3eb4 | |||
| 0c350dcf6e | |||
| c6b3d15d72 | |||
| 8d7f7cb438 | |||
| b5a4121574 | |||
| 916ca87db4 | |||
| bfea97f14e | |||
| f98657aca8 | |||
| 45c5e770fa | |||
| f4ce7ecf4b | |||
| 8dfe1fe97f | |||
| 4bf165efaf | |||
| c7f6ae5be9 | |||
| d83d2d5f4e | |||
| 2362b2a5a0 | |||
| fb08a17ec8 | |||
| 1bcc2d544a | |||
| 6fd1205681 | |||
| da2b00c9cf | |||
| f6213595d1 | |||
| b1b2bd5b97 | |||
| aa19cbbfc7 | |||
| 8d39faddc9 | |||
| 52714dbcc9 | |||
| be92d3a0bc | |||
| f3189f72fd | |||
| 144c1d4e2f | |||
| e5964f9d93 | |||
| ea30cbe117 | |||
| 598a9664a7 | |||
| d04628a42d | |||
| 7bce63d74e | |||
| 452fe9f76d | |||
| 7983ff5db2 | |||
| c361ab954d | |||
| a8735a6465 | |||
| 76255c0dd4 | |||
| 87655ff3cd | |||
| fc7be2ac1a | |||
| e93b95bee8 | |||
| 6a18d6918e | |||
| 578ce09b5e | |||
| 27f6177fc9 | |||
| 20c0deeac4 | |||
| f1f8cdb6e9 | |||
| 345e4e846c | |||
| 6f57b36158 | |||
| 0264e10e69 | |||
| 067f5bf5a3 | |||
| c81b643cdf | |||
| 388ad077d6 | |||
| db93cdd95f | |||
| 68304a3fc1 | |||
| 13259c114a | |||
| 5131ba453d | |||
| 8fdc9939cd | |||
| c15449492a | |||
| 1cab1e06d9 | |||
| 4831926869 | |||
| 4fcf25077b | |||
| c32461f322 | |||
| 0abe6fc0b4 | |||
| edc3d53f94 | |||
| bb5fbbe746 | |||
| 36f3e3fe50 | |||
| 65c8000f66 | |||
| 2d45f8bc40 | |||
| 7a0d4ad508 | |||
| 5ae93bb569 | |||
| aa6ca46792 | |||
| e8c11f6e15 | |||
| 08bb8e3df9 | |||
| d62bf6812e | |||
| 422abc205b | |||
| 1269104112 | |||
| 97d762f01f | |||
| 671b5e29d0 | |||
| c7538a35a2 | |||
| 458658a71b |
+4
-4
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"env": {
|
||||
"es6": true,
|
||||
"node": true
|
||||
"node": true,
|
||||
"es6": true
|
||||
},
|
||||
"extends": "eslint:recommended",
|
||||
"parserOptions": {
|
||||
"ecmaVersion": 2017
|
||||
"ecmaVersion": 8
|
||||
},
|
||||
"rules": {
|
||||
"indent": [
|
||||
@@ -26,4 +26,4 @@
|
||||
],
|
||||
"no-console": "off"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1532,3 +1532,164 @@
|
||||
* DNS and backup credential secrets are not returned in API calls anymore
|
||||
* Send notification when an app that went down, came back up
|
||||
|
||||
[3.5.2]
|
||||
* Fix encoding of links in plain text email
|
||||
* Hide mail relay password
|
||||
* Do not return API tokens in REST API
|
||||
|
||||
[3.5.3]
|
||||
* Make reboot required check server side
|
||||
* Update node to 10.15.1
|
||||
* Enable gzip compression for large objects
|
||||
* Update docker to 18.09
|
||||
* Add a way to lock specific settings
|
||||
* Add UI to copy app's backup id
|
||||
* Block platform updates based on app manifest constraints
|
||||
* Make crash logs viewable via the dashboard
|
||||
* Fix issue where uploading of filenames with brackets and plus was not working
|
||||
* Add notification for cert renewal and backup failures
|
||||
* Fix issue where mail container was not updated with the latest certificate
|
||||
|
||||
[3.5.4]
|
||||
* Make reboot required check server side
|
||||
* Update node to 10.15.1
|
||||
* Enable gzip compression for large objects
|
||||
* Update docker to 18.09
|
||||
* Add a way to lock specific settings
|
||||
* Add UI to copy app's backup id
|
||||
* Block platform updates based on app manifest constraints
|
||||
* Make crash logs viewable via the dashboard
|
||||
* Fix issue where uploading of filenames with brackets and plus was not working
|
||||
* Add notification for cert renewal and backup failures
|
||||
* Fix issue where mail container was not updated with the latest certificate
|
||||
|
||||
[4.0.0]
|
||||
* (mail) Bump mail_max_userip_connections to 50
|
||||
* Fix issue where DKIM was not setup correctly during a restore
|
||||
* (mysql) Remove any stale lock file on restart
|
||||
* Add a way to disable outbound mail for a domain
|
||||
* Cleanup task logs
|
||||
* Fix issue where dashboard location might conflict with existing app location
|
||||
* Add graphite to services
|
||||
* Add labels and tags to apps
|
||||
* Ensure MySQL is storing data/time in UTC
|
||||
* Fix bug where the UI redirects to login screen when enabling 2FA with invalid token
|
||||
* Use unbound resolver when resolving NS record of a domain
|
||||
* Various fixes for notifications
|
||||
* Add FTP support for apps
|
||||
* Add app version as part of info dialog
|
||||
* (backup) Do not abort archive if file(s) disappear
|
||||
* Show app upstream version in the info dialog
|
||||
* Add Scaleway ObjectStorage backup backend
|
||||
* Preserve update backups for 3 weeks
|
||||
* Make send test mail functionality work with secondary domain
|
||||
* Add support for an external email relay that does not require authentication
|
||||
* Add option to accept self-signed certs when using external mail relay
|
||||
* Allow publishing and listing community supported apps
|
||||
* Remove spaces support
|
||||
* Features implementation for customization
|
||||
|
||||
[4.0.1]
|
||||
* Make it easier to import email
|
||||
* Give SFTP access only to admins
|
||||
|
||||
[4.0.2]
|
||||
* Fix GCDNS crash
|
||||
* Add option to update without backing up
|
||||
|
||||
[4.0.3]
|
||||
* Fix dashboard issue for non-admins
|
||||
|
||||
[4.1.0]
|
||||
* Remove password requirement for uninstalling apps and users
|
||||
* Hosting provider edition
|
||||
* Enforce limits in mail container
|
||||
* Fix crash when using unauthenticated relay
|
||||
* Fix domain and tag filtering
|
||||
* Customizable app icons
|
||||
* Remove obsolete X-Frame-Options from nginx configs
|
||||
* Give SFTP access based on access restriction
|
||||
|
||||
[4.1.1]
|
||||
* Add UI hint about SFTP access restriction
|
||||
|
||||
[4.1.2]
|
||||
* Accept incoming mail from a private relay
|
||||
* Fix issue where unused addon images were not pruned
|
||||
* Add UI for redirect from multiple domains
|
||||
* Allow apps to be relocated to custom data directory
|
||||
* Make all cloudron env vars have CLOUDRON_ prefix
|
||||
* Update manifest version to 2
|
||||
* Fix issue where DKIM keys were inaccessible
|
||||
* Fix DKIM selector conflict when adding same domain across multiple cloudrons
|
||||
* Fix name.com DNS backend issue for naked domains
|
||||
* Add DigitalOcean Frankfurt (fra1) region for backup storage
|
||||
|
||||
[4.1.3]
|
||||
* Update manifest format package
|
||||
|
||||
[4.1.4]
|
||||
* Add CLOUDRON_ prefix to MySQL addon variables
|
||||
|
||||
[4.1.5]
|
||||
* Make the terminal addon button inject variables based on manifest version
|
||||
* Preserve addon passwords correctly when using v2 manifest
|
||||
* Show error message instead of logging out user when invalid 2FA token is provided
|
||||
* Ensure redis vars are renamed with manifest v2
|
||||
* Add missing Scaleway Object Storage to restore UI
|
||||
* Fix Exoscale endpoints in restore UI
|
||||
* Reset the app icon when showing the configure UI
|
||||
|
||||
[4.1.6]
|
||||
* Fix issue where CLOUDRON_APP_HOSTNAME was incorrectly set
|
||||
* Remove chat link from the footer of login screen
|
||||
* Add support for oplog tailing in mongodb
|
||||
* Fix LDAP not accessible via scheduler containers
|
||||
|
||||
[4.1.7]
|
||||
* Fix issue where login looped when admin bit was removed
|
||||
|
||||
[4.2.0]
|
||||
* Fix issue where tar backups with files > 8GB were corrupt
|
||||
* Add SparkPost as mail relay backend
|
||||
* Add Wasabi storage backend
|
||||
* TOTP tokens are now checked for with +- 60 seconds
|
||||
* IP based restore
|
||||
* Fix issue where task logs were not getting rotated correctly
|
||||
* Add notification for box update
|
||||
* User enable/disable flag
|
||||
* Check disk space before various operations like install, update, backup etc
|
||||
* Collect per app du information
|
||||
* Set Cloudron specific UA for healthchecks
|
||||
* Show message why an app task is 'pending'
|
||||
* Rework app task system so that we can now pass dynamic arguments
|
||||
* Add external LDAP server integration
|
||||
|
||||
[4.2.1]
|
||||
* Rework the app configuration routes & UI
|
||||
* Fine grained eventlog for app configuration
|
||||
* Update Haraka to 2.8.24
|
||||
* Set sieve_max_redirects to 64
|
||||
* SRS support for mail forwarding
|
||||
* Fix issue where sieve responses were not sent via the relay
|
||||
* File based session store
|
||||
* Fix API token error reporting for namecheap backend
|
||||
|
||||
[4.2.2]
|
||||
* Fix typos in migration
|
||||
|
||||
[4.2.3]
|
||||
* Remove flicker of custom icon
|
||||
* Preserve PROVIDER setting from cloudron.conf
|
||||
* Add Skip backup option when updating an app
|
||||
* Fix bug where nginx was not reloaded on cert renewal
|
||||
|
||||
[4.2.4]
|
||||
* Fix demo settings state regression
|
||||
|
||||
[4.2.5]
|
||||
* Fix the demo settings fix
|
||||
|
||||
[4.2.6]
|
||||
* Fix configuration of empty app location (subdomain)
|
||||
|
||||
|
||||
@@ -1,661 +1,35 @@
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
The Cloudron Subscription license
|
||||
Copyright (c) 2019 Cloudron UG
|
||||
|
||||
With regard to the Cloudron Software:
|
||||
|
||||
This software and associated documentation files (the "Software") may only be
|
||||
used in production, if you (and any entity that you represent) have agreed to,
|
||||
and are in compliance with, the Cloudron Subscription Terms of Service, available
|
||||
at https://cloudron.io/legal/terms.html (the “Subscription Terms”), or other
|
||||
agreement governing the use of the Software, as agreed by you and Cloudron,
|
||||
and otherwise have a valid Cloudron Subscription. Subject to the foregoing sentence,
|
||||
you are free to modify this Software and publish patches to the Software. You agree
|
||||
that Subscription and/or its licensors (as applicable) retain all right, title and
|
||||
interest in and to all such modifications and/or patches, and all such modifications
|
||||
and/or patches may only be used, copied, modified, displayed, distributed, or otherwise
|
||||
exploited with a valid Cloudron subscription. Notwithstanding the foregoing, you may copy
|
||||
and modify the Software for development and testing purposes, without requiring a
|
||||
subscription. You agree that Cloudron and/or its licensors (as applicable) retain
|
||||
all right, title and interest in and to all such modifications. You are not
|
||||
granted any other rights beyond what is expressly stated herein. Subject to the
|
||||
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
|
||||
and/or sell the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
For all third party components incorporated into the Cloudron Software, those
|
||||
components are licensed under the original license provided by the owner of the
|
||||
applicable component.
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
box
|
||||
Copyright (C) 2016-2019 Cloudron UG
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
@@ -37,16 +37,11 @@ anyone to effortlessly host web applications on their server on their own terms.
|
||||
|
||||
## Demo
|
||||
|
||||
Try our demo at https://my-demo.cloudron.me (username: cloudron password: cloudron).
|
||||
Try our demo at https://my.demo.cloudron.io (username: cloudron password: cloudron).
|
||||
|
||||
## Installing
|
||||
|
||||
You can install the Cloudron platform on your own server or get a managed server
|
||||
from cloudron.io. In either case, the Cloudron platform will keep your server and
|
||||
apps up-to-date and secure.
|
||||
|
||||
* [Selfhosting](https://cloudron.io/documentation/installation/) - [Pricing](https://cloudron.io/pricing.html)
|
||||
* [Managed Hosting](https://cloudron.io/managed.html)
|
||||
[Install script](https://cloudron.io/documentation/installation/) - [Pricing](https://cloudron.io/pricing.html)
|
||||
|
||||
**Note:** This repo is a small part of what gets installed on your server - there is
|
||||
the dashboard, database addons, graph container, base image etc. Cloudron also relies
|
||||
@@ -64,6 +59,7 @@ the containers in the Cloudron.
|
||||
|
||||
## Community
|
||||
|
||||
* [Chat](https://chat.cloudron.io)
|
||||
* [Forum](https://forum.cloudron.io/)
|
||||
* [Support](mailto:support@cloudron.io)
|
||||
|
||||
|
||||
@@ -1,179 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
assertNotEmpty() {
|
||||
: "${!1:? "$1 is not set."}"
|
||||
}
|
||||
|
||||
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
|
||||
export JSON="${SOURCE_DIR}/node_modules/.bin/json"
|
||||
|
||||
revision=$(git rev-parse HEAD)
|
||||
box_name=""
|
||||
server_id=""
|
||||
server_ip=""
|
||||
destroy_server="yes"
|
||||
deploy_env="dev"
|
||||
|
||||
# Only GNU getopt supports long options. OS X comes bundled with the BSD getopt
|
||||
# brew install gnu-getopt to get the GNU getopt on OS X
|
||||
[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt"
|
||||
readonly GNU_GETOPT
|
||||
|
||||
args=$(${GNU_GETOPT} -o "" -l "revision:,regions:,size:,name:,no-destroy,env:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--env) deploy_env="$2"; shift 2;;
|
||||
--revision) revision="$2"; shift 2;;
|
||||
--name) box_name="$2"; destroy_server="no"; shift 2;;
|
||||
--no-destroy) destroy_server="no"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "Creating digitalocean image"
|
||||
if [[ "${deploy_env}" == "staging" ]]; then
|
||||
assertNotEmpty DIGITAL_OCEAN_TOKEN_STAGING
|
||||
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_STAGING}"
|
||||
elif [[ "${deploy_env}" == "dev" ]]; then
|
||||
assertNotEmpty DIGITAL_OCEAN_TOKEN_DEV
|
||||
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_DEV}"
|
||||
elif [[ "${deploy_env}" == "prod" ]]; then
|
||||
assertNotEmpty DIGITAL_OCEAN_TOKEN_PROD
|
||||
export DIGITAL_OCEAN_TOKEN="${DIGITAL_OCEAN_TOKEN_PROD}"
|
||||
else
|
||||
echo "No such env ${deploy_env}."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
vps="/bin/bash ${SCRIPT_DIR}/digitalocean.sh"
|
||||
|
||||
readonly ssh_keys="${HOME}/.ssh/id_rsa_caas_${deploy_env}"
|
||||
readonly scp202="scp -P 202 -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
readonly scp22="scp -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
|
||||
readonly ssh202="ssh -p 202 -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
readonly ssh22="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
|
||||
if [[ ! -f "${ssh_keys}" ]]; then
|
||||
echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function get_pretty_revision() {
|
||||
local git_rev="$1"
|
||||
local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)
|
||||
|
||||
echo "${sha1}"
|
||||
}
|
||||
|
||||
now=$(date "+%Y-%m-%d-%H%M%S")
|
||||
pretty_revision=$(get_pretty_revision "${revision}")
|
||||
|
||||
if [[ -z "${box_name}" ]]; then
|
||||
# if you change this, change the regexp is appstore/janitor.js
|
||||
box_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes
|
||||
|
||||
# create a new server if no name given
|
||||
if ! caas_ssh_key_id=$($vps get_ssh_key_id "caas"); then
|
||||
echo "Could not query caas ssh key"
|
||||
exit 1
|
||||
fi
|
||||
echo "Detected caas ssh key id: ${caas_ssh_key_id}"
|
||||
|
||||
echo "Creating Server with name [${box_name}]"
|
||||
if ! server_id=$($vps create ${caas_ssh_key_id} ${box_name}); then
|
||||
echo "Failed to create server"
|
||||
exit 1
|
||||
fi
|
||||
echo "Created server with id: ${server_id}"
|
||||
|
||||
# If we run scripts overenthusiastically without the wait, setup script randomly fails
|
||||
echo -n "Waiting 120 seconds for server creation"
|
||||
for i in $(seq 1 24); do
|
||||
echo -n "."
|
||||
sleep 5
|
||||
done
|
||||
echo ""
|
||||
else
|
||||
if ! server_id=$($vps get_id "${box_name}"); then
|
||||
echo "Could not determine id from name"
|
||||
exit 1
|
||||
fi
|
||||
echo "Reusing server with id: ${server_id}"
|
||||
|
||||
$vps power_on "${server_id}"
|
||||
fi
|
||||
|
||||
# Query until we get an IP
|
||||
while true; do
|
||||
echo "Trying to get the server IP"
|
||||
if server_ip=$($vps get_ip "${server_id}"); then
|
||||
echo "Server IP : [${server_ip}]"
|
||||
break
|
||||
fi
|
||||
echo "Timedout, trying again in 10 seconds"
|
||||
sleep 10
|
||||
done
|
||||
|
||||
while true; do
|
||||
echo "Trying to copy init script to server"
|
||||
if $scp22 "${SCRIPT_DIR}/initializeBaseUbuntuImage.sh" root@${server_ip}:.; then
|
||||
break
|
||||
fi
|
||||
echo "Timedout, trying again in 30 seconds"
|
||||
sleep 30
|
||||
done
|
||||
|
||||
echo "Copying infra_version.js"
|
||||
$scp22 "${SCRIPT_DIR}/../src/infra_version.js" root@${server_ip}:.
|
||||
|
||||
echo "Copying box source"
|
||||
cd "${SOURCE_DIR}"
|
||||
git archive --format=tar HEAD | $ssh22 "root@${server_ip}" "cat - > /tmp/box.tar.gz"
|
||||
|
||||
echo "Executing init script"
|
||||
if ! $ssh22 "root@${server_ip}" "/bin/bash /root/initializeBaseUbuntuImage.sh caas"; then
|
||||
echo "Init script failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Shutting down server with id : ${server_id}"
|
||||
$ssh22 "root@${server_ip}" "shutdown -f now" || true # shutdown sometimes terminates ssh connection immediately making this command fail
|
||||
|
||||
# wait 10 secs for actual shutdown
|
||||
echo "Waiting for 10 seconds for server to shutdown"
|
||||
sleep 30
|
||||
|
||||
echo "Powering off server"
|
||||
if ! $vps power_off "${server_id}"; then
|
||||
echo "Could not power off server"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
snapshot_name="box-${deploy_env}-${pretty_revision}-${now}"
|
||||
echo "Snapshotting as ${snapshot_name}"
|
||||
if ! image_id=$($vps snapshot "${server_id}" "${snapshot_name}"); then
|
||||
echo "Could not snapshot and get image id"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${destroy_server}" == "yes" ]]; then
|
||||
echo "Destroying server"
|
||||
if ! $vps destroy "${server_id}"; then
|
||||
echo "Could not destroy server"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Skipping server destroy"
|
||||
fi
|
||||
|
||||
echo "Transferring image ${image_id} to other regions"
|
||||
$vps transfer_image_to_all_regions "${image_id}"
|
||||
|
||||
echo "Done."
|
||||
Regular → Executable
+20
-9
@@ -28,51 +28,61 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
|
||||
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
|
||||
# resolvconf is needed for unbound to work property after disabling systemd-resolved in 18.04
|
||||
ubuntu_version=$(lsb_release -rs)
|
||||
ubuntu_codename=$(lsb_release -cs)
|
||||
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
|
||||
apt-get -y install \
|
||||
acl \
|
||||
build-essential \
|
||||
cifs-utils \
|
||||
cron \
|
||||
curl \
|
||||
debconf-utils \
|
||||
dmsetup \
|
||||
$gpg_package \
|
||||
iptables \
|
||||
libpython2.7 \
|
||||
linux-generic \
|
||||
logrotate \
|
||||
mysql-server-5.7 \
|
||||
nginx-full \
|
||||
openssh-server \
|
||||
pwgen \
|
||||
resolvconf \
|
||||
sudo \
|
||||
swaks \
|
||||
tzdata \
|
||||
unattended-upgrades \
|
||||
unbound \
|
||||
xfsprogs
|
||||
|
||||
# on some providers like scaleway the sudo file is changed and we want to keep the old one
|
||||
apt-get -o Dpkg::Options::="--force-confold" install -y sudo
|
||||
|
||||
# this ensures that unattended upgades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
mkdir -p /usr/local/node-8.9.3
|
||||
curl -sL https://nodejs.org/dist/v8.9.3/node-v8.9.3-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-8.9.3
|
||||
ln -sf /usr/local/node-8.9.3/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-8.9.3/bin/npm /usr/bin/npm
|
||||
mkdir -p /usr/local/node-10.15.1
|
||||
curl -sL https://nodejs.org/dist/v10.15.1/node-v10.15.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-10.15.1
|
||||
ln -sf /usr/local/node-10.15.1/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-10.15.1/bin/npm /usr/bin/npm
|
||||
apt-get install -y python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
|
||||
# create systemd drop-in file
|
||||
# create systemd drop-in file. if you channge options here, be sure to fixup installer.sh as well
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.03.1~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.2-3_amd64.deb" -o /tmp/containerd.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/docker.deb
|
||||
rm /tmp/docker.deb
|
||||
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "overlay2" ]]; then
|
||||
@@ -110,6 +120,7 @@ fi
|
||||
echo "==> Configuring host"
|
||||
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
|
||||
timedatectl set-ntp 1
|
||||
# mysql follows the system timezone
|
||||
timedatectl set-timezone UTC
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflicts with unbound)
|
||||
|
||||
@@ -14,25 +14,14 @@
|
||||
require('supererror')({ splatchError: true });
|
||||
|
||||
let async = require('async'),
|
||||
config = require('./src/config.js'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
constants = require('./src/constants.js'),
|
||||
dockerProxy = require('./src/dockerproxy.js'),
|
||||
ldap = require('./src/ldap.js'),
|
||||
server = require('./src/server.js');
|
||||
|
||||
console.log();
|
||||
console.log('==========================================');
|
||||
console.log(' Cloudron will use the following settings ');
|
||||
console.log('==========================================');
|
||||
console.log();
|
||||
console.log(' Environment: ', config.CLOUDRON ? 'CLOUDRON' : 'TEST');
|
||||
console.log(' Version: ', config.version());
|
||||
console.log(' Admin Origin: ', config.adminOrigin());
|
||||
console.log(' Appstore API server origin: ', config.apiServerOrigin());
|
||||
console.log(' Appstore Web server origin: ', config.webServerOrigin());
|
||||
console.log(' SysAdmin Port: ', config.get('sysadminPort'));
|
||||
console.log(' LDAP Server Port: ', config.get('ldapPort'));
|
||||
console.log(' Docker Proxy Port: ', config.get('dockerProxyPort'));
|
||||
console.log();
|
||||
console.log(` Cloudron ${constants.VERSION} `);
|
||||
console.log('==========================================');
|
||||
console.log();
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
var database = require('./src/database.js');
|
||||
|
||||
var sendFailureLogs = require('./src/logcollector').sendFailureLogs;
|
||||
var crashNotifier = require('./src/crashnotifier.js');
|
||||
|
||||
// This is triggered by systemd with the crashed unit name as argument
|
||||
function main() {
|
||||
@@ -17,7 +17,11 @@ function main() {
|
||||
database.initialize(function (error) {
|
||||
if (error) return console.error('Cannot connect to database. Unable to send crash log.', error);
|
||||
|
||||
sendFailureLogs(unitName);
|
||||
crashNotifier.sendFailureLogs(unitName, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
process.exit();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
'use strict';
|
||||
|
||||
let async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('SELECT * FROM tokens WHERE clientId=?', ['cid-sdk'], function (error, tokens) {
|
||||
if (error) console.error(error);
|
||||
|
||||
async.eachSeries(tokens, function (token, iteratorDone) {
|
||||
if (token.name) return iteratorDone();
|
||||
db.runSql('UPDATE tokens SET name=? WHERE accessToken=?', [ 'Unnamed-' + token.accessToken.slice(0,8), token.accessToken ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
var uuid = require('uuid');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
|
||||
db.runSql.bind(db, 'ALTER TABLE tokens ADD COLUMN id VARCHAR(128)'),
|
||||
|
||||
function (done) {
|
||||
db.runSql('SELECT * FROM tokens', function (error, tokens) {
|
||||
async.eachSeries(tokens, function (token, iteratorDone) {
|
||||
db.runSql('UPDATE tokens SET id=? WHERE accessToken=?', [ 'tid-'+uuid.v4(), token.accessToken ], iteratorDone);
|
||||
}, done);
|
||||
});
|
||||
},
|
||||
|
||||
db.runSql.bind(db, 'ALTER TABLE tokens MODIFY id VARCHAR(128) NOT NULL UNIQUE'),
|
||||
db.runSql.bind(db, 'COMMIT'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE tokens DROP COLUMN id'),
|
||||
], callback);
|
||||
};
|
||||
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings ADD COLUMN locked BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings DROP COLUMN locked', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE notifications DROP COLUMN action', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE notifications ADD COLUMN action VARCHAR(512) NOT NULL', callback);
|
||||
};
|
||||
@@ -0,0 +1,27 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async'),
|
||||
crypto = require('crypto'),
|
||||
fs = require('fs'),
|
||||
os = require('os'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
tldjs = require('tldjs');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM apps, subdomains WHERE apps.id=subdomains.appId AND type="primary"', function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
if (app.mailboxName) return iteratorDone();
|
||||
|
||||
const mailboxName = (app.subdomain ? app.subdomain : JSON.parse(app.manifestJson).title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
|
||||
db.runSql('UPDATE apps SET mailboxName=? WHERE id=?', [ mailboxName, app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN label VARCHAR(128)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN label', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN tagsJson VARCHAR(2048)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN tagsJson ', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN preserveSecs INTEGER DEFAULT 0', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN preserveSecs', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,19 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.provider !== 'caas') return callback();
|
||||
|
||||
backupConfig.boxId = backupConfig.prefix; // hack to set the boxId that happens to match the prefix
|
||||
delete backupConfig.fqdn;
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,51 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async'),
|
||||
fs = require('fs'),
|
||||
superagent = require('superagent');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
if (!fs.existsSync('/etc/cloudron/cloudron.conf')) {
|
||||
console.log('Unable to locate cloudron.conf');
|
||||
return callback();
|
||||
}
|
||||
|
||||
const config = JSON.parse(fs.readFileSync('/etc/cloudron/cloudron.conf', 'utf8'));
|
||||
|
||||
db.all('SELECT * FROM settings WHERE name="appstore_config"', function (error, results) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (results.length === 0) {
|
||||
console.log('No appstore config, skipping license migration');
|
||||
return callback();
|
||||
}
|
||||
|
||||
console.log('Downloading license');
|
||||
|
||||
const appstoreConfig = JSON.parse(results[0].value);
|
||||
|
||||
superagent.get(`${config.apiServerOrigin}/api/v1/cloudron_license`)
|
||||
.query({ accessToken: appstoreConfig.token, cloudronId: appstoreConfig.cloudronId, provider: config.provider })
|
||||
.timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new Error('Network error getting license:' + error.message));
|
||||
if (result.statusCode !== 200) return callback(new Error(`Bad status getting license: ${result.status} ${result.text}`));
|
||||
|
||||
if (!result.body.cloudronId || !result.body.licenseKey || !result.body.cloudronToken) return callback(new Error(`Bad response getting license: ${result.text}`));
|
||||
|
||||
console.log('Adding license', result.body);
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
db.runSql.bind(db, 'INSERT settings (name, value) VALUES(?, ?)', [ 'cloudron_id', result.body.cloudronId ]),
|
||||
db.runSql.bind(db, 'INSERT settings (name, value) VALUES(?, ?)', [ 'license_key', result.body.licenseKey ]),
|
||||
db.runSql.bind(db, 'INSERT settings (name, value) VALUES(?, ?)', [ 'cloudron_token', result.body.cloudronToken ]),
|
||||
db.runSql.bind(db, 'DELETE FROM settings WHERE name=?', [ 'appstore_config' ]),
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,13 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE tokens SET expires=? WHERE clientId=?', [ 1557089270832, 'cid-webadmin' ], function (error) { // force webadmin to get a new token
|
||||
if (error) console.error(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings DROP COLUMN locked', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings ADD COLUMN locked BOOLEAN DEFAULT 0', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mail ADD COLUMN dkimSelector VARCHAR(128) NOT NULL DEFAULT "cloudron"', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mail DROP COLUMN dkimSelector', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,14 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP FOREIGN KEY apps_owner_constraint'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN ownerId')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async'),
|
||||
fs = require('fs');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
if (!fs.existsSync('/etc/cloudron/cloudron.conf')) {
|
||||
console.log('Unable to locate cloudron.conf');
|
||||
return callback();
|
||||
}
|
||||
|
||||
const config = JSON.parse(fs.readFileSync('/etc/cloudron/cloudron.conf', 'utf8'));
|
||||
|
||||
async.series([
|
||||
fs.writeFile.bind(null, '/etc/cloudron/PROVIDER', config.provider, 'utf8'),
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
// we use replace instead of insert because the cloudron-setup adds api/web_server_origin even for legacy setups
|
||||
db.runSql.bind(db, 'REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'api_server_origin', config.apiServerOrigin ]),
|
||||
db.runSql.bind(db, 'REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'web_server_origin', config.webServerOrigin ]),
|
||||
db.runSql.bind(db, 'REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'admin_domain', config.adminDomain ]),
|
||||
db.runSql.bind(db, 'REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'admin_fqdn', config.adminFqdn ]),
|
||||
db.runSql.bind(db, 'REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'demo', config.isDemo ]),
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN active BOOLEAN DEFAULT 1', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN active', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN taskId INTEGER'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps ADD CONSTRAINT apps_task_constraint FOREIGN KEY(taskId) REFERENCES tasks(id)')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE app DROP FOREIGN KEY apps_task_constraint'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN taskId'),
|
||||
], callback);
|
||||
};
|
||||
@@ -0,0 +1,12 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP updateConfigJson, DROP restoreConfigJson, DROP oldConfigJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE installationProgress errorJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE errorJson installationProgress TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,17 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN source VARCHAR(128) DEFAULT ""', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN source', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
'use strict';
|
||||
|
||||
let async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE tasks CHANGE errorMessage errorJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
// convert error messages into json
|
||||
db.all('SELECT id, errorJson FROM apps', function (error, apps) {
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
if (app.errorJson === 'null') return iteratorDone();
|
||||
if (app.errorJson === null) return iteratorDone();
|
||||
|
||||
db.runSql('UPDATE apps SET errorJson = ? WHERE id = ?', [ JSON.stringify({ message: app.errorJson }), app.id ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE tasks CHANGE errorJson errorMessage TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,21 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// imports mailbox entries for existing users
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM mailboxes', function (error, mailboxes) {
|
||||
async.eachSeries(mailboxes, function (mailbox, iteratorDone) {
|
||||
if (!mailbox.membersJson) return iteratorDone();
|
||||
|
||||
let members = JSON.parse(mailbox.membersJson);
|
||||
members = members.map((m) => m.indexOf('@') === -1 ? `${m}@${mailbox.domain}` : m); // only because we don't do things in a xction
|
||||
|
||||
db.runSql('UPDATE mailboxes SET membersJson=? WHERE name=? AND domain=?', [ JSON.stringify(members), mailbox.name, mailbox.domain ], iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,19 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('UPDATE apps SET runState=? WHERE runState IS NULL', [ 'running' ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
db.runSql('ALTER TABLE apps MODIFY runState VARCHAR(512) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE app MODIFY runState VARCHAR(512)', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,10 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
// We clear all demo state in the Cloudron...the demo cloudron needs manual intervention afterwards
|
||||
db.runSql('REPLACE INTO settings (name, value) VALUES(?, ?)', [ 'demo', '' ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
+22
-19
@@ -8,6 +8,7 @@
|
||||
#### TEXT - stored offline from table row (use for strings)
|
||||
#### BLOB - stored offline from table row (use for binary data)
|
||||
#### https://dev.mysql.com/doc/refman/5.0/en/storage-requirements.html
|
||||
#### Times are stored in the database in UTC. And precision is seconds
|
||||
|
||||
# The code uses zero dates. Make sure sql_mode does NOT have NO_ZERO_DATE
|
||||
# http://johnemb.blogspot.com/2014/09/adding-or-removing-individual-sql-modes.html
|
||||
@@ -26,6 +27,7 @@ CREATE TABLE IF NOT EXISTS users(
|
||||
twoFactorAuthenticationSecret VARCHAR(128) DEFAULT "",
|
||||
twoFactorAuthenticationEnabled BOOLEAN DEFAULT false,
|
||||
admin BOOLEAN DEFAULT false,
|
||||
source VARCHAR(128) DEFAULT "",
|
||||
|
||||
PRIMARY KEY(id));
|
||||
|
||||
@@ -41,9 +43,10 @@ CREATE TABLE IF NOT EXISTS groupMembers(
|
||||
FOREIGN KEY(userId) REFERENCES users(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tokens(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
name VARCHAR(64) DEFAULT "", // description
|
||||
accessToken VARCHAR(128) NOT NULL UNIQUE,
|
||||
identifier VARCHAR(128) NOT NULL,
|
||||
identifier VARCHAR(128) NOT NULL, // resourceId: app id or user id
|
||||
clientId VARCHAR(128),
|
||||
scope VARCHAR(512) NOT NULL,
|
||||
expires BIGINT NOT NULL, // FIXME: make this a timestamp
|
||||
@@ -61,9 +64,8 @@ CREATE TABLE IF NOT EXISTS clients(
|
||||
CREATE TABLE IF NOT EXISTS apps(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
appStoreId VARCHAR(128) NOT NULL,
|
||||
installationState VARCHAR(512) NOT NULL,
|
||||
installationProgress TEXT,
|
||||
runState VARCHAR(512),
|
||||
installationState VARCHAR(512) NOT NULL, // the active task on the app
|
||||
runState VARCHAR(512) NOT NULL, // if the app is stopped
|
||||
health VARCHAR(128),
|
||||
healthTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app last responded
|
||||
containerId VARCHAR(128),
|
||||
@@ -82,16 +84,14 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
robotsTxt TEXT,
|
||||
enableBackup BOOLEAN DEFAULT 1, // misnomer: controls automatic daily backups
|
||||
enableAutomaticUpdate BOOLEAN DEFAULT 1,
|
||||
mailboxName VARCHAR(128), // mailbox of this app
|
||||
mailboxName VARCHAR(128), // mailbox of this app. default allocated as '.app'
|
||||
label VARCHAR(128), // display name
|
||||
tagsJson VARCHAR(2048), // array of tags
|
||||
dataDir VARCHAR(256) UNIQUE,
|
||||
taskId INTEGER, // current task
|
||||
errorJson TEXT,
|
||||
|
||||
// the following fields do not belong here, they can be removed when we use a queue for apptask
|
||||
restoreConfigJson VARCHAR(256), // used to pass backupId to restore from to apptask
|
||||
oldConfigJson TEXT, // used to pass old config to apptask (configure, restore)
|
||||
updateConfigJson TEXT, // used to pass new config to apptask (update)
|
||||
|
||||
ownerId VARCHAR(128),
|
||||
|
||||
FOREIGN KEY(ownerId) REFERENCES users(id),
|
||||
FOREIGN KEY(taskId) REFERENCES tasks(id),
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appPortBindings(
|
||||
@@ -136,6 +136,7 @@ CREATE TABLE IF NOT EXISTS backups(
|
||||
state VARCHAR(16) NOT NULL,
|
||||
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
|
||||
format VARCHAR(16) DEFAULT "tgz",
|
||||
preserveSecs INTEGER DEFAULT 0,
|
||||
|
||||
PRIMARY KEY (id));
|
||||
|
||||
@@ -154,6 +155,7 @@ CREATE TABLE IF NOT EXISTS domains(
|
||||
provider VARCHAR(16) NOT NULL,
|
||||
configJson TEXT, /* JSON containing the dns backend provider config */
|
||||
tlsConfigJson TEXT, /* JSON containing the tls provider config */
|
||||
locked BOOLEAN,
|
||||
|
||||
PRIMARY KEY (domain))
|
||||
|
||||
@@ -168,6 +170,8 @@ CREATE TABLE IF NOT EXISTS mail(
|
||||
catchAllJson TEXT,
|
||||
relayJson TEXT,
|
||||
|
||||
dkimSelector VARCHAR(128) NOT NULL DEFAULT "cloudron",
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
PRIMARY KEY(domain))
|
||||
|
||||
@@ -185,7 +189,7 @@ CREATE TABLE IF NOT EXISTS mailboxes(
|
||||
type VARCHAR(16) NOT NULL, /* 'mailbox', 'alias', 'list' */
|
||||
ownerId VARCHAR(128) NOT NULL, /* user id */
|
||||
aliasTarget VARCHAR(128), /* the target name type is an alias */
|
||||
membersJson TEXT, /* members of a group */
|
||||
membersJson TEXT, /* members of a group. fully qualified */
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
domain VARCHAR(128),
|
||||
|
||||
@@ -196,7 +200,7 @@ CREATE TABLE IF NOT EXISTS subdomains(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
domain VARCHAR(128) NOT NULL,
|
||||
subdomain VARCHAR(128) NOT NULL,
|
||||
type VARCHAR(128) NOT NULL,
|
||||
type VARCHAR(128) NOT NULL, /* primary or redirect */
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
FOREIGN KEY(appId) REFERENCES apps(id),
|
||||
@@ -207,8 +211,8 @@ CREATE TABLE IF NOT EXISTS tasks(
|
||||
type VARCHAR(32) NOT NULL,
|
||||
percent INTEGER DEFAULT 0,
|
||||
message TEXT,
|
||||
errorMessage TEXT,
|
||||
result TEXT,
|
||||
errorJson TEXT,
|
||||
resultJson TEXT,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (id));
|
||||
@@ -216,10 +220,9 @@ CREATE TABLE IF NOT EXISTS tasks(
|
||||
CREATE TABLE IF NOT EXISTS notifications(
|
||||
id int NOT NULL AUTO_INCREMENT,
|
||||
userId VARCHAR(128) NOT NULL,
|
||||
eventId VARCHAR(128),
|
||||
eventId VARCHAR(128), // reference to eventlog. can be null
|
||||
title VARCHAR(512) NOT NULL,
|
||||
message TEXT,
|
||||
action VARCHAR(512) NOT NULL,
|
||||
acknowledged BOOLEAN DEFAULT false,
|
||||
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
|
||||
|
||||
Generated
+2703
-6575
File diff suppressed because it is too large
Load Diff
+51
-49
@@ -14,85 +14,87 @@
|
||||
"node": ">=4.0.0 <=4.1.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"@google-cloud/dns": "^0.7.2",
|
||||
"@google-cloud/storage": "^1.7.0",
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"async": "^2.6.1",
|
||||
"aws-sdk": "^2.253.1",
|
||||
"body-parser": "^1.18.3",
|
||||
"cloudron-manifestformat": "^2.14.2",
|
||||
"connect": "^3.6.6",
|
||||
"@google-cloud/dns": "^1.1.0",
|
||||
"@google-cloud/storage": "^2.5.0",
|
||||
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
|
||||
"async": "^2.6.2",
|
||||
"aws-sdk": "^2.476.0",
|
||||
"body-parser": "^1.19.0",
|
||||
"cloudron-manifestformat": "^2.15.0",
|
||||
"connect": "^3.7.0",
|
||||
"connect-ensure-login": "^0.1.1",
|
||||
"connect-lastmile": "^1.0.2",
|
||||
"connect-lastmile": "^1.2.1",
|
||||
"connect-timeout": "^1.9.0",
|
||||
"cookie-parser": "^1.3.5",
|
||||
"cookie-session": "^1.3.2",
|
||||
"cron": "^1.5.1",
|
||||
"csurf": "^1.6.6",
|
||||
"db-migrate": "^0.11.1",
|
||||
"cookie-parser": "^1.4.4",
|
||||
"cookie-session": "^1.3.3",
|
||||
"cron": "^1.7.1",
|
||||
"csurf": "^1.10.0",
|
||||
"db-migrate": "^0.11.6",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^3.1.0",
|
||||
"dockerode": "^2.5.5",
|
||||
"debug": "^4.1.1",
|
||||
"dockerode": "^2.5.8",
|
||||
"ejs": "^2.6.1",
|
||||
"ejs-cli": "^2.0.1",
|
||||
"express": "^4.16.3",
|
||||
"express-session": "^1.15.6",
|
||||
"json": "^9.0.3",
|
||||
"express": "^4.17.1",
|
||||
"express-session": "^1.16.2",
|
||||
"js-yaml": "^3.13.1",
|
||||
"json": "^9.0.6",
|
||||
"ldapjs": "^1.0.2",
|
||||
"lodash": "^4.17.11",
|
||||
"lodash.chunk": "^4.2.0",
|
||||
"mime": "^2.3.1",
|
||||
"moment-timezone": "^0.5.17",
|
||||
"morgan": "^1.9.0",
|
||||
"multiparty": "^4.1.4",
|
||||
"mysql": "^2.15.0",
|
||||
"namecheap": "github:joshuakarjala/node-namecheap#464a952",
|
||||
"nodemailer": "^4.6.5",
|
||||
"mime": "^2.4.4",
|
||||
"moment-timezone": "^0.5.25",
|
||||
"morgan": "^1.9.1",
|
||||
"multiparty": "^4.2.1",
|
||||
"mysql": "^2.17.1",
|
||||
"nodemailer": "^6.2.1",
|
||||
"nodemailer-smtp-transport": "^2.7.4",
|
||||
"oauth2orize": "^1.11.0",
|
||||
"once": "^1.3.2",
|
||||
"once": "^1.4.0",
|
||||
"parse-links": "^0.1.0",
|
||||
"passport": "^0.4.0",
|
||||
"passport-http": "^0.3.0",
|
||||
"passport-http-bearer": "^1.0.1",
|
||||
"passport-local": "^1.0.0",
|
||||
"passport-oauth2-client-password": "^0.1.2",
|
||||
"pretty-bytes": "^5.3.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"proxy-middleware": "^0.15.0",
|
||||
"qrcode": "^1.2.0",
|
||||
"readdirp": "^2.1.0",
|
||||
"request": "^2.87.0",
|
||||
"rimraf": "^2.6.2",
|
||||
"qrcode": "^1.3.3",
|
||||
"readdirp": "^3.0.2",
|
||||
"request": "^2.88.0",
|
||||
"rimraf": "^2.6.3",
|
||||
"s3-block-read-stream": "^0.5.0",
|
||||
"safetydance": "^0.7.1",
|
||||
"semver": "^5.5.0",
|
||||
"showdown": "^1.8.6",
|
||||
"semver": "^6.1.1",
|
||||
"session-file-store": "^1.3.1",
|
||||
"showdown": "^1.9.0",
|
||||
"speakeasy": "^2.0.0",
|
||||
"split": "^1.0.0",
|
||||
"superagent": "^3.8.3",
|
||||
"split": "^1.0.1",
|
||||
"superagent": "^5.0.9",
|
||||
"supererror": "^0.7.2",
|
||||
"tar-fs": "^1.16.2",
|
||||
"tar-stream": "^1.6.1",
|
||||
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
|
||||
"tar-stream": "^2.1.0",
|
||||
"tldjs": "^2.3.1",
|
||||
"underscore": "^1.9.1",
|
||||
"uuid": "^3.2.1",
|
||||
"uuid": "^3.3.2",
|
||||
"valid-url": "^1.0.9",
|
||||
"validator": "^10.3.0",
|
||||
"ws": "^5.2.0"
|
||||
"validator": "^11.0.0",
|
||||
"ws": "^7.0.0",
|
||||
"xml2js": "^0.4.19"
|
||||
},
|
||||
"devDependencies": {
|
||||
"expect.js": "*",
|
||||
"hock": "^1.3.2",
|
||||
"istanbul": "*",
|
||||
"js2xmlparser": "^3.0.0",
|
||||
"mocha": "^5.2.0",
|
||||
"hock": "^1.3.3",
|
||||
"js2xmlparser": "^4.0.0",
|
||||
"mocha": "^6.1.4",
|
||||
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
|
||||
"nock": "^9.0.14",
|
||||
"node-sass": "^4.6.1",
|
||||
"recursive-readdir": "^2.2.2",
|
||||
"sinon": "^7.2.2"
|
||||
"nock": "^10.0.6",
|
||||
"node-sass": "^4.12.0",
|
||||
"recursive-readdir": "^2.2.2"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- --no-timeouts --exit -R spec ./src/test ./src/routes/test/[^a]*js",
|
||||
"test": "./runTests",
|
||||
"postmerge": "/bin/true",
|
||||
"precommit": "/bin/true",
|
||||
"prepush": "npm test",
|
||||
|
||||
@@ -0,0 +1,71 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu
|
||||
|
||||
readonly SOURCE_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly DATA_DIR="${HOME}/.cloudron_test"
|
||||
readonly DEFAULT_TESTS="./src/test/*-test.js ./src/routes/test/*-test.js"
|
||||
|
||||
! "${SOURCE_dir}/src/test/checkInstall" && exit 1
|
||||
|
||||
# cleanup old data dirs some of those docker container data requires sudo to be removed
|
||||
echo "=> Provide root password to purge any leftover data in ${DATA_DIR} and load apparmor profile:"
|
||||
sudo rm -rf ${DATA_DIR}
|
||||
|
||||
# archlinux does not have apparmor
|
||||
if hash apparmor_parser 2>/dev/null; then
|
||||
echo "=> Loading app armor profile"
|
||||
sudo apparmor_parser --replace --write-cache ./setup/start/docker-cloudron-app.apparmor
|
||||
fi
|
||||
|
||||
# create dir structure
|
||||
mkdir -p ${DATA_DIR}
|
||||
cd ${DATA_DIR}
|
||||
mkdir -p appsdata
|
||||
mkdir -p boxdata/appicons boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
|
||||
mkdir -p platformdata/addons/mail platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks
|
||||
|
||||
# put cert
|
||||
echo "=> Generating a localhost selfsigned cert"
|
||||
openssl req -x509 -newkey rsa:2048 -keyout platformdata/nginx/cert/host.key -out platformdata/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes -config <(cat /etc/ssl/openssl.cnf <(printf "\n[SAN]\nsubjectAltName=DNS:*.localhost"))
|
||||
|
||||
# clear out any containers
|
||||
echo "=> Delete all docker containers first"
|
||||
docker ps -qa | xargs --no-run-if-empty docker rm -f
|
||||
|
||||
# create docker network (while the infra code does this, most tests skip infra setup)
|
||||
docker network create --subnet=172.18.0.0/16 cloudron || true
|
||||
|
||||
# create the same mysql server version to test with
|
||||
OUT=`docker inspect mysql-server` || true
|
||||
if [[ "${OUT}" = "[]" ]]; then
|
||||
echo "=> Starting mysql-server..."
|
||||
docker run --name mysql-server -e MYSQL_ROOT_PASSWORD=password -d mysql:5.7
|
||||
else
|
||||
echo "=> mysql-server already running. If you want to start fresh, run 'docker rm --force mysql-server'"
|
||||
fi
|
||||
|
||||
export MYSQL_IP=`docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysql-server`
|
||||
|
||||
echo "=> Waiting for mysql server to be ready..."
|
||||
while ! mysqladmin ping -h"${MYSQL_IP}" --silent; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "=> Starting cloudron-syslog"
|
||||
cloudron-syslog --logdir "${DATA_DIR}/platformdata/logs/" &
|
||||
|
||||
echo "=> Ensure database"
|
||||
mysql -h"${MYSQL_IP}" -uroot -ppassword -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
|
||||
echo "=> Run database migrations"
|
||||
cd "${SOURCE_dir}"
|
||||
BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up
|
||||
|
||||
echo "=> Run tests with mocha"
|
||||
TESTS=${DEFAULT_TESTS}
|
||||
if [[ $# -gt 0 ]]; then
|
||||
TESTS="$*"
|
||||
fi
|
||||
|
||||
BOX_ENV=test ./node_modules/mocha/bin/_mocha --bail --no-timeouts --exit -R spec ${TESTS}
|
||||
@@ -1,106 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400 --http1.1"
|
||||
|
||||
ip=""
|
||||
dns_config=""
|
||||
tls_cert_file=""
|
||||
tls_key_file=""
|
||||
license_file=""
|
||||
backup_config=""
|
||||
|
||||
args=$(getopt -o "" -l "ip:,backup-config:,license:,dns-config:,tls-cert:,tls-key:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--ip) ip="$2"; shift 2;;
|
||||
--dns-config) dns_config="$2"; shift 2;;
|
||||
--tls-cert) tls_cert_file="$2"; shift 2;;
|
||||
--tls-key) tls_key_file="$2"; shift 2;;
|
||||
--license) license_file="$2"; shift 2;;
|
||||
--backup-config) backup_config="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${ip}" ]]; then
|
||||
echo "--ip is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${dns_config}" ]]; then
|
||||
echo "--dns-config is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -f "${license_file}" ]]; then
|
||||
echo "--license must be a valid license file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function get_status() {
|
||||
key="$1"
|
||||
if status=$($curl -q -f -k "https://${ip}/api/v1/cloudron/status" 2>/dev/null); then
|
||||
currentValue=$(echo "${status}" | python3 -c 'import sys, json; print(json.dumps(json.load(sys.stdin)[sys.argv[1]]))' "${key}")
|
||||
echo "${currentValue}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
function wait_for_status() {
|
||||
key="$1"
|
||||
expectedValue="$2"
|
||||
|
||||
echo "wait_for_status: $key to be $expectedValue"
|
||||
while true; do
|
||||
if currentValue=$(get_status "${key}"); then
|
||||
echo "wait_for_status: $key is current: $currentValue expecting: $expectedValue"
|
||||
if [[ "${currentValue}" == $expectedValue ]]; then
|
||||
break
|
||||
fi
|
||||
fi
|
||||
sleep 3
|
||||
done
|
||||
}
|
||||
|
||||
echo "=> Waiting for cloudron to be ready"
|
||||
wait_for_status "version" '*'
|
||||
|
||||
domain=$(echo "${dns_config}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["domain"])')
|
||||
|
||||
echo "Provisioning Cloudron ${domain}"
|
||||
if [[ -n "${tls_cert_file}" && -n "${tls_key_file}" ]]; then
|
||||
tls_cert=$(cat "${tls_cert_file}" | awk '{printf "%s\\n", $0}')
|
||||
tls_key=$(cat "${tls_key_file}" | awk '{printf "%s\\n", $0}')
|
||||
fallback_cert=$(printf '{ "cert": "%s", "key": "%s", "provider": "fallback", "restricted": true }' "${tls_cert}" "${tls_key}")
|
||||
else
|
||||
fallback_cert=None
|
||||
fi
|
||||
|
||||
tls_config='{ "provider": "fallback" }'
|
||||
dns_config=$(echo "${dns_config}" | python3 -c "import json,sys;obj=json.load(sys.stdin);obj.update(tlsConfig=${tls_config});obj.update(fallbackCertficate=${fallback_cert});print(json.dumps(obj))")
|
||||
|
||||
license=$(cat "${license_file}")
|
||||
|
||||
if [[ -z "${backup_config:-}" ]]; then
|
||||
backup_config='{ "provider": "filesystem", "backupFolder": "/var/backups", "format": "tgz" }'
|
||||
fi
|
||||
|
||||
setupData=$(printf '{ "dnsConfig": %s, "autoconf": { "appstoreConfig": %s, "backupConfig": %s } }' "${dns_config}" "${license}" "${backup_config}")
|
||||
|
||||
if ! setupResult=$($curl -kq -X POST -H "Content-Type: application/json" -d "${setupData}" https://${ip}/api/v1/cloudron/setup); then
|
||||
echo "Failed to setup with ${setupData} ${setupResult}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
wait_for_status "webadminStatus" '*"tls": true*'
|
||||
|
||||
echo "Cloudron is ready at https://my-${domain}"
|
||||
|
||||
+31
-41
@@ -43,22 +43,20 @@ fi
|
||||
initBaseImage="true"
|
||||
# provisioning data
|
||||
provider=""
|
||||
edition=""
|
||||
requestedVersion=""
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
baseDataDir=""
|
||||
license=""
|
||||
|
||||
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,edition:,skip-reboot" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,license:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--help) echo "See https://cloudron.io/documentation/installation/ on how to install Cloudron"; exit 0;;
|
||||
--provider) provider="$2"; shift 2;;
|
||||
--edition) edition="$2"; shift 2;;
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
@@ -69,6 +67,7 @@ while true; do
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
fi
|
||||
shift 2;;
|
||||
--license) license="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--) break;;
|
||||
@@ -93,8 +92,9 @@ fi
|
||||
echo "Running cloudron-setup with args : $@" > "${LOG_FILE}"
|
||||
|
||||
# validate arguments in the absence of data
|
||||
readonly AVAILABLE_PROVIDERS="azure, caas, cloudscale, contabo, digitalocean, ec2, exoscale, galaxygate, gce, hetzner, interox, lightsail, linode, netcup, ovh, rosehosting, scaleway, skysilk, time4vps, upcloud, vultr or generic"
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (azure, digitalocean, ec2, exoscale, gce, hetzner, lightsail, linode, netcup, ovh, rosehosting, scaleway, vultr or generic)"
|
||||
echo "--provider is required ($AVAILABLE_PROVIDERS)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "ami" && \
|
||||
@@ -107,25 +107,28 @@ elif [[ \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "exoscale" && \
|
||||
"${provider}" != "galaxygate" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "gce" && \
|
||||
"${provider}" != "hetzner" && \
|
||||
"${provider}" != "interox" && \
|
||||
"${provider}" != "interox-image" && \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "linode-stackscript" && \
|
||||
"${provider}" != "netcup" && \
|
||||
"${provider}" != "netcup-image" && \
|
||||
"${provider}" != "ovh" && \
|
||||
"${provider}" != "rosehosting" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "skysilk" && \
|
||||
"${provider}" != "skysilk-image" && \
|
||||
"${provider}" != "time4vps" && \
|
||||
"${provider}" != "time4vps-image" && \
|
||||
"${provider}" != "upcloud" && \
|
||||
"${provider}" != "upcloud-image" && \
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: azure, cloudscale.ch, contabo, digitalocean, ec2, exoscale, galaxygate, gce, hetzner, lightsail, linode, netcup, ovh, rosehosting, scaleway, vultr or generic"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -n "${baseDataDir}" && ! -d "${baseDataDir}" ]]; then
|
||||
echo "${baseDataDir} does not exist"
|
||||
echo "--provider must be one of: $AVAILABLE_PROVIDERS"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -159,7 +162,7 @@ if [[ "${initBaseImage}" == "true" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! apt-get install curl python3 ubuntu-standard -y &>> "${LOG_FILE}"; then
|
||||
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install curl python3 ubuntu-standard -y &>> "${LOG_FILE}"; then
|
||||
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
|
||||
exit 1
|
||||
fi
|
||||
@@ -201,43 +204,30 @@ fi
|
||||
|
||||
# NOTE: this install script only supports 3.x and above
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
if [[ "${version}" =~ 3\.[0-2]+\.[0-9]+ ]]; then
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
data=$(cat <<EOF
|
||||
{
|
||||
"provider": "${provider}",
|
||||
"edition": "${edition}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"version": "${version}"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
mkdir -p /etc/cloudron
|
||||
# this file is used >= 4.2
|
||||
echo "${provider}" > /etc/cloudron/PROVIDER
|
||||
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm "${DATA_FILE}"
|
||||
else
|
||||
mkdir -p /etc/cloudron
|
||||
cat > "/etc/cloudron/cloudron.conf" <<CONF_END
|
||||
# this file is unused <= 4.2 and exists to make legacy installations work. the start script will remove this file anyway
|
||||
cat > "/etc/cloudron/cloudron.conf" <<CONF_END
|
||||
{
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"provider": "${provider}",
|
||||
"edition": "${edition}"
|
||||
"provider": "${provider}"
|
||||
}
|
||||
CONF_END
|
||||
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
[[ -n "${license}" ]] && echo -n "$license" > /etc/cloudron/LICENSE
|
||||
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# only needed for >= 4.2
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('api_server_origin', '${apiServerOrigin}');" 2>/dev/null
|
||||
mysql -uroot -ppassword -e "REPLACE INTO box.settings (name, value) VALUES ('web_server_origin', '${webServerOrigin}');" 2>/dev/null
|
||||
|
||||
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
|
||||
while true; do
|
||||
echo -n "."
|
||||
|
||||
+58
-29
@@ -1,5 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
# This script collects diagnostic information to help debug server related issues
|
||||
# It also enables SSH access for the cloudron support team
|
||||
|
||||
@@ -11,25 +13,33 @@ HELP_MESSAGE="
|
||||
This script collects diagnostic information to help debug server related issues
|
||||
|
||||
Options:
|
||||
--admin-login Login as administrator
|
||||
--enable-ssh Enable SSH access for the Cloudron support team
|
||||
--help Show this message
|
||||
"
|
||||
|
||||
# We require root
|
||||
if [[ ${EUID} -ne 0 ]]; then
|
||||
echo "This script should be run as root." > /dev/stderr
|
||||
echo "This script should be run as root. Run with sudo"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
enableSSH="false"
|
||||
|
||||
args=$(getopt -o "" -l "help,enable-ssh" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "help,enable-ssh,admin-login" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--help) echo -e "${HELP_MESSAGE}"; exit 0;;
|
||||
--enable-ssh) enableSSH="true"; shift;;
|
||||
--admin-login)
|
||||
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE admin=1 LIMIT 1" 2>/dev/null)
|
||||
admin_password=$(pwgen -1s 12)
|
||||
printf '{"%s":"%s"}\n' "${admin_username}" "${admin_password}" > /tmp/cloudron_ghost.json
|
||||
echo "Login as ${admin_username} / ${admin_password} . Remove /tmp/cloudron_ghost.json when done."
|
||||
exit 0
|
||||
;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -42,7 +52,7 @@ if [[ "`df --output="avail" / | sed -n 2p`" -lt "10240" ]]; then
|
||||
echo ""
|
||||
df -h
|
||||
echo ""
|
||||
echo "To recover from a full disk, follow the guide at https://cloudron.io/documentation/server/#recovery-after-disk-full"
|
||||
echo "To recover from a full disk, follow the guide at https://cloudron.io/documentation/troubleshooting/#recovery-after-disk-full"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -58,23 +68,8 @@ echo -n "Generating Cloudron Support stats..."
|
||||
# clear file
|
||||
rm -rf $OUT
|
||||
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
if [[ $SUDO_USER == "" ]]; then
|
||||
ssh_user="root"
|
||||
ssh_folder="/root/.ssh/"
|
||||
authorized_key_file="${ssh_folder}/authorized_keys"
|
||||
else
|
||||
ssh_user="$SUDO_USER"
|
||||
ssh_folder="/home/$SUDO_USER/.ssh/"
|
||||
authorized_key_file="${ssh_folder}/authorized_keys"
|
||||
fi
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
|
||||
echo -e $LINE"cloudron.conf"$LINE >> $OUT
|
||||
cat /etc/cloudron/cloudron.conf &>> $OUT
|
||||
echo -e $LINE"PROVIDER"$LINE >> $OUT
|
||||
cat /etc/cloudron/PROVIDER &>> $OUT || true
|
||||
|
||||
echo -e $LINE"Docker container"$LINE >> $OUT
|
||||
if ! timeout --kill-after 10s 15s docker ps -a &>> $OUT 2>&1; then
|
||||
@@ -84,31 +79,65 @@ fi
|
||||
echo -e $LINE"Filesystem stats"$LINE >> $OUT
|
||||
df -h &>> $OUT
|
||||
|
||||
echo -e $LINE"Appsdata stats"$LINE >> $OUT
|
||||
du -hcsL /home/yellowtent/appsdata/* &>> $OUT
|
||||
|
||||
echo -e $LINE"Boxdata stats"$LINE >> $OUT
|
||||
du -hcsL /home/yellowtent/boxdata/* &>> $OUT
|
||||
|
||||
echo -e $LINE"Backup stats (possibly misleading)"$LINE >> $OUT
|
||||
du -hcsL /var/backups/* &>> $OUT
|
||||
|
||||
echo -e $LINE"System daemon status"$LINE >> $OUT
|
||||
systemctl status --lines=100 cloudron.target box mysql unbound cloudron-syslog nginx collectd docker &>> $OUT
|
||||
|
||||
echo -e $LINE"Box logs"$LINE >> $OUT
|
||||
tail -n 100 /home/yellowtent/platformdata/logs/box.log &>> $OUT
|
||||
|
||||
echo -e $LINE"Firewall chains"$LINE >> $OUT
|
||||
ip addr &>> $OUT
|
||||
|
||||
echo -e $LINE"Firewall chains"$LINE >> $OUT
|
||||
iptables -L &>> $OUT
|
||||
|
||||
echo "Done"
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
ssh_port=$(cat /etc/ssh/sshd_config | grep "Port " | sed -e "s/.*Port //")
|
||||
permit_root_login=$(grep -q ^PermitRootLogin.*yes /etc/ssh/sshd_config && echo "yes" || echo "no")
|
||||
|
||||
# support.js uses similar logic
|
||||
if $(grep -q "ec2\|lightsail\|ami" /etc/cloudron/PROVIDER); then
|
||||
ssh_user="ubuntu"
|
||||
keys_file="/home/ubuntu/.ssh/authorized_keys"
|
||||
else
|
||||
ssh_user="root"
|
||||
keys_file="/root/.ssh/authorized_keys"
|
||||
fi
|
||||
|
||||
echo -e $LINE"SSH"$LINE >> $OUT
|
||||
echo "Username: ${ssh_user}" >> $OUT
|
||||
echo "Port: ${ssh_port}" >> $OUT
|
||||
echo "PermitRootLogin: ${permit_root_login}" >> $OUT
|
||||
echo "Key file: ${keys_file}" >> $OUT
|
||||
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p $(dirname "${keys_file}") # .ssh does not exist sometimes
|
||||
touch "${keys_file}" # required for concat to work
|
||||
if ! grep -q "${CLOUDRON_SUPPORT_PUBLIC_KEY}" "${keys_file}"; then
|
||||
echo -e "\n${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> "${keys_file}"
|
||||
chmod 600 "${keys_file}"
|
||||
chown "${ssh_user}" "${keys_file}"
|
||||
fi
|
||||
|
||||
echo "Done"
|
||||
fi
|
||||
|
||||
echo -n "Uploading information..."
|
||||
# for some reason not using $(cat $OUT) will not contain newlines!?
|
||||
paste_key=$(curl -X POST ${PASTEBIN}/documents --silent -d "$(cat $OUT)" | python3 -c "import sys, json; print(json.load(sys.stdin)['key'])")
|
||||
echo "Done"
|
||||
|
||||
if [[ "${enableSSH}" == "true" ]]; then
|
||||
echo -n "Enabling ssh access for the Cloudron support team..."
|
||||
mkdir -p "${ssh_folder}"
|
||||
echo "${CLOUDRON_SUPPORT_PUBLIC_KEY}" >> ${authorized_key_file}
|
||||
chown -R ${ssh_user} "${ssh_folder}"
|
||||
chmod 600 "${authorized_key_file}"
|
||||
echo "Done"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Please email the following link to support@cloudron.io"
|
||||
echo ""
|
||||
|
||||
@@ -41,8 +41,8 @@ if ! $(cd "${SOURCE_DIR}/../dashboard" && git diff --exit-code >/dev/null); then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(node --version)" != "v8.11.2" ]]; then
|
||||
echo "This script requires node 8.11.2"
|
||||
if [[ "$(node --version)" != "v10.15.1" ]]; then
|
||||
echo "This script requires node 10.15.1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
+16
-15
@@ -19,17 +19,18 @@ readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max
|
||||
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
|
||||
|
||||
readonly ubuntu_version=$(lsb_release -rs)
|
||||
readonly ubuntu_codename=$(lsb_release -cs)
|
||||
|
||||
readonly is_update=$(systemctl is-active box && echo "yes" || echo "no")
|
||||
|
||||
echo "==> installer: updating docker"
|
||||
if [[ $(docker version --format {{.Client.Version}}) != "18.03.1-ce" ]]; then
|
||||
$curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.03.1~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
|
||||
|
||||
# https://download.docker.com/linux/ubuntu/dists/xenial/stable/binary-amd64/Packages
|
||||
if [[ $(sha256sum /tmp/docker.deb | cut -d' ' -f1) != "54f4c9268492a4fd2ec2e6bcc95553855b025f35dcc8b9f60ac34e0aa307279b" ]]; then
|
||||
echo "==> installer: docker binary download is corrupt"
|
||||
exit 5
|
||||
fi
|
||||
if [[ $(docker version --format {{.Client.Version}}) != "18.09.2" ]]; then
|
||||
# there are 3 packages for docker - containerd, CLI and the daemon
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.2-3_amd64.deb" -o /tmp/containerd.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
|
||||
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_18.09.2~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
|
||||
|
||||
echo "==> installer: Waiting for all dpkg tasks to finish..."
|
||||
while fuser /var/lib/dpkg/lock; do
|
||||
@@ -47,21 +48,21 @@ if [[ $(docker version --format {{.Client.Version}}) != "18.03.1-ce" ]]; then
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while ! apt install -y /tmp/docker.deb; do
|
||||
while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
|
||||
echo "==> installer: Failed to install docker. Retry"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
rm /tmp/docker.deb
|
||||
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
|
||||
fi
|
||||
|
||||
echo "==> installer: updating node"
|
||||
if [[ "$(node --version)" != "v8.11.2" ]]; then
|
||||
mkdir -p /usr/local/node-8.11.2
|
||||
$curl -sL https://nodejs.org/dist/v8.11.2/node-v8.11.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-8.11.2
|
||||
ln -sf /usr/local/node-8.11.2/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-8.11.2/bin/npm /usr/bin/npm
|
||||
rm -rf /usr/local/node-6.11.5
|
||||
if [[ "$(node --version)" != "v10.15.1" ]]; then
|
||||
mkdir -p /usr/local/node-10.15.1
|
||||
$curl -sL https://nodejs.org/dist/v10.15.1/node-v10.15.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-10.15.1
|
||||
ln -sf /usr/local/node-10.15.1/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-10.15.1/bin/npm /usr/bin/npm
|
||||
rm -rf /usr/local/node-8.11.2 /usr/local/node-8.9.3
|
||||
fi
|
||||
|
||||
# this is here (and not in updater.js) because rebuild requires the above node
|
||||
|
||||
+20
-23
@@ -26,22 +26,6 @@ systemctl enable apparmor
|
||||
systemctl restart apparmor
|
||||
|
||||
usermod ${USER} -a -G docker
|
||||
# preserve the existing storage driver (user might be using overlay2)
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
[[ -n "${storage_driver}" ]] || storage_driver="overlay2" # if the above command fails
|
||||
|
||||
temp_file=$(mktemp)
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=${storage_driver}" > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
# restart docker if options changed
|
||||
if [[ ! -f /etc/systemd/system/docker.service.d/cloudron.conf ]] || ! diff -q /etc/systemd/system/docker.service.d/cloudron.conf "${temp_file}" >/dev/null; then
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
mv "${temp_file}" /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
systemctl daemon-reload
|
||||
systemctl restart docker
|
||||
fi
|
||||
docker network create --subnet=172.18.0.0/16 cloudron || true
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
@@ -60,7 +44,10 @@ mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/logrotate.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/acme"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/backup"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" "${PLATFORM_DATA_DIR}/logs/updater" "${PLATFORM_DATA_DIR}/logs/tasks"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" \
|
||||
"${PLATFORM_DATA_DIR}/logs/updater" \
|
||||
"${PLATFORM_DATA_DIR}/logs/tasks" \
|
||||
"${PLATFORM_DATA_DIR}/logs/crash"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/update"
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}/appicons"
|
||||
@@ -135,8 +122,11 @@ echo "==> Configuring logrotate"
|
||||
if ! grep -q "^include ${PLATFORM_DATA_DIR}/logrotate.d" /etc/logrotate.conf; then
|
||||
echo -e "\ninclude ${PLATFORM_DATA_DIR}/logrotate.d\n" >> /etc/logrotate.conf
|
||||
fi
|
||||
cp "${script_dir}/start/box-logrotate" "${script_dir}/start/app-logrotate" "${PLATFORM_DATA_DIR}/logrotate.d/"
|
||||
chown root:root "${PLATFORM_DATA_DIR}/logrotate.d/box-logrotate" "${PLATFORM_DATA_DIR}/logrotate.d/app-logrotate"
|
||||
rm -f "${PLATFORM_DATA_DIR}/logrotate.d/"*
|
||||
cp "${script_dir}/start/logrotate/"* "${PLATFORM_DATA_DIR}/logrotate.d/"
|
||||
|
||||
# logrotate files have to be owned by root, this is here to fixup existing installations where we were resetting the owner to yellowtent
|
||||
chown root:root "${PLATFORM_DATA_DIR}/logrotate.d/"
|
||||
|
||||
echo "==> Adding motd message for admins"
|
||||
cp "${script_dir}/start/cloudron-motd" /etc/update-motd.d/92-cloudron
|
||||
@@ -179,7 +169,13 @@ mysqladmin -u root -ppassword password password # reset default root password
|
||||
mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
|
||||
echo "==> Migrating data"
|
||||
(cd "${BOX_SRC_DIR}" && BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up)
|
||||
cd "${BOX_SRC_DIR}"
|
||||
if ! BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up; then
|
||||
echo "DB migration failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -f /etc/cloudron/cloudron.conf
|
||||
|
||||
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
|
||||
echo "==> Generating dhparams (takes forever)"
|
||||
@@ -189,6 +185,10 @@ else
|
||||
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
|
||||
fi
|
||||
|
||||
# old installations used to create appdata/<app>/redis which is now part of old backups and prevents restore
|
||||
echo "==> Cleaning up stale redis directories"
|
||||
find "${APPS_DATA_DIR}" -maxdepth 2 -type d -name redis -exec rm -rf {} +
|
||||
|
||||
echo "==> Changing ownership"
|
||||
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
|
||||
chown -R "${USER}" /etc/cloudron
|
||||
@@ -197,9 +197,6 @@ chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
|
||||
chown "${USER}:${USER}" "${APPS_DATA_DIR}"
|
||||
|
||||
# logrotate files have to be owned by root, this is here to fixup existing installations where we were resetting the owner to yellowtent
|
||||
chown root:root -R "${PLATFORM_DATA_DIR}/logrotate.d"
|
||||
|
||||
# do not chown the boxdata/mail directory; dovecot gets upset
|
||||
chown "${USER}:${USER}" "${BOX_DATA_DIR}"
|
||||
find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${BOX_DATA_DIR}/mail" -exec chown -R "${USER}:${USER}" {} \;
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
# logrotate config for app logs
|
||||
|
||||
/home/yellowtent/platformdata/logs/*/*.log {
|
||||
# only keep one rotated file, we currently do not send that over the api
|
||||
rotate 1
|
||||
size 10M
|
||||
# we never compress so we can simply tail the files
|
||||
nocompress
|
||||
copytruncate
|
||||
}
|
||||
@@ -9,7 +9,7 @@ iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
# ssh is allowed alternately on port 202
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443,587,993,4190 -j ACCEPT
|
||||
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
|
||||
@@ -17,11 +17,6 @@ else
|
||||
printf "are automatically installed on this server every night.\n"
|
||||
printf "\n"
|
||||
printf "Read more at https://cloudron.io/documentation/security/#os-updates\n"
|
||||
|
||||
if grep -q "^PasswordAuthentication yes" /etc/ssh/sshd_config; then
|
||||
printf "\nPlease disable password based SSH access to secure your server. Read more at\n"
|
||||
printf "https://cloudron.io/documentation/security/#securing-ssh-access\n"
|
||||
fi
|
||||
fi
|
||||
|
||||
printf "\nFor help and more information, visit https://forum.cloudron.io\n\n"
|
||||
|
||||
@@ -240,8 +240,23 @@ LoadPlugin write_graphite
|
||||
Interactive false
|
||||
|
||||
Import "df"
|
||||
# <Module df>
|
||||
# </Module>
|
||||
|
||||
Import "du"
|
||||
<Module du>
|
||||
<Path>
|
||||
Instance maildata
|
||||
Dir "/home/yellowtent/boxdata/mail"
|
||||
</Path>
|
||||
<Path>
|
||||
Instance boxdata
|
||||
Dir "/home/yellowtent/boxdata"
|
||||
Exclude "mail"
|
||||
</Path>
|
||||
<Path>
|
||||
Instance platformdata
|
||||
Dir "/home/yellowtent/platformdata"
|
||||
</Path>
|
||||
</Module>
|
||||
</Plugin>
|
||||
|
||||
<Plugin write_graphite>
|
||||
|
||||
@@ -21,6 +21,7 @@ def read():
|
||||
except:
|
||||
continue
|
||||
|
||||
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db
|
||||
val = collectd.Values(type='df_complex', plugin='df', plugin_instance=instance)
|
||||
|
||||
free = st.f_bavail * st.f_frsize # bavail is for non-root user. bfree is total
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
import collectd,os,subprocess,sys,re,time
|
||||
|
||||
# https://www.programcreek.com/python/example/106897/collectd.register_read
|
||||
|
||||
PATHS = [] # { name, dir, exclude }
|
||||
INTERVAL = 60 * 60 * 12 # twice a day. change values in docker-graphite if you change this
|
||||
|
||||
def du(pathinfo):
|
||||
cmd = 'timeout 1800 du -Dsb "{}"'.format(pathinfo['dir'])
|
||||
if pathinfo['exclude'] != '':
|
||||
cmd += ' --exclude "{}"'.format(pathinfo['exclude'])
|
||||
|
||||
collectd.info('computing size with command: %s' % cmd);
|
||||
try:
|
||||
size = subprocess.check_output(cmd, shell=True).split()[0].decode('utf-8')
|
||||
collectd.info('\tsize of %s is %s (time: %i)' % (pathinfo['dir'], size, int(time.time())))
|
||||
return size
|
||||
except Exception as e:
|
||||
collectd.info('\terror getting the size of %s: %s' % (pathinfo['dir'], str(e)))
|
||||
return 0
|
||||
|
||||
def parseSize(size):
|
||||
units = {"B": 1, "KB": 10**3, "MB": 10**6, "GB": 10**9, "TB": 10**12}
|
||||
number, unit, _ = re.split('([a-zA-Z]+)', size.upper())
|
||||
return int(float(number)*units[unit])
|
||||
|
||||
def dockerSize():
|
||||
# use --format '{{json .}}' to dump the string. '{{if eq .Type "Images"}}{{.Size}}{{end}}' still creates newlines
|
||||
cmd = 'timeout 1800 docker system df --format "{{.Size}}" | head -n1'
|
||||
try:
|
||||
size = subprocess.check_output(cmd, shell=True).strip().decode('utf-8')
|
||||
collectd.info('size of docker images is %s (%s) (time: %i)' % (size, parseSize(size), int(time.time())))
|
||||
return parseSize(size)
|
||||
except Exception as e:
|
||||
collectd.info('error getting docker images size : %s' % str(e))
|
||||
return 0
|
||||
|
||||
# configure is called for each module block. this is called before init
|
||||
def configure(config):
|
||||
global PATHS
|
||||
|
||||
for child in config.children:
|
||||
if child.key != 'Path':
|
||||
collectd.info('du plugin: Unknown config key "%s"' % key)
|
||||
continue
|
||||
|
||||
pathinfo = { 'name': '', 'dir': '', 'exclude': '' }
|
||||
for node in child.children:
|
||||
if node.key == 'Instance':
|
||||
pathinfo['name'] = node.values[0]
|
||||
elif node.key == 'Dir':
|
||||
pathinfo['dir'] = node.values[0]
|
||||
elif node.key == 'Exclude':
|
||||
pathinfo['exclude'] = node.values[0]
|
||||
|
||||
PATHS.append(pathinfo);
|
||||
collectd.info('du plugin: monitoring %s' % pathinfo['dir']);
|
||||
|
||||
def init():
|
||||
global PATHS
|
||||
collectd.info('custom du plugin initialized with %s %s' % (PATHS, sys.version))
|
||||
|
||||
def read():
|
||||
for pathinfo in PATHS:
|
||||
size = du(pathinfo)
|
||||
|
||||
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db
|
||||
val = collectd.Values(type='capacity', plugin='du', plugin_instance=pathinfo['name'])
|
||||
val.dispatch(values=[size], type_instance='usage')
|
||||
|
||||
size = dockerSize()
|
||||
val = collectd.Values(type='capacity', plugin='du', plugin_instance='docker')
|
||||
val.dispatch(values=[size], type_instance='usage')
|
||||
|
||||
|
||||
|
||||
collectd.register_init(init)
|
||||
collectd.register_config(configure)
|
||||
collectd.register_read(read, INTERVAL)
|
||||
@@ -0,0 +1,40 @@
|
||||
# add customizations here
|
||||
# after making changes run "sudo systemctl restart box"
|
||||
|
||||
# appstore:
|
||||
# blacklist:
|
||||
# - io.wekan.cloudronapp
|
||||
# - io.cloudron.openvpn
|
||||
# whitelist:
|
||||
# org.wordpress.cloudronapp: {}
|
||||
# chat.rocket.cloudronapp: {}
|
||||
# com.nextcloud.cloudronapp: {}
|
||||
#
|
||||
# backups:
|
||||
# configurable: true
|
||||
#
|
||||
# domains:
|
||||
# dynamicDns: true
|
||||
# changeDashboardDomain: true
|
||||
#
|
||||
# subscription:
|
||||
# configurable: true
|
||||
#
|
||||
# support:
|
||||
# email: support@cloudron.io
|
||||
# remoteSupport: true
|
||||
#
|
||||
# ticketFormBody: |
|
||||
# Use this form to open support tickets. You can also write directly to [support@cloudron.io](mailto:support@cloudron.io).
|
||||
# * [Knowledge Base & App Docs](https://cloudron.io/documentation/apps/?support_view)
|
||||
# * [Custom App Packaging & API](https://cloudron.io/developer/packaging/?support_view)
|
||||
# * [Forum](https://forum.cloudron.io/)
|
||||
#
|
||||
# submitTickets: true
|
||||
#
|
||||
# alerts:
|
||||
# email: support@cloudron.io
|
||||
# notifyCloudronAdmins: false
|
||||
#
|
||||
# footer:
|
||||
# body: '© 2019 [Cloudron](https://cloudron.io) [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)'
|
||||
@@ -1,7 +1,8 @@
|
||||
# logrotate config for box logs
|
||||
|
||||
# keep upto 5 logs of size 10M each
|
||||
/home/yellowtent/platformdata/logs/box.log {
|
||||
rotate 10
|
||||
rotate 5
|
||||
size 10M
|
||||
# we never compress so we can simply tail the files
|
||||
nocompress
|
||||
@@ -0,0 +1,31 @@
|
||||
# logrotate config for app, crash, addon and task logs
|
||||
|
||||
# man 7 glob
|
||||
/home/yellowtent/platformdata/logs/graphite/*.log
|
||||
/home/yellowtent/platformdata/logs/mail/*.log
|
||||
/home/yellowtent/platformdata/logs/mysql/*.log
|
||||
/home/yellowtent/platformdata/logs/mongodb/*.log
|
||||
/home/yellowtent/platformdata/logs/postgresql/*.log
|
||||
/home/yellowtent/platformdata/logs/sftp/*.log
|
||||
/home/yellowtent/platformdata/logs/redis-*/*.log
|
||||
/home/yellowtent/platformdata/logs/crash/*.log
|
||||
/home/yellowtent/platformdata/logs/updater/*.log {
|
||||
# only keep one rotated file, we currently do not send that over the api
|
||||
rotate 1
|
||||
size 10M
|
||||
missingok
|
||||
# we never compress so we can simply tail the files
|
||||
nocompress
|
||||
copytruncate
|
||||
}
|
||||
|
||||
# keep task logs for a week. the 'nocreate' option ensures empty log files are not
|
||||
# created post rotation
|
||||
/home/yellowtent/platformdata/logs/tasks/*.log {
|
||||
minage 7
|
||||
daily
|
||||
rotate 0
|
||||
missingok
|
||||
nocreate
|
||||
}
|
||||
|
||||
@@ -12,6 +12,9 @@ max_allowed_packet=32M
|
||||
character-set-server = utf8mb4
|
||||
collation-server = utf8mb4_unicode_ci
|
||||
|
||||
# set timezone to UTC
|
||||
default_time_zone='+00:00'
|
||||
|
||||
[mysqldump]
|
||||
quick
|
||||
quote-names
|
||||
|
||||
@@ -49,3 +49,4 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartdocker.s
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restartunbound.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartunbound.sh
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
exports = module.exports = {
|
||||
SCOPE_APPS_READ: 'apps:read',
|
||||
SCOPE_APPS_MANAGE: 'apps:manage',
|
||||
SCOPE_APPSTORE: 'appstore',
|
||||
SCOPE_CLIENTS: 'clients',
|
||||
SCOPE_CLOUDRON: 'cloudron',
|
||||
SCOPE_DOMAINS_READ: 'domains:read',
|
||||
@@ -10,10 +11,10 @@ exports = module.exports = {
|
||||
SCOPE_MAIL: 'mail',
|
||||
SCOPE_PROFILE: 'profile',
|
||||
SCOPE_SETTINGS: 'settings',
|
||||
SCOPE_SUBSCRIPTION: 'subscription',
|
||||
SCOPE_USERS_READ: 'users:read',
|
||||
SCOPE_USERS_MANAGE: 'users:manage',
|
||||
SCOPE_APPSTORE: 'appstore',
|
||||
VALID_SCOPES: [ 'apps', 'appstore', 'clients', 'cloudron', 'domains', 'mail', 'profile', 'settings', 'users' ], // keep this sorted
|
||||
VALID_SCOPES: [ 'apps', 'appstore', 'clients', 'cloudron', 'domains', 'mail', 'profile', 'settings', 'subscription', 'users' ], // keep this sorted
|
||||
|
||||
SCOPE_ANY: '*',
|
||||
|
||||
@@ -26,7 +27,6 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:accesscontrol'),
|
||||
tokendb = require('./tokendb.js'),
|
||||
@@ -114,14 +114,14 @@ function scopesForUser(user, callback) {
|
||||
|
||||
if (user.admin) return callback(null, exports.VALID_SCOPES);
|
||||
|
||||
callback(null, config.isSpacesEnabled() ? [ 'profile', 'apps', 'domains:read', 'users:read' ] : [ 'profile', 'apps:read' ]);
|
||||
callback(null, [ 'profile', 'apps:read' ]);
|
||||
}
|
||||
|
||||
function validateToken(accessToken, callback) {
|
||||
assert.strictEqual(typeof accessToken, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
tokendb.get(accessToken, function (error, token) {
|
||||
tokendb.getByAccessToken(accessToken, function (error, token) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, null /* user */, 'Invalid Token'); // will end up as a 401
|
||||
if (error) return callback(error); // this triggers 'internal error' in passport
|
||||
|
||||
@@ -129,6 +129,8 @@ function validateToken(accessToken, callback) {
|
||||
if (error && error.reason === UsersError.NOT_FOUND) return callback(null, null /* user */, 'Invalid Token'); // will end up as a 401
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!user.active) return callback(null, null /* user */, 'Invalid Token'); // will end up as a 401
|
||||
|
||||
scopesForUser(user, function (error, userScopes) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
+275
-143
@@ -36,16 +36,17 @@ var accesscontrol = require('./accesscontrol.js'),
|
||||
apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
clients = require('./clients.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
ClientsError = clients.ClientsError,
|
||||
crypto = require('crypto'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:addons'),
|
||||
docker = require('./docker.js'),
|
||||
dockerConnection = docker.connection,
|
||||
DockerError = docker.DockerError,
|
||||
fs = require('fs'),
|
||||
graphs = require('./graphs.js'),
|
||||
hat = require('./hat.js'),
|
||||
infra = require('./infra_version.js'),
|
||||
mail = require('./mail.js'),
|
||||
@@ -57,6 +58,7 @@ var accesscontrol = require('./accesscontrol.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
sftp = require('./sftp.js'),
|
||||
shell = require('./shell.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
@@ -184,7 +186,7 @@ var KNOWN_ADDONS = {
|
||||
const KNOWN_SERVICES = {
|
||||
mail: {
|
||||
status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),
|
||||
restart: restartContainer.bind(null, 'mail'),
|
||||
restart: mail.restartMail,
|
||||
defaultMemoryLimit: Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256) * 1024 * 1024
|
||||
},
|
||||
mongodb: {
|
||||
@@ -211,10 +213,25 @@ const KNOWN_SERVICES = {
|
||||
status: statusUnbound,
|
||||
restart: restartUnbound,
|
||||
defaultMemoryLimit: 0
|
||||
},
|
||||
sftp: {
|
||||
status: statusSftp,
|
||||
restart: restartContainer.bind(null, 'sftp'),
|
||||
defaultMemoryLimit: 256 * 1024 * 1024
|
||||
},
|
||||
graphite: {
|
||||
status: statusGraphite,
|
||||
restart: restartContainer.bind(null, 'graphite'),
|
||||
defaultMemoryLimit: 75 * 1024 * 1024
|
||||
},
|
||||
nginx: {
|
||||
status: statusNginx,
|
||||
restart: restartNginx,
|
||||
defaultMemoryLimit: 0
|
||||
}
|
||||
};
|
||||
|
||||
function debugApp(app, args) {
|
||||
function debugApp(app /*, args */) {
|
||||
assert(typeof app === 'object');
|
||||
|
||||
debug((app.fqdn || app.location) + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
|
||||
@@ -254,6 +271,10 @@ function restartContainer(serviceName, callback) {
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
docker.startContainer(serviceName, function (error) {
|
||||
if (error && error.reason === BoxError.NOT_FOUND) {
|
||||
callback(null); // callback early since rebuilding takes long
|
||||
return rebuildService(serviceName, function (error) { if (error) console.error(`Unable to rebuild service ${serviceName}`, error); });
|
||||
}
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
@@ -261,6 +282,48 @@ function restartContainer(serviceName, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function rebuildService(serviceName, callback) {
|
||||
assert.strictEqual(typeof serviceName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
assert(KNOWN_SERVICES[serviceName], `Unknown service ${serviceName}`);
|
||||
|
||||
// this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
|
||||
// passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad
|
||||
if (serviceName === 'mongodb') return startMongodb({ version: 'none' }, callback);
|
||||
if (serviceName === 'postgresql') return startPostgresql({ version: 'none' }, callback);
|
||||
if (serviceName === 'mysql') return startMysql({ version: 'none' }, callback);
|
||||
if (serviceName === 'sftp') return sftp.startSftp({ version: 'none' }, callback);
|
||||
if (serviceName === 'graphite') return graphs.startGraphite({ version: 'none' }, callback);
|
||||
|
||||
// nothing to rebuild for now
|
||||
callback();
|
||||
}
|
||||
|
||||
function getServiceDetails(containerName, tokenEnvName, callback) {
|
||||
assert.strictEqual(typeof containerName, 'string');
|
||||
assert.strictEqual(typeof tokenEnvName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
docker.inspect(containerName, function (error, result) {
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return callback(new AddonsError(AddonsError.NOT_ACTIVE, error));
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
|
||||
if (!ip) return callback(new AddonsError(AddonsError.NOT_ACTIVE, `Error getting ${containerName} container ip`));
|
||||
|
||||
// extract the cloudron token for auth
|
||||
const env = safe.query(result, 'Config.Env', null);
|
||||
if (!env) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} env`));
|
||||
const tmp = env.find(function (e) { return e.indexOf(tokenEnvName) === 0; });
|
||||
if (!tmp) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} cloudron token env var`));
|
||||
const token = tmp.slice(tokenEnvName.length + 1); // +1 for the = sign
|
||||
if (!token) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} cloudron token`));
|
||||
|
||||
callback(null, { ip: ip, token: token, state: result.State });
|
||||
});
|
||||
}
|
||||
|
||||
function containerStatus(addonName, addonTokenName, callback) {
|
||||
assert.strictEqual(typeof addonName, 'string');
|
||||
assert.strictEqual(typeof addonTokenName, 'string');
|
||||
@@ -325,7 +388,7 @@ function getService(serviceName, callback) {
|
||||
}
|
||||
|
||||
KNOWN_SERVICES[serviceName].status(function (error, result) {
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
tmp.status = result.status;
|
||||
tmp.memoryUsed = result.memoryUsed;
|
||||
@@ -438,30 +501,6 @@ function restartService(serviceName, callback) {
|
||||
KNOWN_SERVICES[serviceName].restart(callback);
|
||||
}
|
||||
|
||||
function getServiceDetails(containerName, tokenEnvName, callback) {
|
||||
assert.strictEqual(typeof containerName, 'string');
|
||||
assert.strictEqual(typeof tokenEnvName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
docker.inspect(containerName, function (error, result) {
|
||||
if (error && error.reason === DockerError.NOT_FOUND) return callback(new AddonsError(AddonsError.NOT_ACTIVE, error));
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
const ip = safe.query(result, 'NetworkSettings.Networks.cloudron.IPAddress', null);
|
||||
if (!ip) return callback(new AddonsError(AddonsError.NOT_ACTIVE, `Error getting ${containerName} container ip`));
|
||||
|
||||
// extract the cloudron token for auth
|
||||
const env = safe.query(result, 'Config.Env', null);
|
||||
if (!env) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} env`));
|
||||
const tmp = env.find(function (e) { return e.indexOf(tokenEnvName) === 0; });
|
||||
if (!tmp) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} cloudron token env var`));
|
||||
const token = tmp.slice(tokenEnvName.length + 1); // +1 for the = sign
|
||||
if (!token) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, `Error getting ${containerName} cloudron token`));
|
||||
|
||||
callback(null, { ip: ip, token: token, state: result.State });
|
||||
});
|
||||
}
|
||||
|
||||
function waitForService(containerName, tokenEnvName, callback) {
|
||||
assert.strictEqual(typeof containerName, 'string');
|
||||
assert.strictEqual(typeof tokenEnvName, 'string');
|
||||
@@ -605,7 +644,9 @@ function importDatabase(addon, callback) {
|
||||
if (!error) return iteratorCallback();
|
||||
|
||||
debug(`importDatabase: Error importing ${addon} of app ${app.id}. Marking as errored`, error);
|
||||
appdb.update(app.id, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message }, iteratorCallback);
|
||||
// FIXME: there is no way to 'repair' if we are here. we need to make a separate apptask that re-imports db
|
||||
// not clear, if repair workflow should be part of addon or per-app
|
||||
appdb.update(app.id, { installationState: apps.ISTATE_ERROR, error: { message: error.message } }, iteratorCallback);
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
@@ -617,7 +658,7 @@ function updateServiceConfig(platformConfig, callback) {
|
||||
debug('updateServiceConfig: %j', platformConfig);
|
||||
|
||||
// TODO: this should possibly also rollback memory to default
|
||||
async.eachSeries([ 'mysql', 'postgresql', 'mail', 'mongodb' ], function iterator(serviceName, iteratorCallback) {
|
||||
async.eachSeries([ 'mysql', 'postgresql', 'mail', 'mongodb', 'graphite' ], function iterator(serviceName, iteratorCallback) {
|
||||
const containerConfig = platformConfig[serviceName];
|
||||
let memory, memorySwap;
|
||||
if (containerConfig && containerConfig.memory && containerConfig.memorySwap) {
|
||||
@@ -670,7 +711,7 @@ function getEnvironment(app, callback) {
|
||||
appdb.getAddonConfigByAppId(app.id, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (app.manifest.addons['docker']) result.push({ name: 'DOCKER_HOST', value: `tcp://172.18.0.1:${config.get('dockerProxyPort')}` });
|
||||
if (app.manifest.addons['docker']) result.push({ name: 'DOCKER_HOST', value: `tcp://172.18.0.1:${constants.DOCKER_PROXY_PORT}` });
|
||||
|
||||
return callback(null, result.map(function (e) { return e.name + '=' + e.value; }));
|
||||
});
|
||||
@@ -780,10 +821,12 @@ function setupOauth(app, options, callback) {
|
||||
clients.add(appId, clients.TYPE_OAUTH, redirectURI, scope, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'OAUTH_CLIENT_ID', value: result.id },
|
||||
{ name: 'OAUTH_CLIENT_SECRET', value: result.clientSecret },
|
||||
{ name: 'OAUTH_ORIGIN', value: config.adminOrigin() }
|
||||
{ name: `${envPrefix}OAUTH_CLIENT_ID`, value: result.id },
|
||||
{ name: `${envPrefix}OAUTH_CLIENT_SECRET`, value: result.clientSecret },
|
||||
{ name: `${envPrefix}OAUTH_ORIGIN`, value: settings.adminOrigin() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting oauth addon config to %j', env);
|
||||
@@ -817,17 +860,19 @@ function setupEmail(app, options, callback) {
|
||||
|
||||
const mailInDomains = mailDomains.filter(function (d) { return d.enabled; }).map(function (d) { return d.domain; }).join(',');
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
// note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
|
||||
var env = [
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_SIEVE_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SIEVE_PORT', value: '4190' },
|
||||
{ name: 'MAIL_DOMAIN', value: app.domain },
|
||||
{ name: 'MAIL_DOMAINS', value: mailInDomains },
|
||||
{ name: 'LDAP_MAILBOXES_BASE_DN', value: 'ou=mailboxes,dc=cloudron' }
|
||||
{ name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
|
||||
{ name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
|
||||
{ name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
|
||||
{ name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
|
||||
{ name: `${envPrefix}MAIL_SIEVE_SERVER`, value: 'mail' },
|
||||
{ name: `${envPrefix}MAIL_SIEVE_PORT`, value: '4190' },
|
||||
{ name: `${envPrefix}MAIL_DOMAIN`, value: app.domain },
|
||||
{ name: `${envPrefix}MAIL_DOMAINS`, value: mailInDomains },
|
||||
{ name: `${envPrefix}LDAP_MAILBOXES_BASE_DN`, value: 'ou=mailboxes,dc=cloudron' }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up Email');
|
||||
@@ -853,14 +898,16 @@ function setupLdap(app, options, callback) {
|
||||
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'LDAP_SERVER', value: '172.18.0.1' },
|
||||
{ name: 'LDAP_PORT', value: '' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_URL', value: 'ldap://172.18.0.1:' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
|
||||
{ name: 'LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_DN', value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
|
||||
{ name: `${envPrefix}LDAP_SERVER`, value: '172.18.0.1' },
|
||||
{ name: `${envPrefix}LDAP_PORT`, value: '' + constants.LDAP_PORT },
|
||||
{ name: `${envPrefix}LDAP_URL`, value: 'ldap://172.18.0.1:' + constants.LDAP_PORT },
|
||||
{ name: `${envPrefix}LDAP_USERS_BASE_DN`, value: 'ou=users,dc=cloudron' },
|
||||
{ name: `${envPrefix}LDAP_GROUPS_BASE_DN`, value: 'ou=groups,dc=cloudron' },
|
||||
{ name: `${envPrefix}LDAP_BIND_DN`, value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
|
||||
{ name: `${envPrefix}LDAP_BIND_PASSWORD`, value: hat(4 * 128) } // this is ignored
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up LDAP');
|
||||
@@ -885,19 +932,21 @@ function setupSendMail(app, options, callback) {
|
||||
|
||||
debugApp(app, 'Setting up SendMail');
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'sendmail', 'MAIL_SMTP_PASSWORD', function (error, existingPassword) {
|
||||
appdb.getAddonConfigByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
var password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_SMTPS_PORT', value: '2465' },
|
||||
{ name: 'MAIL_SMTP_USERNAME', value: app.mailboxName + '@' + app.domain },
|
||||
{ name: 'MAIL_SMTP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_FROM', value: app.mailboxName + '@' + app.domain },
|
||||
{ name: 'MAIL_DOMAIN', value: app.domain }
|
||||
{ name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
|
||||
{ name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
|
||||
{ name: `${envPrefix}MAIL_SMTPS_PORT`, value: '2465' },
|
||||
{ name: `${envPrefix}MAIL_SMTP_USERNAME`, value: app.mailboxName + '@' + app.domain },
|
||||
{ name: `${envPrefix}MAIL_SMTP_PASSWORD`, value: password },
|
||||
{ name: `${envPrefix}MAIL_FROM`, value: app.mailboxName + '@' + app.domain },
|
||||
{ name: `${envPrefix}MAIL_DOMAIN`, value: app.domain }
|
||||
];
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'sendmail', env, callback);
|
||||
@@ -921,18 +970,20 @@ function setupRecvMail(app, options, callback) {
|
||||
|
||||
debugApp(app, 'Setting up recvmail');
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'recvmail', 'MAIL_IMAP_PASSWORD', function (error, existingPassword) {
|
||||
appdb.getAddonConfigByName(app.id, 'recvmail', '%MAIL_IMAP_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
var password = error ? hat(4 * 48) : existingPassword; // see box#565 for password length
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_IMAP_USERNAME', value: app.mailboxName + '@' + app.domain },
|
||||
{ name: 'MAIL_IMAP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_TO', value: app.mailboxName + '@' + app.domain },
|
||||
{ name: 'MAIL_DOMAIN', value: app.domain }
|
||||
{ name: `${envPrefix}MAIL_IMAP_SERVER`, value: 'mail' },
|
||||
{ name: `${envPrefix}MAIL_IMAP_PORT`, value: '9993' },
|
||||
{ name: `${envPrefix}MAIL_IMAP_USERNAME`, value: app.mailboxName + '@' + app.domain },
|
||||
{ name: `${envPrefix}MAIL_IMAP_PASSWORD`, value: password },
|
||||
{ name: `${envPrefix}MAIL_TO`, value: app.mailboxName + '@' + app.domain },
|
||||
{ name: `${envPrefix}MAIL_DOMAIN`, value: app.domain }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
@@ -977,6 +1028,7 @@ function startMysql(existingInfra, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="mysql" \
|
||||
--hostname mysql \
|
||||
--net cloudron \
|
||||
--net-alias mysql \
|
||||
--log-driver syslog \
|
||||
@@ -1014,7 +1066,7 @@ function setupMySql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'Setting up mysql');
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'mysql', 'MYSQL_PASSWORD', function (error, existingPassword) {
|
||||
appdb.getAddonConfigByName(app.id, 'mysql', '%MYSQL_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
const tmp = mysqlDatabaseName(app.id);
|
||||
@@ -1033,19 +1085,21 @@ function setupMySql(app, options, callback) {
|
||||
if (error) return callback(new Error('Error setting up mysql: ' + error));
|
||||
if (response.statusCode !== 201) return callback(new Error(`Error setting up mysql. Status code: ${response.statusCode} message: ${response.body.message}`));
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'MYSQL_USERNAME', value: data.username },
|
||||
{ name: 'MYSQL_PASSWORD', value: data.password },
|
||||
{ name: 'MYSQL_HOST', value: 'mysql' },
|
||||
{ name: 'MYSQL_PORT', value: '3306' }
|
||||
{ name: `${envPrefix}MYSQL_USERNAME`, value: data.username },
|
||||
{ name: `${envPrefix}MYSQL_PASSWORD`, value: data.password },
|
||||
{ name: `${envPrefix}MYSQL_HOST`, value: 'mysql' },
|
||||
{ name: `${envPrefix}MYSQL_PORT`, value: '3306' }
|
||||
];
|
||||
|
||||
if (options.multipleDatabases) {
|
||||
env = env.concat({ name: 'MYSQL_DATABASE_PREFIX', value: `${data.prefix}_` });
|
||||
env = env.concat({ name: `${envPrefix}MYSQL_DATABASE_PREFIX`, value: `${data.prefix}_` });
|
||||
} else {
|
||||
env = env.concat(
|
||||
{ name: 'MYSQL_URL', value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
|
||||
{ name: 'MYSQL_DATABASE', value: data.database }
|
||||
{ name: `${envPrefix}MYSQL_URL`, value: `mysql://${data.username}:${data.password}@mysql/${data.database}` },
|
||||
{ name: `${envPrefix}MYSQL_DATABASE`, value: data.database }
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1189,6 +1243,7 @@ function startPostgresql(existingInfra, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="postgresql" \
|
||||
--hostname postgresql \
|
||||
--net cloudron \
|
||||
--net-alias postgresql \
|
||||
--log-driver syslog \
|
||||
@@ -1227,7 +1282,7 @@ function setupPostgreSql(app, options, callback) {
|
||||
|
||||
const { database, username } = postgreSqlNames(app.id);
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'postgresql', 'POSTGRESQL_PASSWORD', function (error, existingPassword) {
|
||||
appdb.getAddonConfigByName(app.id, 'postgresql', '%POSTGRESQL_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
const data = {
|
||||
@@ -1243,13 +1298,15 @@ function setupPostgreSql(app, options, callback) {
|
||||
if (error) return callback(new Error('Error setting up postgresql: ' + error));
|
||||
if (response.statusCode !== 201) return callback(new Error(`Error setting up postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'POSTGRESQL_URL', value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
|
||||
{ name: 'POSTGRESQL_USERNAME', value: data.username },
|
||||
{ name: 'POSTGRESQL_PASSWORD', value: data.password },
|
||||
{ name: 'POSTGRESQL_HOST', value: 'postgresql' },
|
||||
{ name: 'POSTGRESQL_PORT', value: '5432' },
|
||||
{ name: 'POSTGRESQL_DATABASE', value: data.database }
|
||||
{ name: `${envPrefix}POSTGRESQL_URL`, value: `postgres://${data.username}:${data.password}@postgresql/${data.database}` },
|
||||
{ name: `${envPrefix}POSTGRESQL_USERNAME`, value: data.username },
|
||||
{ name: `${envPrefix}POSTGRESQL_PASSWORD`, value: data.password },
|
||||
{ name: `${envPrefix}POSTGRESQL_HOST`, value: 'postgresql' },
|
||||
{ name: `${envPrefix}POSTGRESQL_PORT`, value: '5432' },
|
||||
{ name: `${envPrefix}POSTGRESQL_DATABASE`, value: data.database }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting postgresql addon config to %j', env);
|
||||
@@ -1363,6 +1420,7 @@ function startMongodb(existingInfra, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="mongodb" \
|
||||
--hostname mongodb \
|
||||
--net cloudron \
|
||||
--net-alias mongodb \
|
||||
--log-driver syslog \
|
||||
@@ -1399,13 +1457,14 @@ function setupMongoDb(app, options, callback) {
|
||||
|
||||
debugApp(app, 'Setting up mongodb');
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'mongodb', 'MONGODB_PASSWORD', function (error, existingPassword) {
|
||||
appdb.getAddonConfigByName(app.id, 'mongodb', '%MONGODB_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
const data = {
|
||||
database: app.id,
|
||||
username: app.id,
|
||||
password: error ? hat(4 * 128) : existingPassword
|
||||
password: error ? hat(4 * 128) : existingPassword,
|
||||
oplog: !!options.oplog
|
||||
};
|
||||
|
||||
getServiceDetails('mongodb', 'CLOUDRON_MONGODB_TOKEN', function (error, result) {
|
||||
@@ -1415,15 +1474,21 @@ function setupMongoDb(app, options, callback) {
|
||||
if (error) return callback(new Error('Error setting up mongodb: ' + error));
|
||||
if (response.statusCode !== 201) return callback(new Error(`Error setting up mongodb. Status code: ${response.statusCode}`));
|
||||
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
var env = [
|
||||
{ name: 'MONGODB_URL', value : `mongodb://${data.username}:${data.password}@mongodb/${data.database}` },
|
||||
{ name: 'MONGODB_USERNAME', value : data.username },
|
||||
{ name: 'MONGODB_PASSWORD', value: data.password },
|
||||
{ name: 'MONGODB_HOST', value : 'mongodb' },
|
||||
{ name: 'MONGODB_PORT', value : '27017' },
|
||||
{ name: 'MONGODB_DATABASE', value : data.database }
|
||||
{ name: `${envPrefix}MONGODB_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/${data.database}` },
|
||||
{ name: `${envPrefix}MONGODB_USERNAME`, value : data.username },
|
||||
{ name: `${envPrefix}MONGODB_PASSWORD`, value: data.password },
|
||||
{ name: `${envPrefix}MONGODB_HOST`, value : 'mongodb' },
|
||||
{ name: `${envPrefix}MONGODB_PORT`, value : '27017' },
|
||||
{ name: `${envPrefix}MONGODB_DATABASE`, value : data.database }
|
||||
];
|
||||
|
||||
if (options.oplog) {
|
||||
env.push({ name: `${envPrefix}MONGODB_OPLOG_URL`, value : `mongodb://${data.username}:${data.password}@mongodb:27017/local?authSource=${data.database}` });
|
||||
}
|
||||
|
||||
debugApp(app, 'Setting mongodb addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mongodb', env, callback);
|
||||
});
|
||||
@@ -1530,65 +1595,69 @@ function setupRedis(app, options, callback) {
|
||||
|
||||
const redisName = 'redis-' + app.id;
|
||||
|
||||
docker.inspect(redisName, function (error, result) {
|
||||
if (!error) {
|
||||
debug(`Re-using existing redis container with state: ${result.State}`);
|
||||
return callback();
|
||||
appdb.getAddonConfigByName(app.id, 'redis', '%REDIS_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
|
||||
const redisPassword = error ? hat(4 * 48) : existingPassword; // see box#362 for password length
|
||||
const redisServiceToken = hat(4 * 48);
|
||||
|
||||
// Compute redis memory limit based on app's memory limit (this is arbitrary)
|
||||
var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;
|
||||
|
||||
if (memoryLimit === -1) { // unrestricted (debug mode)
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
|
||||
memoryLimit = 150 * 1024 * 1024; // 150m
|
||||
} else {
|
||||
memoryLimit = 600 * 1024 * 1024; // 600m
|
||||
}
|
||||
|
||||
appdb.getAddonConfigByName(app.id, 'redis', 'REDIS_PASSWORD', function (error, existingPassword) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(error);
|
||||
const tag = infra.images.redis.tag;
|
||||
const label = app.fqdn;
|
||||
// note that we do not add appId label because this interferes with the stop/start app logic
|
||||
const cmd = `docker run --restart=always -d --name=${redisName} \
|
||||
--hostname ${redisName} \
|
||||
--label=location=${label} \
|
||||
--net cloudron \
|
||||
--net-alias ${redisName} \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag="${redisName}" \
|
||||
-m ${memoryLimit/2} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
|
||||
-e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
|
||||
-v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run ${tag}`;
|
||||
|
||||
const redisPassword = error ? hat(4 * 48) : existingPassword; // see box#362 for password length
|
||||
const redisServiceToken = hat(4 * 48);
|
||||
const envPrefix = app.manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
// Compute redis memory limit based on app's memory limit (this is arbitrary)
|
||||
var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;
|
||||
var env = [
|
||||
{ name: `${envPrefix}REDIS_URL`, value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
|
||||
{ name: `${envPrefix}REDIS_PASSWORD`, value: redisPassword },
|
||||
{ name: `${envPrefix}REDIS_HOST`, value: redisName },
|
||||
{ name: `${envPrefix}REDIS_PORT`, value: '6379' }
|
||||
];
|
||||
|
||||
if (memoryLimit === -1) { // unrestricted (debug mode)
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
|
||||
memoryLimit = 150 * 1024 * 1024; // 150m
|
||||
} else {
|
||||
memoryLimit = 600 * 1024 * 1024; // 600m
|
||||
}
|
||||
|
||||
const tag = infra.images.redis.tag;
|
||||
const label = app.fqdn;
|
||||
// note that we do not add appId label because this interferes with the stop/start app logic
|
||||
const cmd = `docker run --restart=always -d --name=${redisName} \
|
||||
--label=location=${label} \
|
||||
--net cloudron \
|
||||
--net-alias ${redisName} \
|
||||
--log-driver syslog \
|
||||
--log-opt syslog-address=udp://127.0.0.1:2514 \
|
||||
--log-opt syslog-format=rfc5424 \
|
||||
--log-opt tag="${redisName}" \
|
||||
-m ${memoryLimit/2} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-e CLOUDRON_REDIS_PASSWORD="${redisPassword}" \
|
||||
-e CLOUDRON_REDIS_TOKEN="${redisServiceToken}" \
|
||||
-v "${paths.PLATFORM_DATA_DIR}/redis/${app.id}:/var/lib/redis" \
|
||||
--label isCloudronManaged=true \
|
||||
--read-only -v /tmp -v /run ${tag}`;
|
||||
|
||||
var env = [
|
||||
{ name: 'REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
|
||||
{ name: 'REDIS_PASSWORD', value: redisPassword },
|
||||
{ name: 'REDIS_HOST', value: redisName },
|
||||
{ name: 'REDIS_PORT', value: '6379' }
|
||||
];
|
||||
|
||||
async.series([
|
||||
shell.exec.bind(null, 'startRedis', cmd),
|
||||
appdb.setAddonConfig.bind(null, app.id, 'redis', env),
|
||||
waitForService.bind(null, 'redis-' + app.id, 'CLOUDRON_REDIS_TOKEN')
|
||||
], function (error) {
|
||||
if (error) debug('Error setting up redis: ', error);
|
||||
callback(error);
|
||||
});
|
||||
async.series([
|
||||
(next) => {
|
||||
docker.inspect(redisName, function (inspectError, result) {
|
||||
if (!inspectError) {
|
||||
debug(`Re-using existing redis container with state: ${JSON.stringify(result.State)}`);
|
||||
return next();
|
||||
}
|
||||
shell.exec('startRedis', cmd, next);
|
||||
});
|
||||
},
|
||||
appdb.setAddonConfig.bind(null, app.id, 'redis', env),
|
||||
waitForService.bind(null, 'redis-' + app.id, 'CLOUDRON_REDIS_TOKEN')
|
||||
], function (error) {
|
||||
if (error) debug('Error setting up redis: ', error);
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -1715,3 +1784,66 @@ function restartUnbound(callback) {
|
||||
|
||||
callback(null);
|
||||
}
|
||||
|
||||
function statusNginx(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.exec('statusNginx', 'systemctl is-active nginx', function (error) {
|
||||
callback(null, { status: error ? exports.SERVICE_STATUS_STOPPED : exports.SERVICE_STATUS_ACTIVE });
|
||||
});
|
||||
}
|
||||
|
||||
function restartNginx(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.sudo('reloadnginx', [ path.join(__dirname, 'scripts/reloadnginx.sh') ], {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null);
|
||||
}
|
||||
|
||||
function statusSftp(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
docker.inspect('sftp', function (error, container) {
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
docker.memoryUsage('sftp', function (error, result) {
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
var tmp = {
|
||||
status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
|
||||
memoryUsed: result.memory_stats.usage,
|
||||
memoryPercent: parseInt(100 * result.memory_stats.usage / result.memory_stats.limit)
|
||||
};
|
||||
|
||||
callback(null, tmp);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function statusGraphite(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
docker.inspect('graphite', function (error, container) {
|
||||
if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
request.get('http://127.0.0.1:8417/graphite-web/dashboard', { timeout: 3000 }, function (error, response) {
|
||||
if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
|
||||
if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });
|
||||
|
||||
docker.memoryUsage('graphite', function (error, result) {
|
||||
if (error) return callback(new AddonsError(AddonsError.INTERNAL_ERROR, error));
|
||||
|
||||
var tmp = {
|
||||
status: container.State.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
|
||||
memoryUsed: result.memory_stats.usage,
|
||||
memoryPercent: parseInt(100 * result.memory_stats.usage / result.memory_stats.limit)
|
||||
};
|
||||
|
||||
callback(null, tmp);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
+11
-7
@@ -72,10 +72,6 @@ server {
|
||||
ssl_dhparam /home/yellowtent/boxdata/dhparams.pem;
|
||||
add_header Strict-Transport-Security "max-age=15768000";
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
proxy_hide_header X-Frame-Options;
|
||||
|
||||
# https://github.com/twitter/secureheaders
|
||||
# https://www.owasp.org/index.php/OWASP_Secure_Headers_Project#tab=Compatibility_Matrix
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
@@ -90,9 +86,17 @@ server {
|
||||
add_header Referrer-Policy "no-referrer-when-downgrade";
|
||||
proxy_hide_header Referrer-Policy;
|
||||
|
||||
# gzip responses that are > 50k and not images
|
||||
gzip on;
|
||||
gzip_min_length 50k;
|
||||
gzip_types text/css text/javascript text/xml text/plain application/javascript application/x-javascript application/json;
|
||||
|
||||
# enable for proxied requests as well
|
||||
gzip_proxied any;
|
||||
|
||||
<% if ( endpoint === 'admin' ) { -%>
|
||||
# CSP headers for the admin/dashboard resources
|
||||
add_header Content-Security-Policy "default-src 'none'; connect-src wss: https: 'self' *.cloudron.io; script-src https: 'self' 'unsafe-inline' 'unsafe-eval'; img-src * data:; style-src https: 'unsafe-inline'; object-src 'none'; font-src https: 'self'; frame-ancestors 'none'; base-uri 'none'; form-action 'self';";
|
||||
add_header Content-Security-Policy "default-src 'none'; frame-src 'self' cloudron.io *.cloudron.io; connect-src wss: https: 'self' *.cloudron.io; script-src https: 'self' 'unsafe-inline' 'unsafe-eval'; img-src * data:; style-src https: 'unsafe-inline'; object-src 'none'; font-src https: 'self'; frame-ancestors 'none'; base-uri 'none'; form-action 'self';";
|
||||
<% } -%>
|
||||
|
||||
proxy_http_version 1.1;
|
||||
@@ -159,9 +163,9 @@ server {
|
||||
client_max_body_size 0;
|
||||
}
|
||||
|
||||
# graphite paths (uncomment block below and visit /graphite/index.html)
|
||||
# graphite paths (uncomment block below and visit /graphite-web/dashboard)
|
||||
# remember to comment out the CSP policy as well to access the graphite dashboard
|
||||
# location ~ ^/(graphite|content|metrics|dashboard|render|browser|composer)/ {
|
||||
# location ~ ^/graphite-web/ {
|
||||
# proxy_pass http://127.0.0.1:8417;
|
||||
# client_max_body_size 1m;
|
||||
# }
|
||||
|
||||
+49
-141
@@ -21,36 +21,9 @@ exports = module.exports = {
|
||||
getAppIdByAddonConfigValue: getAppIdByAddonConfigValue,
|
||||
|
||||
setHealth: setHealth,
|
||||
setInstallationCommand: setInstallationCommand,
|
||||
setRunCommand: setRunCommand,
|
||||
setTask: setTask,
|
||||
getAppStoreIds: getAppStoreIds,
|
||||
|
||||
setOwner: setOwner,
|
||||
transferOwnership: transferOwnership,
|
||||
|
||||
// installation codes (keep in sync in UI)
|
||||
ISTATE_PENDING_INSTALL: 'pending_install', // installs and fresh reinstalls
|
||||
ISTATE_PENDING_CLONE: 'pending_clone', // clone
|
||||
ISTATE_PENDING_CONFIGURE: 'pending_configure', // config (location, port) changes and on infra update
|
||||
ISTATE_PENDING_UNINSTALL: 'pending_uninstall', // uninstallation
|
||||
ISTATE_PENDING_RESTORE: 'pending_restore', // restore to previous backup or on upgrade
|
||||
ISTATE_PENDING_UPDATE: 'pending_update', // update from installed state preserving data
|
||||
ISTATE_PENDING_FORCE_UPDATE: 'pending_force_update', // update from any state preserving data
|
||||
ISTATE_PENDING_BACKUP: 'pending_backup', // backup the app
|
||||
ISTATE_ERROR: 'error', // error executing last pending_* command
|
||||
ISTATE_INSTALLED: 'installed', // app is installed
|
||||
|
||||
RSTATE_RUNNING: 'running',
|
||||
RSTATE_PENDING_START: 'pending_start',
|
||||
RSTATE_PENDING_STOP: 'pending_stop',
|
||||
RSTATE_STOPPED: 'stopped', // app stopped by us
|
||||
|
||||
// run codes (keep in sync in UI)
|
||||
HEALTH_HEALTHY: 'healthy',
|
||||
HEALTH_UNHEALTHY: 'unhealthy',
|
||||
HEALTH_ERROR: 'error',
|
||||
HEALTH_DEAD: 'dead',
|
||||
|
||||
// subdomain table types
|
||||
SUBDOMAIN_TYPE_PRIMARY: 'primary',
|
||||
SUBDOMAIN_TYPE_REDIRECT: 'redirect',
|
||||
@@ -65,11 +38,12 @@ var assert = require('assert'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'subdomains.subdomain AS location', 'subdomains.domain',
|
||||
'apps.accessRestrictionJson', 'apps.restoreConfigJson', 'apps.oldConfigJson', 'apps.updateConfigJson', 'apps.memoryLimit',
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson', 'apps.robotsTxt', 'apps.enableBackup',
|
||||
'apps.creationTime', 'apps.updateTime', 'apps.ownerId', 'apps.mailboxName', 'apps.enableAutomaticUpdate',
|
||||
'apps.accessRestrictionJson', 'apps.memoryLimit',
|
||||
'apps.label', 'apps.tagsJson', 'apps.taskId',
|
||||
'apps.sso', 'apps.debugModeJson', 'apps.robotsTxt', 'apps.enableBackup',
|
||||
'apps.creationTime', 'apps.updateTime', 'apps.mailboxName', 'apps.enableAutomaticUpdate',
|
||||
'apps.dataDir', 'apps.ts', 'apps.healthTime' ].join(',');
|
||||
|
||||
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId' ].join(',');
|
||||
@@ -83,17 +57,9 @@ function postProcess(result) {
|
||||
result.manifest = safe.JSON.parse(result.manifestJson);
|
||||
delete result.manifestJson;
|
||||
|
||||
assert(result.oldConfigJson === null || typeof result.oldConfigJson === 'string');
|
||||
result.oldConfig = safe.JSON.parse(result.oldConfigJson);
|
||||
delete result.oldConfigJson;
|
||||
|
||||
assert(result.updateConfigJson === null || typeof result.updateConfigJson === 'string');
|
||||
result.updateConfig = safe.JSON.parse(result.updateConfigJson);
|
||||
delete result.updateConfigJson;
|
||||
|
||||
assert(result.restoreConfigJson === null || typeof result.restoreConfigJson === 'string');
|
||||
result.restoreConfig = safe.JSON.parse(result.restoreConfigJson);
|
||||
delete result.restoreConfigJson;
|
||||
assert(result.tagsJson === null || typeof result.tagsJson === 'string');
|
||||
result.tags = safe.JSON.parse(result.tagsJson) || [];
|
||||
delete result.tagsJson;
|
||||
|
||||
assert(result.hostPorts === null || typeof result.hostPorts === 'string');
|
||||
assert(result.environmentVariables === null || typeof result.environmentVariables === 'string');
|
||||
@@ -116,9 +82,6 @@ function postProcess(result) {
|
||||
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
|
||||
delete result.accessRestrictionJson;
|
||||
|
||||
// TODO remove later once all apps have this attribute
|
||||
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
|
||||
|
||||
result.sso = !!result.sso; // make it bool
|
||||
result.enableBackup = !!result.enableBackup; // make it bool
|
||||
result.enableAutomaticUpdate = !!result.enableAutomaticUpdate; // make it bool
|
||||
@@ -141,8 +104,10 @@ function postProcess(result) {
|
||||
if (envNames[i]) result.env[envNames[i]] = envValues[i];
|
||||
}
|
||||
|
||||
// in the db, we store dataDir as unique/nullable
|
||||
result.dataDir = result.dataDir || '';
|
||||
result.error = safe.JSON.parse(result.errorJson);
|
||||
delete result.errorJson;
|
||||
|
||||
result.taskId = result.taskId ? String(result.taskId) : null;
|
||||
}
|
||||
|
||||
function get(id, callback) {
|
||||
@@ -255,14 +220,13 @@ function getAll(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function add(id, appStoreId, manifest, location, domain, ownerId, portBindings, data, callback) {
|
||||
function add(id, appStoreId, manifest, location, domain, portBindings, data, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof appStoreId, 'string');
|
||||
assert(manifest && typeof manifest === 'object');
|
||||
assert.strictEqual(typeof manifest.version, 'string');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof ownerId, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
assert(data && typeof data === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -271,24 +235,27 @@ function add(id, appStoreId, manifest, location, domain, ownerId, portBindings,
|
||||
|
||||
var manifestJson = JSON.stringify(manifest);
|
||||
|
||||
var accessRestriction = data.accessRestriction || null;
|
||||
var accessRestrictionJson = JSON.stringify(accessRestriction);
|
||||
var memoryLimit = data.memoryLimit || 0;
|
||||
var xFrameOptions = data.xFrameOptions || '';
|
||||
var installationState = data.installationState || exports.ISTATE_PENDING_INSTALL;
|
||||
var restoreConfigJson = data.restoreConfig ? JSON.stringify(data.restoreConfig) : null; // used when cloning
|
||||
var sso = 'sso' in data ? data.sso : null;
|
||||
var robotsTxt = 'robotsTxt' in data ? data.robotsTxt : null;
|
||||
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
|
||||
var env = data.env || {};
|
||||
const accessRestriction = data.accessRestriction || null;
|
||||
const accessRestrictionJson = JSON.stringify(accessRestriction);
|
||||
const memoryLimit = data.memoryLimit || 0;
|
||||
const installationState = data.installationState;
|
||||
const runState = data.runState;
|
||||
const sso = 'sso' in data ? data.sso : null;
|
||||
const robotsTxt = 'robotsTxt' in data ? data.robotsTxt : null;
|
||||
const debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
|
||||
const env = data.env || {};
|
||||
const label = data.label || null;
|
||||
const tagsJson = data.tags ? JSON.stringify(data.tags) : null;
|
||||
const mailboxName = data.mailboxName || null;
|
||||
|
||||
var queries = [];
|
||||
|
||||
queries.push({
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, accessRestrictionJson, memoryLimit, xFrameOptions, restoreConfigJson, sso, debugModeJson, robotsTxt, ownerId, mailboxName) ' +
|
||||
' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, accessRestrictionJson, memoryLimit, xFrameOptions, restoreConfigJson, sso, debugModeJson, robotsTxt, ownerId, mailboxName ]
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, '
|
||||
+ 'sso, debugModeJson, robotsTxt, mailboxName, label, tagsJson) '
|
||||
+ ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit,
|
||||
sso, debugModeJson, robotsTxt, mailboxName, label, tagsJson ]
|
||||
});
|
||||
|
||||
queries.push({
|
||||
@@ -414,6 +381,7 @@ function updateWithConstraints(id, app, constraints, callback) {
|
||||
assert(!('portBindings' in app) || typeof app.portBindings === 'object');
|
||||
assert(!('accessRestriction' in app) || typeof app.accessRestriction === 'object' || app.accessRestriction === '');
|
||||
assert(!('alternateDomains' in app) || Array.isArray(app.alternateDomains));
|
||||
assert(!('tags' in app) || Array.isArray(app.tags));
|
||||
assert(!('env' in app) || typeof app.env === 'object');
|
||||
|
||||
var queries = [ ];
|
||||
@@ -452,7 +420,7 @@ function updateWithConstraints(id, app, constraints, callback) {
|
||||
|
||||
var fields = [ ], values = [ ];
|
||||
for (var p in app) {
|
||||
if (p === 'manifest' || p === 'oldConfig' || p === 'updateConfig' || p === 'restoreConfig' || p === 'accessRestriction' || p === 'debugMode') {
|
||||
if (p === 'manifest' || p === 'tags' || p === 'accessRestriction' || p === 'debugMode' || p === 'error') {
|
||||
fields.push(`${p}Json = ?`);
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p !== 'portBindings' && p !== 'location' && p !== 'domain' && p !== 'alternateDomains' && p !== 'env') {
|
||||
@@ -475,7 +443,6 @@ function updateWithConstraints(id, app, constraints, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// not sure if health should influence runState
|
||||
function setHealth(appId, health, healthTime, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof health, 'string');
|
||||
@@ -484,53 +451,22 @@ function setHealth(appId, health, healthTime, callback) {
|
||||
|
||||
var values = { health, healthTime };
|
||||
|
||||
var constraints = 'AND runState NOT LIKE "pending_%" AND installationState = "installed"';
|
||||
|
||||
updateWithConstraints(appId, values, constraints, callback);
|
||||
updateWithConstraints(appId, values, '', callback);
|
||||
}
|
||||
|
||||
function setInstallationCommand(appId, installationState, values, callback) {
|
||||
function setTask(appId, values, options, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof installationState, 'string');
|
||||
|
||||
if (typeof values === 'function') {
|
||||
callback = values;
|
||||
values = { };
|
||||
} else {
|
||||
assert.strictEqual(typeof values, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
}
|
||||
|
||||
values.installationState = installationState;
|
||||
values.installationProgress = '';
|
||||
|
||||
// Rules are:
|
||||
// uninstall is allowed in any state
|
||||
// force update is allowed in any state including pending_uninstall! (for better or worse)
|
||||
// restore is allowed from installed or error state or currently restoring
|
||||
// configure is allowed in installed state or currently configuring or in error state
|
||||
// update and backup are allowed only in installed state
|
||||
|
||||
if (installationState === exports.ISTATE_PENDING_UNINSTALL || installationState === exports.ISTATE_PENDING_FORCE_UPDATE) {
|
||||
updateWithConstraints(appId, values, '', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_RESTORE) {
|
||||
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "error" OR installationState = "pending_restore")', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_UPDATE || installationState === exports.ISTATE_PENDING_BACKUP) {
|
||||
updateWithConstraints(appId, values, 'AND installationState = "installed"', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_CONFIGURE) {
|
||||
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "pending_configure" OR installationState = "error")', callback);
|
||||
} else {
|
||||
callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, 'invalid installationState'));
|
||||
}
|
||||
}
|
||||
|
||||
function setRunCommand(appId, runState, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof runState, 'string');
|
||||
assert.strictEqual(typeof values, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var values = { runState: runState };
|
||||
updateWithConstraints(appId, values, 'AND runState NOT LIKE "pending_%" AND installationState = "installed"', callback);
|
||||
if (!options.requireNullTaskId) return updateWithConstraints(appId, values, '', callback);
|
||||
|
||||
if (options.requiredState === null) {
|
||||
updateWithConstraints(appId, values, 'AND taskId IS NULL', callback);
|
||||
} else {
|
||||
updateWithConstraints(appId, values, `AND taskId IS NULL AND installationState = "${options.requiredState}"`, callback);
|
||||
}
|
||||
}
|
||||
|
||||
function getAppStoreIds(callback) {
|
||||
@@ -615,13 +551,13 @@ function getAddonConfigByAppId(appId, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getAppIdByAddonConfigValue(addonId, name, value, callback) {
|
||||
function getAppIdByAddonConfigValue(addonId, namePattern, value, callback) {
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof namePattern, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT appId FROM appAddonConfigs WHERE addonId = ? AND name = ? AND value = ?', [ addonId, name, value ], function (error, results) {
|
||||
database.query('SELECT appId FROM appAddonConfigs WHERE addonId = ? AND name LIKE ? AND value = ?', [ addonId, namePattern, value ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
@@ -629,44 +565,16 @@ function getAppIdByAddonConfigValue(addonId, name, value, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getAddonConfigByName(appId, addonId, name, callback) {
|
||||
function getAddonConfigByName(appId, addonId, namePattern, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof namePattern, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ? AND name = ?', [ appId, addonId, name ], function (error, results) {
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ? AND name LIKE ?', [ appId, addonId, namePattern ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null, results[0].value);
|
||||
});
|
||||
}
|
||||
|
||||
function setOwner(appId, ownerId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof ownerId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('UPDATE apps SET ownerId=? WHERE appId=?', [ ownerId, appId ], function (error, results) {
|
||||
if (error && error.code === 'ER_NO_REFERENCED_ROW_2') return callback(new DatabaseError(DatabaseError.NOT_FOUND, 'No such user'));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND, 'No such app'));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function transferOwnership(oldOwnerId, newOwnerId, callback) {
|
||||
assert.strictEqual(typeof oldOwnerId, 'string');
|
||||
assert.strictEqual(typeof newOwnerId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('UPDATE apps SET ownerId=? WHERE ownerId=?', [ newOwnerId, oldOwnerId ], function (error) {
|
||||
if (error && error.code === 'ER_NO_REFERENCED_ROW_2') return callback(new DatabaseError(DatabaseError.NOT_FOUND, 'No such user'));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
+41
-31
@@ -4,10 +4,12 @@ var appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
auditSource = require('./auditsource.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:apphealthmonitor'),
|
||||
docker = require('./docker.js').connection,
|
||||
docker = require('./docker.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
@@ -18,11 +20,9 @@ exports = module.exports = {
|
||||
const HEALTHCHECK_INTERVAL = 10 * 1000; // every 10 seconds. this needs to be small since the UI makes only healthy apps clickable
|
||||
const UNHEALTHY_THRESHOLD = 10 * 60 * 1000; // 10 minutes
|
||||
|
||||
const OOM_MAIL_LIMIT = 60 * 60 * 1000; // 60 minutes
|
||||
const OOM_EVENT_LIMIT = 60 * 60 * 1000; // 60 minutes
|
||||
let gLastOomMailTime = Date.now() - (5 * 60 * 1000); // pretend we sent email 5 minutes ago
|
||||
|
||||
const AUDIT_SOURCE = { userId: null, username: 'healthmonitor' };
|
||||
|
||||
function debugApp(app) {
|
||||
assert(typeof app === 'object');
|
||||
|
||||
@@ -36,20 +36,20 @@ function setHealth(app, health, callback) {
|
||||
|
||||
let now = new Date(), healthTime = app.healthTime, curHealth = app.health;
|
||||
|
||||
if (health === appdb.HEALTH_HEALTHY) {
|
||||
if (health === apps.HEALTH_HEALTHY) {
|
||||
healthTime = now;
|
||||
if (curHealth && curHealth !== appdb.HEALTH_HEALTHY) { // app starts out with null health
|
||||
if (curHealth && curHealth !== apps.HEALTH_HEALTHY) { // app starts out with null health
|
||||
debugApp(app, 'app switched from %s to healthy', curHealth);
|
||||
|
||||
// do not send mails for dev apps
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, AUDIT_SOURCE, { app: app });
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, auditSource.HEALTH_MONITOR, { app: app });
|
||||
}
|
||||
} else if (Math.abs(now - healthTime) > UNHEALTHY_THRESHOLD) {
|
||||
if (curHealth === appdb.HEALTH_HEALTHY) {
|
||||
if (curHealth === apps.HEALTH_HEALTHY) {
|
||||
debugApp(app, 'marking as unhealthy since not seen for more than %s minutes', UNHEALTHY_THRESHOLD/(60 * 1000));
|
||||
|
||||
// do not send mails for dev apps
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, AUDIT_SOURCE, { app: app });
|
||||
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, auditSource.HEALTH_MONITOR, { app: app });
|
||||
}
|
||||
} else {
|
||||
debugApp(app, 'waiting for %s seconds to update the app health', (UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000);
|
||||
@@ -72,50 +72,61 @@ function checkAppHealth(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (app.installationState !== appdb.ISTATE_INSTALLED || app.runState !== appdb.RSTATE_RUNNING) {
|
||||
if (app.installationState !== apps.ISTATE_INSTALLED || app.runState !== apps.RSTATE_RUNNING) {
|
||||
debugApp(app, 'skipped. istate:%s rstate:%s', app.installationState, app.runState);
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
var container = docker.getContainer(app.containerId),
|
||||
manifest = app.manifest;
|
||||
const manifest = app.manifest;
|
||||
|
||||
container.inspect(function (err, data) {
|
||||
if (err || !data || !data.State) {
|
||||
docker.inspect(app.containerId, function (error, data) {
|
||||
if (error || !data || !data.State) {
|
||||
debugApp(app, 'Error inspecting container');
|
||||
return setHealth(app, appdb.HEALTH_ERROR, callback);
|
||||
return setHealth(app, apps.HEALTH_ERROR, callback);
|
||||
}
|
||||
|
||||
if (data.State.Running !== true) {
|
||||
debugApp(app, 'exited');
|
||||
return setHealth(app, appdb.HEALTH_DEAD, callback);
|
||||
return setHealth(app, apps.HEALTH_DEAD, callback);
|
||||
}
|
||||
|
||||
// non-appstore apps may not have healthCheckPath
|
||||
if (!manifest.healthCheckPath) return setHealth(app, appdb.HEALTH_HEALTHY, callback);
|
||||
if (!manifest.healthCheckPath) return setHealth(app, apps.HEALTH_HEALTHY, callback);
|
||||
|
||||
// poll through docker network instead of nginx to bypass any potential oauth proxy
|
||||
var healthCheckUrl = 'http://127.0.0.1:' + app.httpPort + manifest.healthCheckPath;
|
||||
superagent
|
||||
.get(healthCheckUrl)
|
||||
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
|
||||
.set('User-Agent', 'Mozilla') // required for some apps (e.g. minio)
|
||||
.set('User-Agent', 'Mozilla (CloudronHealth)') // required for some apps (e.g. minio)
|
||||
.redirects(0)
|
||||
.timeout(HEALTHCHECK_INTERVAL)
|
||||
.end(function (error, res) {
|
||||
if (error && !error.response) {
|
||||
debugApp(app, 'not alive (network error): %s', error.message);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
setHealth(app, apps.HEALTH_UNHEALTHY, callback);
|
||||
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
|
||||
debugApp(app, 'not alive : %s', error || res.status);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
setHealth(app, apps.HEALTH_UNHEALTHY, callback);
|
||||
} else {
|
||||
setHealth(app, appdb.HEALTH_HEALTHY, callback);
|
||||
setHealth(app, apps.HEALTH_HEALTHY, callback);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getContainerInfo(containerId, callback) {
|
||||
docker.inspect(containerId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const appId = safe.query(result, 'Config.Labels.appId', null);
|
||||
|
||||
if (!appId) return callback(null, null /* app */, { name: result.Name }); // addon
|
||||
|
||||
apps.get(appId, callback); // don't get by container id as this can be an exec container
|
||||
});
|
||||
}
|
||||
|
||||
/*
|
||||
OOM can be tested using stress tool like so:
|
||||
docker run -ti -m 100M cloudron/base:0.10.0 /bin/bash
|
||||
@@ -134,21 +145,20 @@ function processDockerEvents(intervalSecs, callback) {
|
||||
|
||||
stream.setEncoding('utf8');
|
||||
stream.on('data', function (data) {
|
||||
var ev = JSON.parse(data);
|
||||
var containerId = ev.id;
|
||||
const event = JSON.parse(data);
|
||||
const containerId = String(event.id);
|
||||
|
||||
appdb.getByContainerId(containerId, function (error, app) { // this can error for addons
|
||||
var program = error || !app.id ? containerId : `app-${app.id}`;
|
||||
var now = Date.now();
|
||||
getContainerInfo(containerId, function (error, app, addon) {
|
||||
const program = error ? containerId : (app ? app.fqdn : addon.name);
|
||||
const now = Date.now();
|
||||
const notifyUser = !(app && app.debugMode) && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);
|
||||
|
||||
const notifyUser = (!app || !app.debugMode) && (now - gLastOomMailTime > OOM_MAIL_LIMIT);
|
||||
|
||||
debug('OOM %s notifyUser: %s. lastOomTime: %s (now: %s)', program, notifyUser, gLastOomMailTime, now, ev);
|
||||
debug('OOM %s notifyUser: %s. lastOomTime: %s (now: %s)', program, notifyUser, gLastOomMailTime, now);
|
||||
|
||||
// do not send mails for dev apps
|
||||
if (notifyUser) {
|
||||
// app can be null for addon containers
|
||||
eventlog.add(eventlog.ACTION_APP_OOM, AUDIT_SOURCE, { ev: ev, containerId: containerId, app: app || null });
|
||||
eventlog.add(eventlog.ACTION_APP_OOM, auditSource.HEALTH_MONITOR, { event: event, containerId: containerId, addon: addon || null, app: app || null });
|
||||
|
||||
gLastOomMailTime = now;
|
||||
}
|
||||
@@ -177,7 +187,7 @@ function processApp(callback) {
|
||||
if (error) console.error(error);
|
||||
|
||||
var alive = result
|
||||
.filter(function (a) { return a.installationState === appdb.ISTATE_INSTALLED && a.runState === appdb.RSTATE_RUNNING && a.health === appdb.HEALTH_HEALTHY; })
|
||||
.filter(function (a) { return a.installationState === apps.ISTATE_INSTALLED && a.runState === apps.RSTATE_RUNNING && a.health === apps.HEALTH_HEALTHY; })
|
||||
.map(function (a) { return (a.location || 'naked_domain') + '|' + a.manifest.id; }).join(', ');
|
||||
|
||||
debug('apps alive: [%s]', alive);
|
||||
|
||||
+1002
-456
File diff suppressed because it is too large
Load Diff
+260
-67
@@ -1,8 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
purchase: purchase,
|
||||
unpurchase: unpurchase,
|
||||
getApps: getApps,
|
||||
getApp: getApp,
|
||||
getAppVersion: getAppVersion,
|
||||
|
||||
registerWithLoginCredentials: registerWithLoginCredentials,
|
||||
registerWithLicense: registerWithLicense,
|
||||
|
||||
purchaseApp: purchaseApp,
|
||||
unpurchaseApp: unpurchaseApp,
|
||||
|
||||
getSubscription: getSubscription,
|
||||
isFreePlan: isFreePlan,
|
||||
@@ -12,9 +19,7 @@ exports = module.exports = {
|
||||
getAppUpdate: getAppUpdate,
|
||||
getBoxUpdate: getBoxUpdate,
|
||||
|
||||
getAccount: getAccount,
|
||||
|
||||
sendFeedback: sendFeedback,
|
||||
createTicket: createTicket,
|
||||
|
||||
AppstoreError: AppstoreError
|
||||
};
|
||||
@@ -22,16 +27,20 @@ exports = module.exports = {
|
||||
var apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
custom = require('./custom.js'),
|
||||
debug = require('debug')('box:appstore'),
|
||||
domains = require('./domains.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
groups = require('./groups.js'),
|
||||
mail = require('./mail.js'),
|
||||
os = require('os'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
users = require('./users.js'),
|
||||
util = require('util');
|
||||
|
||||
function AppstoreError(reason, errorOrMessage) {
|
||||
@@ -55,37 +64,85 @@ function AppstoreError(reason, errorOrMessage) {
|
||||
util.inherits(AppstoreError, Error);
|
||||
AppstoreError.INTERNAL_ERROR = 'Internal Error';
|
||||
AppstoreError.EXTERNAL_ERROR = 'External Error';
|
||||
AppstoreError.NOT_FOUND = 'Internal Error';
|
||||
AppstoreError.BILLING_REQUIRED = 'Billing Required';
|
||||
AppstoreError.ALREADY_EXISTS = 'Already Exists';
|
||||
AppstoreError.ACCESS_DENIED = 'Access Denied';
|
||||
AppstoreError.NOT_FOUND = 'Not Found';
|
||||
AppstoreError.PLAN_LIMIT = 'Plan limit reached'; // upstream 402 (subsciption_expired and subscription_required)
|
||||
AppstoreError.LICENSE_ERROR = 'License Error'; // upstream 422 (no license, invalid license)
|
||||
AppstoreError.INVALID_TOKEN = 'Invalid token'; // upstream 401 (invalid token)
|
||||
AppstoreError.NOT_REGISTERED = 'Not registered'; // upstream 412 (no token, not set yet)
|
||||
AppstoreError.ALREADY_REGISTERED = 'Already registered';
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function getAppstoreConfig(callback) {
|
||||
function getCloudronToken(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
settings.getCloudronToken(function (error, token) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (!token) return callback(new AppstoreError(AppstoreError.NOT_REGISTERED));
|
||||
|
||||
callback(null, result);
|
||||
callback(null, token);
|
||||
});
|
||||
}
|
||||
|
||||
function login(email, password, totpToken, callback) {
|
||||
assert.strictEqual(typeof email, 'string');
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
assert.strictEqual(typeof totpToken, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var data = {
|
||||
email: email,
|
||||
password: password,
|
||||
totpToken: totpToken
|
||||
};
|
||||
|
||||
const url = settings.apiServerOrigin() + '/api/v1/login';
|
||||
superagent.post(url).send(data).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.ACCESS_DENIED));
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, `login status code: ${result.statusCode}`));
|
||||
|
||||
callback(null, result.body); // { userId, accessToken }
|
||||
});
|
||||
}
|
||||
|
||||
function registerUser(email, password, callback) {
|
||||
assert.strictEqual(typeof email, 'string');
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var data = {
|
||||
email: email,
|
||||
password: password,
|
||||
};
|
||||
|
||||
const url = settings.apiServerOrigin() + '/api/v1/register_user';
|
||||
superagent.post(url).send(data).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 409) return callback(new AppstoreError(AppstoreError.ALREADY_EXISTS));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, `register status code: ${result.statusCode}`));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function getSubscription(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/subscription';
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
const url = settings.apiServerOrigin() + '/api/v1/subscription';
|
||||
superagent.get(url).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'invalid appstore token'));
|
||||
if (result.statusCode === 403) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'wrong user'));
|
||||
if (result.statusCode === 502) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'stripe error'));
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'unknown error'));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR));
|
||||
if (result.statusCode === 502) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, `Stripe error: ${error.message}`));
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, `Unknown error: ${error.message}`));
|
||||
|
||||
callback(null, result.body.subscription);
|
||||
callback(null, result.body); // { email, subscription }
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -95,22 +152,24 @@ function isFreePlan(subscription) {
|
||||
}
|
||||
|
||||
// See app.js install it will create a db record first but remove it again if appstore purchase fails
|
||||
function purchase(appId, data, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
function purchaseApp(data, callback) {
|
||||
assert.strictEqual(typeof data, 'object'); // { appstoreId, manifestId, appId }
|
||||
assert(data.appstoreId || data.manifestId);
|
||||
assert.strictEqual(typeof data.appId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps`;
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
superagent.post(url).send(data).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 402) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED, result.body.message));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND)); // appstoreId does not exist
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 402) return callback(new AppstoreError(AppstoreError.PLAN_LIMIT, result.body.message));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
// 200 if already purchased, 201 is newly purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -118,26 +177,27 @@ function purchase(appId, data, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function unpurchase(appId, data, callback) {
|
||||
function unpurchaseApp(appId, data, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
assert.strictEqual(typeof data, 'object'); // { appstoreId, manifestId }
|
||||
assert(data.appstoreId || data.manifestId);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/cloudronapps/${appId}`;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
superagent.get(url).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
superagent.del(url).send(data).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode !== 204) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -149,7 +209,7 @@ function unpurchase(appId, data, callback) {
|
||||
function sendAliveStatus(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
var allSettings, allDomains, mailDomains, loginEvents;
|
||||
let allSettings, allDomains, mailDomains, loginEvents, userCount, groupCount;
|
||||
|
||||
async.series([
|
||||
function (callback) {
|
||||
@@ -179,6 +239,20 @@ function sendAliveStatus(callback) {
|
||||
loginEvents = result;
|
||||
callback();
|
||||
});
|
||||
},
|
||||
function (callback) {
|
||||
users.count(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
userCount = result;
|
||||
callback();
|
||||
});
|
||||
},
|
||||
function (callback) {
|
||||
groups.count(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
groupCount = result;
|
||||
callback();
|
||||
});
|
||||
}
|
||||
], function (error) {
|
||||
if (error) return callback(error);
|
||||
@@ -198,15 +272,17 @@ function sendAliveStatus(callback) {
|
||||
catchAllCount: mailDomains.filter(function (d) { return d.catchAll.length !== 0; }).length,
|
||||
relayProviders: Array.from(new Set(mailDomains.map(function (d) { return d.relay.provider; })))
|
||||
},
|
||||
userCount: userCount,
|
||||
groupCount: groupCount,
|
||||
appAutoupdatePattern: allSettings[settings.APP_AUTOUPDATE_PATTERN_KEY],
|
||||
boxAutoupdatePattern: allSettings[settings.BOX_AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: allSettings[settings.TIME_ZONE_KEY],
|
||||
};
|
||||
|
||||
var data = {
|
||||
version: config.version(),
|
||||
adminFqdn: config.adminFqdn(),
|
||||
provider: config.provider(),
|
||||
version: constants.VERSION,
|
||||
adminFqdn: settings.adminFqdn(),
|
||||
provider: sysinfo.provider(),
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
@@ -217,13 +293,15 @@ function sendAliveStatus(callback) {
|
||||
}
|
||||
};
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/alive';
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/alive`;
|
||||
superagent.post(url).send(data).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -235,23 +313,32 @@ function sendAliveStatus(callback) {
|
||||
function getBoxUpdate(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/boxupdate';
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/boxupdate`;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version() }).timeout(10 * 1000).end(function (error, result) {
|
||||
superagent.get(url).query({ accessToken: token, boxVersion: constants.VERSION }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200 || !result.body) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
var updateInfo = result.body;
|
||||
|
||||
if (!semver.valid(updateInfo.version) || semver.gt(config.version(), updateInfo.version)) {
|
||||
if (!semver.valid(updateInfo.version) || semver.gt(constants.VERSION, updateInfo.version)) {
|
||||
return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Invalid update version: %s %s', result.statusCode, result.text)));
|
||||
}
|
||||
|
||||
// updateInfo: { version, changelog, upgrade, sourceTarballUrl, sourceTarballSigUrl, boxVersionsUrl, boxVersionsSigUrl }
|
||||
// updateInfo: { version, changelog, sourceTarballUrl, sourceTarballSigUrl, boxVersionsUrl, boxVersionsSigUrl }
|
||||
if (!updateInfo.version || typeof updateInfo.version !== 'string') return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad version): %s %s', result.statusCode, result.text)));
|
||||
if (!updateInfo.changelog || !Array.isArray(updateInfo.changelog)) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad version): %s %s', result.statusCode, result.text)));
|
||||
if (!updateInfo.sourceTarballUrl || typeof updateInfo.sourceTarballUrl !== 'string') return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad sourceTarballUrl): %s %s', result.statusCode, result.text)));
|
||||
if (!updateInfo.sourceTarballSigUrl || typeof updateInfo.sourceTarballSigUrl !== 'string') return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad sourceTarballSigUrl): %s %s', result.statusCode, result.text)));
|
||||
if (!updateInfo.boxVersionsUrl || typeof updateInfo.boxVersionsUrl !== 'string') return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad boxVersionsUrl): %s %s', result.statusCode, result.text)));
|
||||
if (!updateInfo.boxVersionsSigUrl || typeof updateInfo.boxVersionsSigUrl !== 'string') return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response (bad boxVersionsSigUrl): %s %s', result.statusCode, result.text)));
|
||||
|
||||
callback(null, updateInfo);
|
||||
});
|
||||
});
|
||||
@@ -261,13 +348,15 @@ function getAppUpdate(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/appupdate';
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/appupdate`;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version(), appId: app.appStoreId, appVersion: app.manifest.version }).timeout(10 * 1000).end(function (error, result) {
|
||||
superagent.get(url).query({ accessToken: token, boxVersion: constants.VERSION, appId: app.appStoreId, appVersion: app.manifest.version }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200 || !result.body) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
@@ -288,25 +377,73 @@ function getAppUpdate(app, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getAccount(callback) {
|
||||
function registerCloudron(data, callback) {
|
||||
assert.strictEqual(typeof data, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
const url = `${settings.apiServerOrigin()}/api/v1/register_cloudron`;
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId;
|
||||
superagent.post(url).send(data).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, `Unable to register cloudron: ${error.message}`));
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
// cloudronId, token, licenseKey
|
||||
if (!result.body.cloudronId) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'Invalid response - no cloudron id'));
|
||||
if (!result.body.cloudronToken) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'Invalid response - no token'));
|
||||
if (!result.body.licenseKey) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'Invalid response - no license'));
|
||||
|
||||
// { profile: { id, email, groupId, billing, firstName, lastName, company, street, city, zip, state, country } }
|
||||
callback(null, result.body.profile);
|
||||
async.series([
|
||||
settings.setCloudronId.bind(null, result.body.cloudronId),
|
||||
settings.setCloudronToken.bind(null, result.body.cloudronToken),
|
||||
settings.setLicenseKey.bind(null, result.body.licenseKey),
|
||||
], function (error) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
|
||||
debug(`registerCloudron: Cloudron registered with id ${result.body.cloudronId}`);
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function sendFeedback(info, callback) {
|
||||
// Registers this Cloudron with the appstore using a license key.
// Fails with ALREADY_REGISTERED when a cloudron token already exists.
function registerWithLicense(license, domain, callback) {
    assert.strictEqual(typeof license, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof callback, 'function');

    getCloudronToken(function (error, token) {
        // an existing token means registration already happened; an error
        // (no token yet) is the expected unregistered state here
        if (token) return callback(new AppstoreError(AppstoreError.ALREADY_REGISTERED));

        registerCloudron({ license: license, domain: domain }, callback);
    });
}
|
||||
|
||||
// Registers this Cloudron via appstore login credentials. When options.signup
// is set, a new appstore account is created first. Fails with
// ALREADY_REGISTERED when a cloudron token already exists.
function registerWithLoginCredentials(options, callback) {
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    getCloudronToken(function (error, token) {
        if (token) return callback(new AppstoreError(AppstoreError.ALREADY_REGISTERED));

        // optional signup step; skipped entirely unless requested
        const maybeSignup = function (done) {
            if (!options.signup) return done();

            registerUser(options.email, options.password, done);
        };

        maybeSignup(function (error) {
            if (error) return callback(error);

            login(options.email, options.password, options.totpToken || '', function (error, result) {
                if (error) return callback(error);

                registerCloudron({ domain: settings.adminDomain(), accessToken: result.accessToken }, callback);
            });
        });
    });
}
|
||||
|
||||
function createTicket(info, callback) {
|
||||
assert.strictEqual(typeof info, 'object');
|
||||
assert.strictEqual(typeof info.email, 'string');
|
||||
assert.strictEqual(typeof info.displayName, 'string');
|
||||
@@ -320,17 +457,21 @@ function sendFeedback(info, callback) {
|
||||
apps.get(info.appId, callback);
|
||||
}
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
getCloudronToken(function (error, token) {
|
||||
if (error) return callback(error);
|
||||
|
||||
collectAppInfoIfNeeded(function (error, result) {
|
||||
if (error) console.error('Unable to get app info', error);
|
||||
if (result) info.app = result;
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/feedback';
|
||||
let url = settings.apiServerOrigin() + '/api/v1/ticket';
|
||||
|
||||
superagent.post(url).query({ accessToken: appstoreConfig.token }).send(info).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
info.supportEmail = custom.spec().support.email; // destination address for tickets
|
||||
|
||||
superagent.post(url).query({ accessToken: token }).send(info).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
|
||||
if (result.statusCode === 401) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
|
||||
if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
callback(null);
|
||||
@@ -338,3 +479,55 @@ function sendFeedback(info, callback) {
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Lists the apps available to this Cloudron from the appstore. Honors the
// 'unstable apps' setting so pre-release apps can be included in the listing.
function getApps(callback) {
    assert.strictEqual(typeof callback, 'function');

    getCloudronToken(function (error, token) {
        if (error) return callback(error);

        settings.getUnstableAppsConfig(function (error, unstable) {
            if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));

            const url = `${settings.apiServerOrigin()}/api/v1/apps`;
            const query = { accessToken: token, boxVersion: constants.VERSION, unstable: unstable };

            superagent.get(url).query(query).timeout(10 * 1000).end(function (error, result) {
                if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));
                if (result.statusCode === 401 || result.statusCode === 403) return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
                if (result.statusCode === 422) return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
                if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App listing failed. %s %j', result.status, result.body)));
                if (!result.body.apps) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));

                callback(null, result.body.apps);
            });
        });
    });
}
|
||||
|
||||
// Fetches metadata of one app version from the appstore. Pass 'latest' as
// version to resolve the newest published version server-side.
function getAppVersion(appId, version, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof version, 'string');
    assert.strictEqual(typeof callback, 'function');

    getCloudronToken(function (error, token) {
        if (error) return callback(error);

        const versionSuffix = version === 'latest' ? '' : `/versions/${version}`;
        const url = `${settings.apiServerOrigin()}/api/v1/apps/${appId}${versionSuffix}`;

        superagent.get(url).query({ accessToken: token }).timeout(10 * 1000).end(function (error, result) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));

            switch (result.statusCode) {
            case 401:
            case 403:
                return callback(new AppstoreError(AppstoreError.INVALID_TOKEN));
            case 404:
                return callback(new AppstoreError(AppstoreError.NOT_FOUND));
            case 422:
                return callback(new AppstoreError(AppstoreError.LICENSE_ERROR, result.body.message));
            case 200:
                return callback(null, result.body);
            default:
                return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App fetch failed. %s %j', result.status, result.body)));
            }
        });
    });
}
|
||||
|
||||
// Convenience wrapper: fetches the latest published version of an app.
function getApp(appId, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof callback, 'function');

    getAppVersion(appId, 'latest', callback); // 'latest' is resolved server-side
}
|
||||
|
||||
+608
-471
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,86 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
scheduleTask: scheduleTask
|
||||
};
|
||||
|
||||
let assert = require('assert'),
|
||||
debug = require('debug')('box:apptaskmanager'),
|
||||
fs = require('fs'),
|
||||
locker = require('./locker.js'),
|
||||
safe = require('safetydance'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
tasks = require('./tasks.js');
|
||||
|
||||
let gActiveTasks = { }; // indexed by app id
|
||||
let gPendingTasks = [ ];
|
||||
let gInitialized = false;
|
||||
|
||||
const TASK_CONCURRENCY = 3;
|
||||
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
// Human-readable status text shown to the user while an app task is queued
// behind a conflicting global lock operation.
function waitText(lockOperation) {
    if (lockOperation === locker.OP_BOX_UPDATE) return 'Waiting for Cloudron to finish updating. See the Settings view';
    if (lockOperation === locker.OP_PLATFORM_START) return 'Waiting for Cloudron to initialize';
    if (lockOperation === locker.OP_FULL_BACKUP) return 'Waiting for Cloudron to finish backup. See the Backups view'; // was 'Wait for' - aligned phrasing with the other messages

    return ''; // cannot happen: scheduleTask only queues behind one of the locks above
}
|
||||
|
||||
// One-time lazy setup: resume queued tasks whenever a conflicting lock is
// released. Invoked on the first scheduleTask() call.
function initializeSync() {
    locker.on('unlocked', startNextTask);
    gInitialized = true;
}
|
||||
|
||||
// Runs the task identified by taskId for appId, honoring the global locker
// and a concurrency cap of TASK_CONCURRENCY. When the task cannot start right
// away (duplicate, cap reached, or lock held) it is either rejected or queued
// in gPendingTasks and retried on the next 'unlocked' event.
// callback is called when the task is finished
function scheduleTask(appId, taskId, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof taskId, 'string');
    assert.strictEqual(typeof callback, 'function');

    if (!gInitialized) initializeSync();

    if (appId in gActiveTasks) {
        return callback(new Error(`Task for ${appId} is already active`)); // fix: message carried a stray '%s' printf placeholder inside a template literal
    }

    if (Object.keys(gActiveTasks).length >= TASK_CONCURRENCY) {
        debug(`Reached concurrency limit, queueing task id ${taskId}`);
        tasks.update(taskId, { percent: 0, message: 'Waiting for other app tasks to complete' }, NOOP_CALLBACK);
        gPendingTasks.push({ appId, taskId, callback });
        return;
    }

    const lockError = locker.recursiveLock(locker.OP_APPTASK);

    if (lockError) {
        debug(`Could not get lock. ${lockError.message}, queueing task id ${taskId}`);
        tasks.update(taskId, { percent: 0, message: waitText(lockError.operation) }, NOOP_CALLBACK);
        gPendingTasks.push({ appId, taskId, callback });
        return;
    }

    gActiveTasks[appId] = {};

    const logFile = path.join(paths.LOG_DIR, appId, 'apptask.log');

    if (!fs.existsSync(path.dirname(logFile))) safe.fs.mkdirSync(path.dirname(logFile)); // ensure per-app log directory exists

    tasks.startTask(taskId, { logFile }, function (error, result) {
        callback(error, result);

        delete gActiveTasks[appId];
        locker.unlock(locker.OP_APPTASK); // the unlock event triggers startNextTask for queued work
    });
}
|
||||
|
||||
// Dequeues the oldest pending task and pushes it back through scheduleTask(),
// which re-evaluates the lock and the concurrency cap. Runs on every locker
// 'unlocked' event; no-op when nothing is queued.
function startNextTask() {
    if (gPendingTasks.length === 0) return;

    assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);

    const { appId, taskId, callback } = gPendingTasks.shift();
    scheduleTask(appId, taskId, callback);
}
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
CRON: { userId: null, username: 'cron' },
|
||||
HEALTH_MONITOR: { userId: null, username: 'healthmonitor' },
|
||||
APP_TASK: { userId: null, username: 'apptask' },
|
||||
EXTERNAL_LDAP_TASK: { userId: null, username: 'externalldap' },
|
||||
|
||||
fromRequest: fromRequest
|
||||
};
|
||||
|
||||
// Builds an audit-source descriptor ({ ip, username, userId }) from an HTTP
// request. Prefers the proxy-supplied X-Forwarded-For header over the socket
// address; user fields are null for unauthenticated requests.
function fromRequest(req) {
    const ip = req.headers['x-forwarded-for'] || req.connection.remoteAddress || null;
    const user = req.user || null;

    return {
        ip: ip,
        username: user ? user.username : null,
        userId: user ? user.id : null
    };
}
|
||||
+12
-12
@@ -6,7 +6,7 @@ var assert = require('assert'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'manifestJson', 'format' ];
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs' ];
|
||||
|
||||
exports = module.exports = {
|
||||
add: add,
|
||||
@@ -103,21 +103,21 @@ function get(id, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function add(backup, callback) {
|
||||
assert(backup && typeof backup === 'object');
|
||||
assert.strictEqual(typeof backup.id, 'string');
|
||||
assert.strictEqual(typeof backup.version, 'string');
|
||||
assert(backup.type === exports.BACKUP_TYPE_APP || backup.type === exports.BACKUP_TYPE_BOX);
|
||||
assert(util.isArray(backup.dependsOn));
|
||||
assert.strictEqual(typeof backup.manifest, 'object');
|
||||
assert.strictEqual(typeof backup.format, 'string');
|
||||
function add(id, data, callback) {
|
||||
assert(data && typeof data === 'object');
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof data.version, 'string');
|
||||
assert(data.type === exports.BACKUP_TYPE_APP || data.type === exports.BACKUP_TYPE_BOX);
|
||||
assert(util.isArray(data.dependsOn));
|
||||
assert.strictEqual(typeof data.manifest, 'object');
|
||||
assert.strictEqual(typeof data.format, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var creationTime = backup.creationTime || new Date(); // allow tests to set the time
|
||||
var manifestJson = JSON.stringify(backup.manifest);
|
||||
var creationTime = data.creationTime || new Date(); // allow tests to set the time
|
||||
var manifestJson = JSON.stringify(data.manifest);
|
||||
|
||||
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), manifestJson, backup.format ],
|
||||
[ id, data.version, data.type, creationTime, exports.BACKUP_STATE_NORMAL, data.dependsOn.join(','), manifestJson, data.format ],
|
||||
function (error) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
+145
-111
@@ -29,6 +29,8 @@ exports = module.exports = {
|
||||
injectPrivateFields: injectPrivateFields,
|
||||
removePrivateFields: removePrivateFields,
|
||||
|
||||
checkConfiguration: checkConfiguration,
|
||||
|
||||
SECRET_PLACEHOLDER: String.fromCharCode(0x25CF).repeat(8),
|
||||
|
||||
// for testing
|
||||
@@ -38,31 +40,30 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
AppsError = require('./apps.js').AppsError,
|
||||
async = require('async'),
|
||||
assert = require('assert'),
|
||||
backupdb = require('./backupdb.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
crypto = require('crypto'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
DataLayout = require('./datalayout.js'),
|
||||
debug = require('debug')('box:backups'),
|
||||
df = require('@sindresorhus/df'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
locker = require('./locker.js'),
|
||||
mailer = require('./mailer.js'),
|
||||
mkdirp = require('mkdirp'),
|
||||
once = require('once'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
progressStream = require('progress-stream'),
|
||||
prettyBytes = require('pretty-bytes'),
|
||||
safe = require('safetydance'),
|
||||
shell = require('./shell.js'),
|
||||
settings = require('./settings.js'),
|
||||
superagent = require('superagent'),
|
||||
syncer = require('./syncer.js'),
|
||||
tar = require('tar-fs'),
|
||||
tasks = require('./tasks.js'),
|
||||
@@ -105,7 +106,6 @@ BackupsError.NOT_FOUND = 'not found';
|
||||
// choose which storage backend we use for test purpose we use s3
|
||||
function api(provider) {
|
||||
switch (provider) {
|
||||
case 'caas': return require('./storage/s3.js');
|
||||
case 's3': return require('./storage/s3.js');
|
||||
case 'gcs': return require('./storage/gcs.js');
|
||||
case 'filesystem': return require('./storage/filesystem.js');
|
||||
@@ -113,6 +113,8 @@ function api(provider) {
|
||||
case 's3-v4-compat': return require('./storage/s3.js');
|
||||
case 'digitalocean-spaces': return require('./storage/s3.js');
|
||||
case 'exoscale-sos': return require('./storage/s3.js');
|
||||
case 'wasabi': return require('./storage/s3.js');
|
||||
case 'scaleway-objectstorage': return require('./storage/s3.js');
|
||||
case 'noop': return require('./storage/noop.js');
|
||||
default: return null;
|
||||
}
|
||||
@@ -285,8 +287,15 @@ function tarPack(dataLayout, key, callback) {
|
||||
var pack = tar.pack('/', {
|
||||
dereference: false, // pack the symlink and not what it points to
|
||||
entries: dataLayout.localPaths(),
|
||||
ignoreStatError: (path, err) => {
|
||||
debug(`tarPack: error stat'ing ${path} - ${err.code}`);
|
||||
return err.code === 'ENOENT'; // ignore if file or dir got removed (probably some temporary file)
|
||||
},
|
||||
map: function(header) {
|
||||
header.name = dataLayout.toRemotePath(header.name);
|
||||
// the tar pax format allows us to encode filenames > 100 and size > 8GB (see #640)
|
||||
// https://www.systutorials.com/docs/linux/man/5-star/
|
||||
if (header.size > 8589934590 || header.name > 99) header.pax = { size: header.size };
|
||||
return header;
|
||||
},
|
||||
strict: false // do not error for unknown types (skip fifo, char/block devices)
|
||||
@@ -404,6 +413,34 @@ function saveFsMetadata(dataLayout, metadataFile, callback) {
|
||||
callback();
|
||||
}
|
||||
|
||||
// the du call in the function below requires root
|
||||
// Verifies the backup destination has room for the data about to be copied:
// sum of `du` usage of all local paths plus 1GB headroom. Only meaningful for
// the 'filesystem' provider; remote providers skip the check.
// NOTE: the du call requires root to traverse app directories.
function checkFreeDiskSpace(backupConfig, dataLayout, callback) {
    assert.strictEqual(typeof backupConfig, 'object');
    assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
    assert.strictEqual(typeof callback, 'function');

    if (backupConfig.provider !== 'filesystem') return callback();

    let used = 0;
    for (let localPath of dataLayout.localPaths()) {
        debug(`checkFreeDiskSpace: getting disk usage of ${localPath}`);
        // fix: quote the path so spaces/shell metacharacters in app data paths
        // do not break (or get interpreted by) the shell command
        let result = safe.child_process.execSync(`du -Dsb '${localPath}'`, { encoding: 'utf8' });
        if (!result) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, safe.error));
        const size = parseInt(result, 10);
        // fix: guard against unparseable du output which would poison the sum with NaN
        if (Number.isNaN(size)) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, `Unable to parse du output: ${result}`));
        used += size;
    }

    debug(`checkFreeDiskSpace: ${used} bytes`);

    df.file(backupConfig.backupFolder).then(function (diskUsage) {
        const needed = used + (1024 * 1024 * 1024); // require at least 1GB left afterwards
        if (diskUsage.available <= needed) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Not enough disk space for backup. Needed: ${prettyBytes(needed)} Available: ${prettyBytes(diskUsage.available)}`));

        callback(null);
    }).catch(function (error) {
        callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
    });
}
|
||||
|
||||
// this function is called via backupupload (since it needs root to traverse app's directory)
|
||||
function upload(backupId, format, dataLayoutString, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
@@ -419,29 +456,33 @@ function upload(backupId, format, dataLayoutString, progressCallback, callback)
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (format === 'tgz') {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error
|
||||
checkFreeDiskSpace(backupConfig, dataLayout, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
tarPack(dataLayout, backupConfig.key || null, function (error, tarStream) {
|
||||
if (error) return retryCallback(error);
|
||||
if (format === 'tgz') {
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
retryCallback = once(retryCallback); // protect again upload() erroring much later after tar stream error
|
||||
|
||||
tarStream.on('progress', function(progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0Mbps looks wrong
|
||||
progressCallback({ message: `Uploading backup ${transferred}M@${speed}Mbps` });
|
||||
tarPack(dataLayout, backupConfig.key || null, function (error, tarStream) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
tarStream.on('progress', function(progress) {
|
||||
const transferred = Math.round(progress.transferred/1024/1024), speed = Math.round(progress.speed/1024/1024);
|
||||
if (!transferred && !speed) return progressCallback({ message: 'Uploading backup' }); // 0M@0Mbps looks wrong
|
||||
progressCallback({ message: `Uploading backup ${transferred}M@${speed}Mbps` });
|
||||
});
|
||||
tarStream.on('error', retryCallback); // already returns BackupsError
|
||||
|
||||
api(backupConfig.provider).upload(backupConfig, getBackupFilePath(backupConfig, backupId, format), tarStream, retryCallback);
|
||||
});
|
||||
tarStream.on('error', retryCallback); // already returns BackupsError
|
||||
|
||||
api(backupConfig.provider).upload(backupConfig, getBackupFilePath(backupConfig, backupId, format), tarStream, retryCallback);
|
||||
});
|
||||
}, callback);
|
||||
} else {
|
||||
async.series([
|
||||
saveFsMetadata.bind(null, dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`),
|
||||
sync.bind(null, backupConfig, backupId, dataLayout, progressCallback)
|
||||
], callback);
|
||||
}
|
||||
}, callback);
|
||||
} else {
|
||||
async.series([
|
||||
saveFsMetadata.bind(null, dataLayout, `${dataLayout.localRoot()}/fsmetadata.json`),
|
||||
sync.bind(null, backupConfig, backupId, dataLayout, progressCallback)
|
||||
], callback);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -505,9 +546,9 @@ function restoreFsMetadata(dataLayout, metadataFile, callback) {
|
||||
debug(`Recreating empty directories in ${dataLayout.toString()}`);
|
||||
|
||||
var metadataJson = safe.fs.readFileSync(metadataFile, 'utf8');
|
||||
if (metadataJson === null) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error loading fsmetadata.txt:' + safe.error.message));
|
||||
if (metadataJson === null) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error loading fsmetadata.json:' + safe.error.message));
|
||||
var metadata = safe.JSON.parse(metadataJson);
|
||||
if (metadata === null) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error parsing fsmetadata.txt:' + safe.error.message));
|
||||
if (metadata === null) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error parsing fsmetadata.json:' + safe.error.message));
|
||||
|
||||
async.eachSeries(metadata.emptyDirs, function createPath(emptyDir, iteratorDone) {
|
||||
mkdirp(dataLayout.toLocalPath(emptyDir), iteratorDone);
|
||||
@@ -634,7 +675,7 @@ function restore(backupConfig, backupId, progressCallback, callback) {
|
||||
|
||||
debug('restore: database imported');
|
||||
|
||||
callback();
|
||||
settings.initCache(callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -685,7 +726,7 @@ function runBackupUpload(backupId, format, dataLayout, progressCallback, callbac
|
||||
callback();
|
||||
}).on('message', function (message) {
|
||||
if (!message.result) return progressCallback(message);
|
||||
debug(`runBackupUpload: result - ${message}`);
|
||||
debug(`runBackupUpload: result - ${JSON.stringify(message)}`);
|
||||
result = message.result;
|
||||
});
|
||||
}
|
||||
@@ -749,37 +790,9 @@ function uploadBoxSnapshot(backupConfig, progressCallback, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// Notifies the caas API server that the box backup (and its dependent app
// backups) finished. A no-op for every provider other than the managed 'caas'.
function backupDone(apiConfig, backupId, appBackupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    if (apiConfig.provider !== 'caas') return callback();

    debug('[%s] backupDone: %s apps %j', backupId, backupId, appBackupIds);

    const url = config.apiServerOrigin() + '/api/v1/boxes/' + apiConfig.fqdn + '/backupDone';
    const data = {
        boxVersion: config.version(),
        backupId: backupId,
        appId: null, // now unused
        appVersion: null, // now unused
        appBackupIds: appBackupIds
    };

    superagent.post(url).send(data).query({ token: apiConfig.token }).timeout(30 * 1000).end(function (error, result) {
        if (error && !error.response) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
        if (result.statusCode !== 200) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result.text));

        return callback(null);
    });
}
|
||||
|
||||
function rotateBoxBackup(backupConfig, timestamp, appBackupIds, progressCallback, callback) {
|
||||
function rotateBoxBackup(backupConfig, tag, appBackupIds, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof timestamp, 'string');
|
||||
assert.strictEqual(typeof tag, 'string');
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -787,13 +800,13 @@ function rotateBoxBackup(backupConfig, timestamp, appBackupIds, progressCallback
|
||||
var snapshotInfo = getSnapshotInfo('box');
|
||||
if (!snapshotInfo) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, 'Snapshot info missing or corrupt'));
|
||||
|
||||
var snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var backupId = util.format('%s/box_%s_v%s', timestamp, snapshotTime, config.version());
|
||||
const snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,''); // add this to filename to make it unique, so it's easy to download them
|
||||
const backupId = util.format('%s/box_%s_v%s', tag, snapshotTime, constants.VERSION);
|
||||
const format = backupConfig.format;
|
||||
|
||||
debug(`Rotating box backup to id ${backupId}`);
|
||||
|
||||
backupdb.add({ id: backupId, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds, manifest: null, format: format }, function (error) {
|
||||
backupdb.add(backupId, { version: constants.VERSION, type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds, manifest: null, format: format }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
var copy = api(backupConfig.provider).copy(backupConfig, getBackupFilePath(backupConfig, 'snapshot/box', format), getBackupFilePath(backupConfig, backupId, format));
|
||||
@@ -807,19 +820,15 @@ function rotateBoxBackup(backupConfig, timestamp, appBackupIds, progressCallback
|
||||
|
||||
debug(`Rotated box backup successfully as id ${backupId}`);
|
||||
|
||||
backupDone(backupConfig, backupId, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, backupId);
|
||||
});
|
||||
callback(null, backupId);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function backupBoxWithAppBackupIds(appBackupIds, timestamp, progressCallback, callback) {
|
||||
function backupBoxWithAppBackupIds(appBackupIds, tag, progressCallback, callback) {
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof timestamp, 'string');
|
||||
assert.strictEqual(typeof tag, 'string');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -829,7 +838,7 @@ function backupBoxWithAppBackupIds(appBackupIds, timestamp, progressCallback, ca
|
||||
uploadBoxSnapshot(backupConfig, progressCallback, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
rotateBoxBackup(backupConfig, timestamp, appBackupIds, progressCallback, callback);
|
||||
rotateBoxBackup(backupConfig, tag, appBackupIds, progressCallback, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -837,10 +846,10 @@ function backupBoxWithAppBackupIds(appBackupIds, timestamp, progressCallback, ca
|
||||
function canBackupApp(app) {
|
||||
// only backup apps that are installed or pending configure or called from apptask. Rest of them are in some
|
||||
// state not good for consistent backup (i.e addons may not have been setup completely)
|
||||
return (app.installationState === appdb.ISTATE_INSTALLED && app.health === appdb.HEALTH_HEALTHY) ||
|
||||
app.installationState === appdb.ISTATE_PENDING_CONFIGURE ||
|
||||
app.installationState === appdb.ISTATE_PENDING_BACKUP || // called from apptask
|
||||
app.installationState === appdb.ISTATE_PENDING_UPDATE; // called from apptask
|
||||
return (app.installationState === apps.ISTATE_INSTALLED && app.health === apps.HEALTH_HEALTHY) ||
|
||||
app.installationState === apps.ISTATE_PENDING_CONFIGURE ||
|
||||
app.installationState === apps.ISTATE_PENDING_BACKUP || // called from apptask
|
||||
app.installationState === apps.ISTATE_PENDING_UPDATE; // called from apptask
|
||||
}
|
||||
|
||||
function snapshotApp(app, progressCallback, callback) {
|
||||
@@ -850,7 +859,7 @@ function snapshotApp(app, progressCallback, callback) {
|
||||
|
||||
progressCallback({ message: `Snapshotting app ${app.fqdn}` });
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(apps.getAppConfig(app)))) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(app))) {
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error creating config.json: ' + safe.error.message));
|
||||
}
|
||||
|
||||
@@ -861,24 +870,25 @@ function snapshotApp(app, progressCallback, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function rotateAppBackup(backupConfig, app, timestamp, progressCallback, callback) {
|
||||
function rotateAppBackup(backupConfig, app, tag, options, progressCallback, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof timestamp, 'string');
|
||||
assert.strictEqual(typeof tag, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var snapshotInfo = getSnapshotInfo(app.id);
|
||||
if (!snapshotInfo) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, 'Snapshot info missing or corrupt'));
|
||||
|
||||
var snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest; // compat
|
||||
var backupId = util.format('%s/app_%s_%s_v%s', timestamp, app.id, snapshotTime, manifest.version);
|
||||
const snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,''); // add this for unique filename which helps when downloading them
|
||||
const backupId = util.format('%s/app_%s_%s_v%s', tag, app.id, snapshotTime, manifest.version);
|
||||
const format = backupConfig.format;
|
||||
|
||||
debug(`Rotating app backup of ${app.id} to id ${backupId}`);
|
||||
|
||||
backupdb.add({ id: backupId, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], manifest: manifest, format: format }, function (error) {
|
||||
backupdb.add(backupId, { version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], manifest: manifest, format: format }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
var copy = api(backupConfig.provider).copy(backupConfig, getBackupFilePath(backupConfig, `snapshot/app_${app.id}`, format), getBackupFilePath(backupConfig, backupId, format));
|
||||
@@ -886,7 +896,7 @@ function rotateAppBackup(backupConfig, app, timestamp, progressCallback, callbac
|
||||
copy.on('done', function (copyBackupError) {
|
||||
const state = copyBackupError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
|
||||
backupdb.update(backupId, { state: state }, function (error) {
|
||||
backupdb.update(backupId, { preserveSecs: options.preserveSecs || 0, state: state }, function (error) {
|
||||
if (copyBackupError) return callback(copyBackupError);
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
@@ -927,9 +937,10 @@ function uploadAppSnapshot(backupConfig, app, progressCallback, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function backupAppWithTimestamp(app, timestamp, progressCallback, callback) {
|
||||
function backupAppWithTag(app, tag, options, progressCallback, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof timestamp, 'string');
|
||||
assert.strictEqual(typeof tag, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -941,21 +952,22 @@ function backupAppWithTimestamp(app, timestamp, progressCallback, callback) {
|
||||
uploadAppSnapshot(backupConfig, app, progressCallback, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
rotateAppBackup(backupConfig, app, timestamp, progressCallback, callback);
|
||||
rotateAppBackup(backupConfig, app, tag, options, progressCallback, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function backupApp(app, progressCallback, callback) {
|
||||
function backupApp(app, options, progressCallback, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
|
||||
debug(`backupApp - Backing up ${app.fqdn} with timestamp ${timestamp}`);
|
||||
debug(`backupApp - Backing up ${app.fqdn} with tag ${tag}`);
|
||||
|
||||
backupAppWithTimestamp(app, timestamp, progressCallback, callback);
|
||||
backupAppWithTag(app, tag, options, progressCallback, callback);
|
||||
}
|
||||
|
||||
// this function expects you to have a lock. Unlike other progressCallback this also has a progress field
|
||||
@@ -963,7 +975,7 @@ function backupBoxAndApps(progressCallback, callback) {
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
|
||||
apps.getAll(function (error, allApps) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
@@ -980,7 +992,7 @@ function backupBoxAndApps(progressCallback, callback) {
|
||||
return iteratorCallback(null, null); // nothing to backup
|
||||
}
|
||||
|
||||
backupAppWithTimestamp(app, timestamp, (progress) => progressCallback({ percent: percent, message: progress.message }), function (error, backupId) {
|
||||
backupAppWithTag(app, tag, { /* options */ }, (progress) => progressCallback({ percent: percent, message: progress.message }), function (error, backupId) {
|
||||
if (error && error.reason !== BackupsError.BAD_STATE) {
|
||||
debugApp(app, 'Unable to backup', error);
|
||||
return iteratorCallback(error);
|
||||
@@ -998,7 +1010,7 @@ function backupBoxAndApps(progressCallback, callback) {
|
||||
progressCallback({ percent: percent, message: 'Backing up system data' });
|
||||
percent += step;
|
||||
|
||||
backupBoxWithAppBackupIds(backupIds, timestamp, (progress) => progressCallback({ percent: percent, message: progress.message }), callback);
|
||||
backupBoxWithAppBackupIds(backupIds, tag, (progress) => progressCallback({ percent: percent, message: progress.message }), callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -1007,19 +1019,21 @@ function startBackupTask(auditSource, callback) {
|
||||
let error = locker.lock(locker.OP_FULL_BACKUP);
|
||||
if (error) return callback(new BackupsError(BackupsError.BAD_STATE, `Cannot backup now: ${error.message}`));
|
||||
|
||||
let task = tasks.startTask(tasks.TASK_BACKUP, []);
|
||||
task.on('error', (error) => callback(new BackupsError(BackupsError.INTERNAL_ERROR, error)));
|
||||
task.on('start', (taskId) => {
|
||||
tasks.add(tasks.TASK_BACKUP, [ ], function (error, taskId) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });
|
||||
|
||||
tasks.startTask(taskId, {}, function (error, result) {
|
||||
locker.unlock(locker.OP_FULL_BACKUP);
|
||||
|
||||
const errorMessage = error ? error.message : '';
|
||||
|
||||
eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId: taskId, errorMessage: errorMessage, backupId: result });
|
||||
});
|
||||
|
||||
callback(null, taskId);
|
||||
});
|
||||
task.on('finish', (error, result) => {
|
||||
locker.unlock(locker.OP_FULL_BACKUP);
|
||||
|
||||
if (error) mailer.backupFailed(error);
|
||||
|
||||
eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { errorMessage: error ? error.message : null, backupId: result });
|
||||
});
|
||||
}
|
||||
|
||||
function ensureBackup(auditSource, callback) {
|
||||
@@ -1095,6 +1109,7 @@ function cleanupAppBackups(backupConfig, referencedAppBackups, callback) {
|
||||
|
||||
async.eachSeries(appBackups, function iterator(appBackup, iteratorDone) {
|
||||
if (referencedAppBackups.indexOf(appBackup.id) !== -1) return iteratorDone();
|
||||
if ((now - appBackup.creationTime) < (appBackup.preserveSecs * 1000)) return iteratorDone();
|
||||
if ((now - appBackup.creationTime) < (backupConfig.retentionSecs * 1000)) return iteratorDone();
|
||||
|
||||
debug('cleanupAppBackups: removing %s', appBackup.id);
|
||||
@@ -1242,17 +1257,36 @@ function cleanup(auditSource, progressCallback, callback) {
|
||||
}
|
||||
|
||||
function startCleanupTask(auditSource, callback) {
|
||||
let task = tasks.startTask(tasks.TASK_CLEAN_BACKUPS, [ auditSource ]);
|
||||
task.on('error', (error) => callback(new BackupsError(BackupsError.INTERNAL_ERROR, error)));
|
||||
task.on('start', (taskId) => {
|
||||
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_START, auditSource, { taskId });
|
||||
|
||||
tasks.add(tasks.TASK_CLEAN_BACKUPS, [ auditSource ], function (error, taskId) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackups, removedAppBackups }
|
||||
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
|
||||
taskId,
|
||||
errorMessage: error ? error.message : null,
|
||||
removedBoxBackups: result ? result.removedBoxBackups : [],
|
||||
removedAppBackups: result ? result.removedAppBackups : []
|
||||
});
|
||||
});
|
||||
|
||||
callback(null, taskId);
|
||||
});
|
||||
task.on('finish', (error, result) => { // result is { removedBoxBackups, removedAppBackups }
|
||||
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
|
||||
errorMessage: error ? error.message : null,
|
||||
removedBoxBackups: result ? result.removedBoxBackups : [],
|
||||
removedAppBackups: result ? result.removedAppBackups : []
|
||||
});
|
||||
}
|
||||
|
||||
function checkConfiguration(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
let message = '';
|
||||
if (backupConfig.provider === 'noop') {
|
||||
message = 'Cloudron backups are disabled. Please ensure this server is backed up using alternate means. See https://cloudron.io/documentation/backups/#storage-providers for more information.';
|
||||
} else if (backupConfig.provider === 'filesystem' && !backupConfig.externalDisk) {
|
||||
message = 'Cloudron backups are currently on the same disk as the Cloudron server instance. This is dangerous and can lead to complete data loss if the disk fails. See https://cloudron.io/documentation/backups/#storage-providers for storing backups in an external location.';
|
||||
}
|
||||
|
||||
callback(null, message);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -0,0 +1,56 @@
|
||||
/* jslint node:true */
|
||||
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
exports = module.exports = BoxError;
|
||||
|
||||
function BoxError(reason, errorOrMessage, details) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
assert(typeof details === 'object' || typeof details === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
this.details = details || {};
|
||||
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else { // error object
|
||||
this.message = errorOrMessage.message;
|
||||
this.nestedError = errorOrMessage;
|
||||
_.extend(this.details, errorOrMessage); // copy enumerable properies
|
||||
}
|
||||
}
|
||||
util.inherits(BoxError, Error);
|
||||
BoxError.ACCESS_DENIED = 'Access Denied';
|
||||
BoxError.ALREADY_EXISTS = 'Already Exists';
|
||||
BoxError.BAD_FIELD = 'Bad Field';
|
||||
BoxError.COLLECTD_ERROR = 'Collectd Error';
|
||||
BoxError.CONFLICT = 'Conflict';
|
||||
BoxError.DATABASE_ERROR = 'Database Error';
|
||||
BoxError.DNS_ERROR = 'DNS Error';
|
||||
BoxError.DOCKER_ERROR = 'Docker Error';
|
||||
BoxError.EXTERNAL_ERROR = 'External Error';
|
||||
BoxError.FS_ERROR = 'FileSystem Error';
|
||||
BoxError.INTERNAL_ERROR = 'Internal Error';
|
||||
BoxError.LOGROTATE_ERROR = 'Logrotate Error';
|
||||
BoxError.NETWORK_ERROR = 'Network Error';
|
||||
BoxError.NOT_FOUND = 'Not found';
|
||||
BoxError.OPENSSL_ERROR = 'OpenSSL Error';
|
||||
BoxError.REVERSEPROXY_ERROR = 'ReverseProxy Error';
|
||||
BoxError.TASK_ERROR = 'Task Error';
|
||||
BoxError.TRY_AGAIN = 'Try Again';
|
||||
BoxError.UNKNOWN_ERROR = 'Unknown Error'; // only used for porting
|
||||
|
||||
BoxError.prototype.toPlainObject = function () {
|
||||
return _.extend({}, { message: this.message, reason: this.reason }, this.details);
|
||||
};
|
||||
-130
@@ -1,130 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
verifySetupToken: verifySetupToken,
|
||||
setupDone: setupDone,
|
||||
|
||||
sendHeartbeat: sendHeartbeat,
|
||||
setPtrRecord: setPtrRecord,
|
||||
|
||||
CaasError: CaasError
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
debug = require('debug')('box:caas'),
|
||||
settings = require('./settings.js'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
function CaasError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(CaasError, Error);
|
||||
CaasError.BAD_FIELD = 'Field error';
|
||||
CaasError.BAD_STATE = 'Bad state';
|
||||
CaasError.INVALID_TOKEN = 'Invalid Token';
|
||||
CaasError.INTERNAL_ERROR = 'Internal Error';
|
||||
CaasError.EXTERNAL_ERROR = 'External Error';
|
||||
|
||||
function getCaasConfig(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getCaasConfig(function (error, result) {
|
||||
if (error) return callback(new CaasError(CaasError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
|
||||
function verifySetupToken(setupToken, callback) {
|
||||
assert.strictEqual(typeof setupToken, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getCaasConfig(function (error, caasConfig) {
|
||||
if (error) return callback(new CaasError(CaasError.INTERNAL_ERROR, error));
|
||||
|
||||
superagent.get(config.apiServerOrigin() + '/api/v1/boxes/' + caasConfig.boxId + '/setup/verify').query({ setupToken: setupToken })
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CaasError(CaasError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403) return callback(new CaasError(CaasError.INVALID_TOKEN));
|
||||
if (result.statusCode === 409) return callback(new CaasError(CaasError.BAD_STATE, 'Already setup'));
|
||||
if (result.statusCode !== 200) return callback(new CaasError(CaasError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function setupDone(setupToken, callback) {
|
||||
assert.strictEqual(typeof setupToken, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getCaasConfig(function (error, caasConfig) {
|
||||
if (error) return callback(new CaasError(CaasError.INTERNAL_ERROR, error));
|
||||
|
||||
// Now let the api server know we got activated
|
||||
superagent.post(config.apiServerOrigin() + '/api/v1/boxes/' + caasConfig.boxId + '/setup/done').query({ setupToken: setupToken })
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CaasError(CaasError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403) return callback(new CaasError(CaasError.INVALID_TOKEN));
|
||||
if (result.statusCode === 409) return callback(new CaasError(CaasError.BAD_STATE, 'Already setup'));
|
||||
if (result.statusCode !== 201) return callback(new CaasError(CaasError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function sendHeartbeat() {
|
||||
assert(config.provider() === 'caas', 'Heartbeat is only sent for managed cloudrons');
|
||||
|
||||
getCaasConfig(function (error, result) {
|
||||
if (error) return debug('Caas config missing', error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/boxes/' + result.boxId + '/heartbeat';
|
||||
superagent.post(url).query({ token: result.token, version: config.version() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) debug('Network error sending heartbeat.', error);
|
||||
else if (result.statusCode !== 200) debug('Server responded to heartbeat with %s %s', result.statusCode, result.text);
|
||||
else debug('Heartbeat sent to %s', url);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function setPtrRecord(domain, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getCaasConfig(function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
superagent
|
||||
.post(config.apiServerOrigin() + '/api/v1/boxes/' + result.boxId + '/ptr')
|
||||
.query({ token: result.token })
|
||||
.send({ domain: domain })
|
||||
.timeout(5 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CaasError(CaasError.EXTERNAL_ERROR, 'Cannot reach appstore'));
|
||||
if (result.statusCode !== 202) return callback(new CaasError(CaasError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
+69
-80
@@ -2,6 +2,7 @@
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
BoxError = require('../boxerror.js'),
|
||||
crypto = require('crypto'),
|
||||
debug = require('debug')('box:cert/acme2'),
|
||||
domains = require('../domains.js'),
|
||||
@@ -24,31 +25,6 @@ exports = module.exports = {
|
||||
_getChallengeSubdomain: getChallengeSubdomain
|
||||
};
|
||||
|
||||
function Acme2Error(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(Acme2Error, Error);
|
||||
Acme2Error.INTERNAL_ERROR = 'Internal Error';
|
||||
Acme2Error.EXTERNAL_ERROR = 'External Error';
|
||||
Acme2Error.ALREADY_EXISTS = 'Already Exists';
|
||||
Acme2Error.NOT_COMPLETED = 'Not Completed';
|
||||
Acme2Error.FORBIDDEN = 'Forbidden';
|
||||
|
||||
// http://jose.readthedocs.org/en/latest/
|
||||
// https://www.ietf.org/proceedings/92/slides/slides-92-acme-1.pdf
|
||||
// https://community.letsencrypt.org/t/list-of-client-implementations/2103
|
||||
@@ -80,7 +56,7 @@ function urlBase64Encode(string) {
|
||||
}
|
||||
|
||||
function b64(str) {
|
||||
var buf = util.isBuffer(str) ? str : new Buffer(str);
|
||||
var buf = util.isBuffer(str) ? str : Buffer.from(str);
|
||||
return urlBase64Encode(buf.toString('base64'));
|
||||
}
|
||||
|
||||
@@ -158,8 +134,8 @@ Acme2.prototype.updateContact = function (registrationUri, callback) {
|
||||
|
||||
const that = this;
|
||||
this.sendSignedRequest(registrationUri, JSON.stringify(payload), function (error, result) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when registering user: ' + error.message));
|
||||
if (result.statusCode !== 200) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to update contact. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when updating contact: ${error.message}`));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to update contact. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
debug(`updateContact: contact of user updated to ${that.email}`);
|
||||
|
||||
@@ -178,9 +154,9 @@ Acme2.prototype.registerUser = function (callback) {
|
||||
|
||||
var that = this;
|
||||
this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload), function (error, result) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when registering new account: ' + error.message));
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when registering user: ${error.message}`));
|
||||
// 200 if already exists. 201 for new accounts
|
||||
if (result.statusCode !== 200 && result.statusCode !== 201) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to register new account. Expecting 200 or 201, got %s %s', result.statusCode, result.text)));
|
||||
if (result.statusCode !== 200 && result.statusCode !== 201) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to register new account. Expecting 200 or 201, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
debug(`registerUser: user registered keyid: ${result.headers.location}`);
|
||||
|
||||
@@ -204,17 +180,17 @@ Acme2.prototype.newOrder = function (domain, callback) {
|
||||
debug('newOrder: %s', domain);
|
||||
|
||||
this.sendSignedRequest(this.directory.newOrder, JSON.stringify(payload), function (error, result) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when registering domain: ' + error.message));
|
||||
if (result.statusCode === 403) return callback(new Acme2Error(Acme2Error.FORBIDDEN, result.body.detail));
|
||||
if (result.statusCode !== 201) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to register user. Expecting 201, got %s %s', result.statusCode, result.text)));
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when creating new order: ${error.message}`));
|
||||
if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, `Forbidden sending signed request: ${result.body.detail}`));
|
||||
if (result.statusCode !== 201) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to register user. Expecting 201, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
debug('newOrder: created order %s %j', domain, result.body);
|
||||
|
||||
const order = result.body, orderUrl = result.headers.location;
|
||||
|
||||
if (!Array.isArray(order.authorizations)) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'invalid authorizations in order'));
|
||||
if (typeof order.finalize !== 'string') return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'invalid finalize in order'));
|
||||
if (typeof orderUrl !== 'string') return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'invalid order location in order header'));
|
||||
if (!Array.isArray(order.authorizations)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid authorizations in order'));
|
||||
if (typeof order.finalize !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid finalize in order'));
|
||||
if (typeof orderUrl !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid order location in order header'));
|
||||
|
||||
callback(null, order, orderUrl);
|
||||
});
|
||||
@@ -232,18 +208,18 @@ Acme2.prototype.waitForOrder = function (orderUrl, callback) {
|
||||
superagent.get(orderUrl).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) {
|
||||
debug('waitForOrder: network error getting uri %s', orderUrl);
|
||||
return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, error.message)); // network error
|
||||
return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error waiting for order: ${error.message}`)); // network error
|
||||
}
|
||||
if (result.statusCode !== 200) {
|
||||
debug('waitForOrder: invalid response code getting uri %s', result.statusCode);
|
||||
return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
|
||||
return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
|
||||
}
|
||||
|
||||
debug('waitForOrder: status is "%s %j', result.body.status, result.body);
|
||||
|
||||
if (result.body.status === 'pending' || result.body.status === 'processing') return retryCallback(new Acme2Error(Acme2Error.NOT_COMPLETED));
|
||||
if (result.body.status === 'pending' || result.body.status === 'processing') return retryCallback(new BoxError(BoxError.TRY_AGAIN, `Request is in ${result.body.status} state`));
|
||||
else if (result.body.status === 'valid' && result.body.certificate) return retryCallback(null, result.body.certificate);
|
||||
else return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Unexpected status or invalid response: ' + result.body));
|
||||
else return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unexpected status or invalid response: ' + result.body));
|
||||
});
|
||||
}, callback);
|
||||
};
|
||||
@@ -277,8 +253,8 @@ Acme2.prototype.notifyChallengeReady = function (challenge, callback) {
|
||||
};
|
||||
|
||||
this.sendSignedRequest(challenge.url, JSON.stringify(payload), function (error, result) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when notifying challenge: ' + error.message));
|
||||
if (result.statusCode !== 200) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to notify challenge. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when notifying challenge: ${error.message}`));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to notify challenge. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
callback();
|
||||
});
|
||||
@@ -296,18 +272,18 @@ Acme2.prototype.waitForChallenge = function (challenge, callback) {
|
||||
superagent.get(challenge.url).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) {
|
||||
debug('waitForChallenge: network error getting uri %s', challenge.url);
|
||||
return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, error.message)); // network error
|
||||
return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error waiting for challenge: ${error.message}`));
|
||||
}
|
||||
if (result.statusCode !== 200) {
|
||||
debug('waitForChallenge: invalid response code getting uri %s', result.statusCode);
|
||||
return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
|
||||
return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
|
||||
}
|
||||
|
||||
debug('waitForChallenge: status is "%s %j', result.body.status, result.body);
|
||||
|
||||
if (result.body.status === 'pending') return retryCallback(new Acme2Error(Acme2Error.NOT_COMPLETED));
|
||||
if (result.body.status === 'pending') return retryCallback(new BoxError(BoxError.TRY_AGAIN));
|
||||
else if (result.body.status === 'valid') return retryCallback();
|
||||
else return retryCallback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Unexpected status: ' + result.body.status));
|
||||
else return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unexpected status: ' + result.body.status));
|
||||
});
|
||||
}, function retryFinished(error) {
|
||||
// async.retry will pass 'undefined' as second arg making it unusable with async.waterfall()
|
||||
@@ -329,9 +305,9 @@ Acme2.prototype.signCertificate = function (domain, finalizationUrl, csrDer, cal
|
||||
debug('signCertificate: sending sign request');
|
||||
|
||||
this.sendSignedRequest(finalizationUrl, JSON.stringify(payload), function (error, result) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when signing certificate: ' + error.message));
|
||||
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when signing certificate: ${error.message}`));
|
||||
// 429 means we reached the cert limit for this domain
|
||||
if (result.statusCode !== 200) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to sign certificate. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to sign certificate. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -351,15 +327,15 @@ Acme2.prototype.createKeyAndCsr = function (hostname, callback) {
|
||||
debug('createKeyAndCsr: reuse the key for renewal at %s', privateKeyFile);
|
||||
} else {
|
||||
var key = safe.child_process.execSync('openssl genrsa 4096');
|
||||
if (!key) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error));
|
||||
if (!safe.fs.writeFileSync(privateKeyFile, key)) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error));
|
||||
if (!key) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
|
||||
if (!safe.fs.writeFileSync(privateKeyFile, key)) return callback(new BoxError(BoxError.FS_ERROR, safe.error));
|
||||
|
||||
debug('createKeyAndCsr: key file saved at %s', privateKeyFile);
|
||||
}
|
||||
|
||||
var csrDer = safe.child_process.execSync(`openssl req -new -key ${privateKeyFile} -outform DER -subj /CN=${hostname}`);
|
||||
if (!csrDer) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error));
|
||||
if (!safe.fs.writeFileSync(csrFile, csrDer)) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error)); // bookkeeping
|
||||
if (!csrDer) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
|
||||
if (!safe.fs.writeFileSync(csrFile, csrDer)) return callback(new BoxError(BoxError.FS_ERROR, safe.error)); // bookkeeping
|
||||
|
||||
debug('createKeyAndCsr: csr file (DER) saved at %s', csrFile);
|
||||
|
||||
@@ -373,25 +349,29 @@ Acme2.prototype.downloadCertificate = function (hostname, certUrl, callback) {
|
||||
|
||||
var outdir = paths.APP_CERTS_DIR;
|
||||
|
||||
superagent.get(certUrl).buffer().parse(function (res, done) {
|
||||
var data = [ ];
|
||||
res.on('data', function(chunk) { data.push(chunk); });
|
||||
res.on('end', function () { res.text = Buffer.concat(data); done(); });
|
||||
}).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'Network error when downloading certificate'));
|
||||
if (result.statusCode === 202) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, 'Retry not implemented yet'));
|
||||
if (result.statusCode !== 200) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, util.format('Failed to get cert. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
|
||||
debug('downloadCertificate: downloading certificate');
|
||||
|
||||
const fullChainPem = result.text;
|
||||
superagent.get(certUrl).buffer().parse(function (res, done) {
|
||||
var data = [ ];
|
||||
res.on('data', function(chunk) { data.push(chunk); });
|
||||
res.on('end', function () { res.text = Buffer.concat(data); done(); });
|
||||
}).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error when downloading certificate: ${error.message}`));
|
||||
if (result.statusCode === 202) return retryCallback(new BoxError(BoxError.TRY_AGAIN, 'Retry'));
|
||||
if (result.statusCode !== 200) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to get cert. Expecting 200, got %s %s', result.statusCode, result.text)));
|
||||
|
||||
const certName = hostname.replace('*.', '_.');
|
||||
var certificateFile = path.join(outdir, `${certName}.cert`);
|
||||
if (!safe.fs.writeFileSync(certificateFile, fullChainPem)) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error));
|
||||
const fullChainPem = result.text;
|
||||
|
||||
debug('downloadCertificate: cert file for %s saved at %s', hostname, certificateFile);
|
||||
const certName = hostname.replace('*.', '_.');
|
||||
var certificateFile = path.join(outdir, `${certName}.cert`);
|
||||
if (!safe.fs.writeFileSync(certificateFile, fullChainPem)) return retryCallback(new BoxError(BoxError.FS_ERROR, safe.error));
|
||||
|
||||
callback();
|
||||
});
|
||||
debug('downloadCertificate: cert file for %s saved at %s', hostname, certificateFile);
|
||||
|
||||
retryCallback(null);
|
||||
});
|
||||
}, callback);
|
||||
};
|
||||
|
||||
Acme2.prototype.prepareHttpChallenge = function (hostname, domain, authorization, callback) {
|
||||
@@ -402,7 +382,7 @@ Acme2.prototype.prepareHttpChallenge = function (hostname, domain, authorization
|
||||
|
||||
debug('acmeFlow: challenges: %j', authorization);
|
||||
let httpChallenges = authorization.challenges.filter(function(x) { return x.type === 'http-01'; });
|
||||
if (httpChallenges.length === 0) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'no http challenges'));
|
||||
if (httpChallenges.length === 0) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'no http challenges'));
|
||||
let challenge = httpChallenges[0];
|
||||
|
||||
debug('prepareHttpChallenge: preparing for challenge %j', challenge);
|
||||
@@ -412,7 +392,7 @@ Acme2.prototype.prepareHttpChallenge = function (hostname, domain, authorization
|
||||
debug('prepareHttpChallenge: writing %s to %s', keyAuthorization, path.join(paths.ACME_CHALLENGES_DIR, challenge.token));
|
||||
|
||||
fs.writeFile(path.join(paths.ACME_CHALLENGES_DIR, challenge.token), keyAuthorization, function (error) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, error));
|
||||
if (error) return callback(new BoxError(BoxError.FS_ERROR, error));
|
||||
|
||||
callback(null, challenge);
|
||||
});
|
||||
@@ -454,7 +434,7 @@ Acme2.prototype.prepareDnsChallenge = function (hostname, domain, authorization,
|
||||
|
||||
debug('acmeFlow: challenges: %j', authorization);
|
||||
let dnsChallenges = authorization.challenges.filter(function(x) { return x.type === 'dns-01'; });
|
||||
if (dnsChallenges.length === 0) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, 'no dns challenges'));
|
||||
if (dnsChallenges.length === 0) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'no dns challenges'));
|
||||
let challenge = dnsChallenges[0];
|
||||
|
||||
const keyAuthorization = this.getKeyAuthorization(challenge.token);
|
||||
@@ -467,10 +447,10 @@ Acme2.prototype.prepareDnsChallenge = function (hostname, domain, authorization,
|
||||
debug(`prepareDnsChallenge: update ${challengeSubdomain} with ${txtValue}`);
|
||||
|
||||
domains.upsertDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], function (error) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(error);
|
||||
|
||||
domains.waitForDnsRecord(challengeSubdomain, domain, 'TXT', txtValue, { interval: 5000, times: 200 }, function (error) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, challenge);
|
||||
});
|
||||
@@ -493,7 +473,7 @@ Acme2.prototype.cleanupDnsChallenge = function (hostname, domain, challenge, cal
|
||||
debug(`cleanupDnsChallenge: remove ${challengeSubdomain} with ${txtValue}`);
|
||||
|
||||
domains.removeDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], function (error) {
|
||||
if (error) return callback(new Acme2Error(Acme2Error.EXTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
@@ -505,10 +485,12 @@ Acme2.prototype.prepareChallenge = function (hostname, domain, authorizationUrl,
|
||||
assert.strictEqual(typeof authorizationUrl, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`prepareChallenge: http: ${this.performHttpAuthorization}`);
|
||||
|
||||
const that = this;
|
||||
superagent.get(authorizationUrl).timeout(30 * 1000).end(function (error, response) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (response.statusCode !== 200) return callback(new Error('Invalid response code getting authorization : ' + response.statusCode));
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error when preparing challenge: ${error.message}`));
|
||||
if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code getting authorization : ' + response.statusCode));
|
||||
|
||||
const authorization = response.body;
|
||||
|
||||
@@ -526,6 +508,8 @@ Acme2.prototype.cleanupChallenge = function (hostname, domain, challenge, callba
|
||||
assert.strictEqual(typeof challenge, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`cleanupChallenge: http: ${this.performHttpAuthorization}`);
|
||||
|
||||
if (this.performHttpAuthorization) {
|
||||
this.cleanupHttpChallenge(hostname, domain, challenge, callback);
|
||||
} else {
|
||||
@@ -541,7 +525,7 @@ Acme2.prototype.acmeFlow = function (hostname, domain, callback) {
|
||||
if (!fs.existsSync(paths.ACME_ACCOUNT_KEY_FILE)) {
|
||||
debug('getCertificate: generating acme account key on first run');
|
||||
this.accountKeyPem = safe.child_process.execSync('openssl genrsa 4096');
|
||||
if (!this.accountKeyPem) return callback(new Acme2Error(Acme2Error.INTERNAL_ERROR, safe.error));
|
||||
if (!this.accountKeyPem) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
|
||||
|
||||
safe.fs.writeFileSync(paths.ACME_ACCOUNT_KEY_FILE, this.accountKeyPem);
|
||||
} else {
|
||||
@@ -586,8 +570,8 @@ Acme2.prototype.getDirectory = function (callback) {
|
||||
const that = this;
|
||||
|
||||
superagent.get(this.caDirectory).timeout(30 * 1000).end(function (error, response) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (response.statusCode !== 200) return callback(new Error('Invalid response code when fetching directory : ' + response.statusCode));
|
||||
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
|
||||
if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
|
||||
|
||||
if (typeof response.body.newNonce !== 'string' ||
|
||||
typeof response.body.newOrder !== 'string' ||
|
||||
@@ -631,6 +615,11 @@ function getCertificate(hostname, domain, options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var acme = new Acme2(options || { });
|
||||
acme.getCertificate(hostname, domain, callback);
|
||||
let attempt = 1;
|
||||
async.retry({ times: 3, interval: 0 }, function (retryCallback) {
|
||||
debug(`getCertificate: attempt ${attempt++}`);
|
||||
|
||||
let acme = new Acme2(options || { });
|
||||
acme.getCertificate(hostname, domain, retryCallback);
|
||||
}, callback);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
'use strict';
|
||||
|
||||
let assert = require('assert'),
|
||||
fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
exports = module.exports = {
|
||||
getChanges: getChanges
|
||||
};
|
||||
|
||||
function getChanges(version) {
|
||||
assert.strictEqual(typeof version, 'string');
|
||||
|
||||
let changelog = [ ];
|
||||
const lines = fs.readFileSync(path.join(__dirname, '../CHANGES'), 'utf8').split('\n');
|
||||
|
||||
version = version.replace(/[+-].*/, ''); // strip prerelease
|
||||
|
||||
let i;
|
||||
for (i = 0; i < lines.length; i++) {
|
||||
if (lines[i] === '[' + version + ']') break;
|
||||
}
|
||||
|
||||
for (i = i + 1; i < lines.length; i++) {
|
||||
if (lines[i] === '') continue;
|
||||
if (lines[i][0] === '[') break;
|
||||
|
||||
lines[i] = lines[i].trim();
|
||||
|
||||
// detect and remove list style - and * in changelog lines
|
||||
if (lines[i].indexOf('-') === 0) lines[i] = lines[i].slice(1).trim();
|
||||
if (lines[i].indexOf('*') === 0) lines[i] = lines[i].slice(1).trim();
|
||||
|
||||
changelog.push(lines[i]);
|
||||
}
|
||||
|
||||
return changelog;
|
||||
}
|
||||
+21
-6
@@ -18,6 +18,8 @@ exports = module.exports = {
|
||||
|
||||
addDefaultClients: addDefaultClients,
|
||||
|
||||
removeTokenPrivateFields: removeTokenPrivateFields,
|
||||
|
||||
// client type enums
|
||||
TYPE_EXTERNAL: 'external',
|
||||
TYPE_BUILT_IN: 'built-in',
|
||||
@@ -39,7 +41,8 @@ var apps = require('./apps.js'),
|
||||
users = require('./users.js'),
|
||||
UsersError = users.UsersError,
|
||||
util = require('util'),
|
||||
uuid = require('uuid');
|
||||
uuid = require('uuid'),
|
||||
_ = require('underscore');
|
||||
|
||||
function ClientsError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
@@ -273,16 +276,24 @@ function addTokenByUserId(clientId, userId, expiresAt, options, callback) {
|
||||
accesscontrol.scopesForUser(user, function (error, userScopes) {
|
||||
if (error) return callback(new ClientsError(ClientsError.INTERNAL_ERROR, error));
|
||||
|
||||
var scope = accesscontrol.canonicalScopeString(result.scope);
|
||||
var authorizedScopes = accesscontrol.intersectScopes(userScopes, scope.split(','));
|
||||
const scope = accesscontrol.canonicalScopeString(result.scope);
|
||||
const authorizedScopes = accesscontrol.intersectScopes(userScopes, scope.split(','));
|
||||
|
||||
var token = tokendb.generateToken();
|
||||
const token = {
|
||||
id: 'tid-' + uuid.v4(),
|
||||
accessToken: hat(8 * 32),
|
||||
identifier: userId,
|
||||
clientId: result.id,
|
||||
expires: expiresAt,
|
||||
scope: authorizedScopes.join(','),
|
||||
name: name
|
||||
};
|
||||
|
||||
tokendb.add(token, userId, result.id, expiresAt, authorizedScopes.join(','), name, function (error) {
|
||||
tokendb.add(token, function (error) {
|
||||
if (error) return callback(new ClientsError(ClientsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, {
|
||||
accessToken: token,
|
||||
accessToken: token.accessToken,
|
||||
tokenScopes: authorizedScopes,
|
||||
identifier: userId,
|
||||
clientId: result.id,
|
||||
@@ -342,3 +353,7 @@ function addDefaultClients(origin, callback) {
|
||||
clientdb.upsert.bind(null, 'cid-cli', 'Cloudron Tool', 'built-in', 'secret-cli', origin, '*')
|
||||
], callback);
|
||||
}
|
||||
|
||||
function removeTokenPrivateFields(token) {
|
||||
return _.pick(token, 'id', 'identifier', 'clientId', 'scope', 'expires', 'name');
|
||||
}
|
||||
|
||||
+125
-130
@@ -6,7 +6,6 @@ exports = module.exports = {
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
getConfig: getConfig,
|
||||
getDisks: getDisks,
|
||||
getLogs: getLogs,
|
||||
|
||||
reboot: reboot,
|
||||
@@ -16,25 +15,27 @@ exports = module.exports = {
|
||||
|
||||
prepareDashboardDomain: prepareDashboardDomain,
|
||||
setDashboardDomain: setDashboardDomain,
|
||||
setDashboardAndMailDomain: setDashboardAndMailDomain,
|
||||
renewCerts: renewCerts,
|
||||
|
||||
runSystemChecks: runSystemChecks,
|
||||
setupDashboard: setupDashboard,
|
||||
|
||||
// exposed for testing
|
||||
_checkDiskSpace: checkDiskSpace
|
||||
runSystemChecks: runSystemChecks,
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
var apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
auditSource = require('./auditsource.js'),
|
||||
backups = require('./backups.js'),
|
||||
clients = require('./clients.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
cron = require('./cron.js'),
|
||||
debug = require('debug')('box:cloudron'),
|
||||
domains = require('./domains.js'),
|
||||
DomainsError = require('./domains.js').DomainsError,
|
||||
df = require('@sindresorhus/df'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
custom = require('./custom.js'),
|
||||
fs = require('fs'),
|
||||
mail = require('./mail.js'),
|
||||
notifications = require('./notifications.js'),
|
||||
@@ -43,11 +44,14 @@ var assert = require('assert'),
|
||||
paths = require('./paths.js'),
|
||||
platform = require('./platform.js'),
|
||||
reverseProxy = require('./reverseproxy.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
shell = require('./shell.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tasks = require('./tasks.js'),
|
||||
TaskError = require('./tasks.js').TaskError,
|
||||
users = require('./users.js'),
|
||||
util = require('util');
|
||||
|
||||
@@ -83,9 +87,9 @@ CloudronError.ALREADY_UPTODATE = 'No Update Available';
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
cron.startPreActivationJobs(callback);
|
||||
|
||||
runStartupTasks();
|
||||
|
||||
notifyUpdate(callback);
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
@@ -105,17 +109,36 @@ function onActivated(callback) {
|
||||
// 2. the restore code path can run without sudo (since mail/ is non-root)
|
||||
async.series([
|
||||
platform.start,
|
||||
cron.startPostActivationJobs
|
||||
cron.startJobs
|
||||
], callback);
|
||||
}
|
||||
|
||||
function notifyUpdate(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
|
||||
if (version === constants.VERSION) return callback();
|
||||
|
||||
eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource.CRON, { oldVersion: version || 'dev', newVersion: constants.VERSION }, function (error) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }, function (error) {
|
||||
if (error && error.reason !== TaskError.NOT_FOUND) return callback(error); // when hotfixing, task may not exist
|
||||
|
||||
safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// each of these tasks can fail. we will add some routes to fix/re-run them
|
||||
function runStartupTasks() {
|
||||
// configure nginx to be reachable by IP
|
||||
reverseProxy.configureDefaultServer(NOOP_CALLBACK);
|
||||
reverseProxy.writeDefaultConfig(NOOP_CALLBACK);
|
||||
|
||||
// always generate webadmin config since we have no versioning mechanism for the ejs
|
||||
if (config.adminDomain()) reverseProxy.writeAdminConfig(config.adminDomain(), NOOP_CALLBACK);
|
||||
if (settings.adminDomain()) reverseProxy.writeAdminConfig(settings.adminDomain(), NOOP_CALLBACK);
|
||||
|
||||
// check activation state and start the platform
|
||||
users.isActivated(function (error, activated) {
|
||||
@@ -126,32 +149,6 @@ function runStartupTasks() {
|
||||
});
|
||||
}
|
||||
|
||||
function getDisks(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var disks = {
|
||||
boxDataDisk: null,
|
||||
platformDataDisk: null,
|
||||
appsDataDisk: null
|
||||
};
|
||||
|
||||
df.file(paths.BOX_DATA_DIR).then(function (result) {
|
||||
disks.boxDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.PLATFORM_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.platformDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.APPS_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.appsDataDisk = result.filesystem;
|
||||
|
||||
callback(null, disks);
|
||||
}).catch(function (error) {
|
||||
callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
});
|
||||
}
|
||||
|
||||
function getConfig(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -160,17 +157,17 @@ function getConfig(callback) {
|
||||
|
||||
// be picky about what we send out here since this is sent for 'normal' users as well
|
||||
callback(null, {
|
||||
apiServerOrigin: config.apiServerOrigin(),
|
||||
webServerOrigin: config.webServerOrigin(),
|
||||
adminDomain: config.adminDomain(),
|
||||
adminFqdn: config.adminFqdn(),
|
||||
mailFqdn: config.mailFqdn(),
|
||||
version: config.version(),
|
||||
isDemo: config.isDemo(),
|
||||
edition: config.edition(),
|
||||
apiServerOrigin: settings.apiServerOrigin(),
|
||||
webServerOrigin: settings.webServerOrigin(),
|
||||
adminDomain: settings.adminDomain(),
|
||||
adminFqdn: settings.adminFqdn(),
|
||||
mailFqdn: settings.mailFqdn(),
|
||||
version: constants.VERSION,
|
||||
isDemo: settings.isDemo(),
|
||||
memory: os.totalmem(),
|
||||
provider: config.provider(),
|
||||
cloudronName: allSettings[settings.CLOUDRON_NAME_KEY]
|
||||
provider: sysinfo.provider(),
|
||||
cloudronName: allSettings[settings.CLOUDRON_NAME_KEY],
|
||||
uiSpec: custom.uiSpec()
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -190,10 +187,10 @@ function isRebootRequired(callback) {
|
||||
function runSystemChecks() {
|
||||
async.parallel([
|
||||
checkBackupConfiguration,
|
||||
checkDiskSpace,
|
||||
checkMailStatus
|
||||
], function () {
|
||||
debug('runSystemChecks: done');
|
||||
checkMailStatus,
|
||||
checkRebootRequired
|
||||
], function (error) {
|
||||
debug('runSystemChecks: done', error);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -202,55 +199,10 @@ function checkBackupConfiguration(callback) {
|
||||
|
||||
debug('Checking backup configuration');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return console.error(error);
|
||||
backups.checkConfiguration(function (error, message) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (backupConfig.provider === 'noop') {
|
||||
notifications.backupConfigWarning('Cloudron backups are disabled. Please ensure this server is backed up using alternate means. See https://cloudron.io/documentation/backups/#storage-providers for more information.');
|
||||
} else if (backupConfig.provider === 'filesystem' && !backupConfig.externalDisk) {
|
||||
notifications.backupConfigWarning('Cloudron backups are currently on the same disk as the Cloudron server instance. This is dangerous and can lead to complete data loss if the disk fails. See https://cloudron.io/documentation/backups/#storage-providers for storing backups in an external location.');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function checkDiskSpace(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Checking disk space');
|
||||
|
||||
getDisks(function (error, disks) {
|
||||
if (error) {
|
||||
debug('df error %s', error.message);
|
||||
return callback();
|
||||
}
|
||||
|
||||
df().then(function (entries) {
|
||||
/*
|
||||
[{
|
||||
filesystem: '/dev/disk1',
|
||||
size: 499046809600,
|
||||
used: 443222245376,
|
||||
available: 55562420224,
|
||||
capacity: 0.89,
|
||||
mountpoint: '/'
|
||||
}, ...]
|
||||
*/
|
||||
var oos = entries.some(function (entry) {
|
||||
// ignore other filesystems but where box, app and platform data is
|
||||
if (entry.filesystem !== disks.boxDataDisk && entry.filesystem !== disks.platformDataDisk && entry.filesystem !== disks.appsDataDisk) return false;
|
||||
|
||||
return (entry.available <= (1.25 * 1024 * 1024 * 1024)); // 1.5G
|
||||
});
|
||||
|
||||
debug('Disk space checked. ok: %s', !oos);
|
||||
|
||||
if (oos) notifications.diskSpaceWarning(JSON.stringify(entries, null, 4));
|
||||
|
||||
callback();
|
||||
}).catch(function (error) {
|
||||
if (error) console.error(error);
|
||||
callback();
|
||||
});
|
||||
notifications.alert(notifications.ALERT_BACKUP_CONFIG, 'Backup configuration is unsafe', message, callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -259,28 +211,22 @@ function checkMailStatus(callback) {
|
||||
|
||||
debug('checking mail status');
|
||||
|
||||
domains.getAll(function (error, allDomains) {
|
||||
mail.checkConfiguration(function (error, message) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.filterSeries(allDomains, function (domainObject, iteratorCallback) {
|
||||
mail.getStatus(config.adminDomain(), function (error, result) {
|
||||
if (error) return iteratorCallback(null, true);
|
||||
notifications.alert(notifications.ALERT_MAIL_STATUS, 'Email is not configured properly', message, callback);
|
||||
});
|
||||
}
|
||||
|
||||
let mailError = Object.keys(result.dns).some((record) => !result.dns[record].status);
|
||||
if (result.rbl && !result.rbl.status) mailError = true;
|
||||
if (result.relay && !result.relay.status) mailError = true;
|
||||
function checkRebootRequired(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
iteratorCallback(null, mailError);
|
||||
});
|
||||
}, function (error, erroredDomainObjects) {
|
||||
if (error || erroredDomainObjects.length === 0) return callback(error);
|
||||
debug('checking if reboot required');
|
||||
|
||||
const erroredDomains = erroredDomainObjects.map((d) => d.domain);
|
||||
debug(`checkMailStatus: ${erroredDomains.join(',')} failed status checks`);
|
||||
if (erroredDomains.length) notifications.mailStatusWarning(`Email status check of the following domain(s) failed - ${erroredDomains.join(',')}. See the Status tab in the [Email view](/#/email/) for more information.`);
|
||||
isRebootRequired(function (error, rebootRequired) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback();
|
||||
});
|
||||
notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', rebootRequired ? 'To finish security updates, a [reboot](/#/system) is necessary.' : '', callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -304,7 +250,8 @@ function getLogs(unit, options, callback) {
|
||||
|
||||
// need to handle box.log without subdir
|
||||
if (unit === 'box') args.push(path.join(paths.LOG_DIR, 'box.log'));
|
||||
else args.push(path.join(paths.LOG_DIR, unit, 'app.log'));
|
||||
else if (unit.startsWith('crash-')) args.push(path.join(paths.CRASH_LOG_DIR, unit.slice(6) + '.log'));
|
||||
else return callback(new CloudronError(CloudronError.BAD_FIELD, 'No such unit'));
|
||||
|
||||
var cp = spawn('/usr/bin/tail', args);
|
||||
|
||||
@@ -336,11 +283,30 @@ function prepareDashboardDomain(domain, auditSource, callback) {
|
||||
|
||||
debug(`prepareDashboardDomain: ${domain}`);
|
||||
|
||||
let task = tasks.startTask(tasks.TASK_PREPARE_DASHBOARD_DOMAIN, [ domain, auditSource ]);
|
||||
task.on('error', (error) => callback(new CloudronError(CloudronError.INTERNAL_ERROR, error)));
|
||||
task.on('start', (taskId) => callback(null, taskId));
|
||||
domains.get(domain, function (error, domainObject) {
|
||||
if (error && error.reason === DomainsError.NOT_FOUND) return callback(new CloudronError(CloudronError.BAD_FIELD, 'No such domain'));
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
const fqdn = domains.fqdn(constants.ADMIN_LOCATION, domainObject);
|
||||
|
||||
apps.getAll(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
const conflict = result.filter(app => app.fqdn === fqdn);
|
||||
if (conflict.length) return callback(new CloudronError(CloudronError.BAD_STATE, 'Dashboard location conflicts with an existing app'));
|
||||
|
||||
tasks.add(tasks.TASK_PREPARE_DASHBOARD_DOMAIN, [ domain, auditSource ], function (error, taskId) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
tasks.startTask(taskId, {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null, taskId);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// call this only pre activation since it won't start mail server
|
||||
function setDashboardDomain(domain, auditSource, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
@@ -357,29 +323,58 @@ function setDashboardDomain(domain, auditSource, callback) {
|
||||
|
||||
const fqdn = domains.fqdn(constants.ADMIN_LOCATION, domainObject);
|
||||
|
||||
config.setAdminDomain(domain);
|
||||
config.setAdminLocation(constants.ADMIN_LOCATION);
|
||||
config.setAdminFqdn(fqdn);
|
||||
|
||||
clients.addDefaultClients(config.adminOrigin(), function (error) {
|
||||
async.series([
|
||||
(done) => settings.setAdmin(domain, fqdn, done),
|
||||
(done) => clients.addDefaultClients(settings.adminOrigin(), done)
|
||||
], function (error) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain: domain, fqdn: fqdn });
|
||||
|
||||
mail.setMailFqdn(fqdn, domain, NOOP_CALLBACK);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// call this only post activation because it will restart mail server
|
||||
function setDashboardAndMailDomain(domain, auditSource, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug(`setDashboardAndMailDomain: ${domain}`);
|
||||
|
||||
setDashboardDomain(domain, auditSource, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
mail.onMailFqdnChanged(NOOP_CALLBACK); // this will update dns and re-configure mail server
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function setupDashboard(auditSource, progressCallback, callback) {
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
async.series([
|
||||
domains.prepareDashboardDomain.bind(null, settings.adminDomain(), auditSource, progressCallback),
|
||||
setDashboardDomain.bind(null, settings.adminDomain(), auditSource)
|
||||
], callback);
|
||||
}
|
||||
|
||||
function renewCerts(options, auditSource, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
let task = tasks.startTask(tasks.TASK_RENEW_CERTS, [ options, auditSource ]);
|
||||
task.on('error', (error) => callback(new CloudronError(CloudronError.INTERNAL_ERROR, error)));
|
||||
task.on('start', (taskId) => callback(null, taskId));
|
||||
tasks.add(tasks.TASK_RENEW_CERTS, [ options, auditSource ], function (error, taskId) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
tasks.startTask(taskId, {}, NOOP_CALLBACK);
|
||||
|
||||
callback(null, taskId);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -30,3 +30,13 @@ LoadPlugin "table"
|
||||
</Result>
|
||||
</Table>
|
||||
</Plugin>
|
||||
|
||||
<Plugin python>
|
||||
<Module du>
|
||||
<Path>
|
||||
Instance "<%= appId %>"
|
||||
Dir "<%= appDataDir %>"
|
||||
</Path>
|
||||
</Module>
|
||||
</Plugin>
|
||||
|
||||
|
||||
-248
@@ -1,248 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
baseDir: baseDir,
|
||||
|
||||
// values set here will be lost after a upgrade/update. use the sqlite database
|
||||
// for persistent values that need to be backed up
|
||||
get: get,
|
||||
set: set,
|
||||
|
||||
// ifdefs to check environment
|
||||
CLOUDRON: process.env.BOX_ENV === 'cloudron',
|
||||
TEST: process.env.BOX_ENV === 'test',
|
||||
|
||||
// convenience getters
|
||||
provider: provider,
|
||||
apiServerOrigin: apiServerOrigin,
|
||||
webServerOrigin: webServerOrigin,
|
||||
adminDomain: adminDomain,
|
||||
setFqdn: setAdminDomain,
|
||||
setAdminDomain: setAdminDomain,
|
||||
setAdminFqdn: setAdminFqdn,
|
||||
setAdminLocation: setAdminLocation,
|
||||
version: version,
|
||||
database: database,
|
||||
edition: edition,
|
||||
|
||||
// these values are derived
|
||||
adminOrigin: adminOrigin,
|
||||
internalAdminOrigin: internalAdminOrigin,
|
||||
sysadminOrigin: sysadminOrigin, // caas routes
|
||||
adminLocation: adminLocation,
|
||||
adminFqdn: adminFqdn,
|
||||
mailLocation: mailLocation,
|
||||
mailFqdn: mailFqdn,
|
||||
hasIPv6: hasIPv6,
|
||||
dkimSelector: dkimSelector,
|
||||
|
||||
isManaged: isManaged,
|
||||
isDemo: isDemo,
|
||||
|
||||
// feature flags based on editions (these have a separate license from standard edition)
|
||||
isSpacesEnabled: isSpacesEnabled,
|
||||
|
||||
// for testing resets to defaults
|
||||
_reset: _reset
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
_ = require('underscore');
|
||||
|
||||
|
||||
// assert on unknown environment can't proceed
|
||||
assert(exports.CLOUDRON || exports.TEST, 'Unknown environment. This should not happen!');
|
||||
|
||||
var data = { };
|
||||
|
||||
function baseDir() {
|
||||
const homeDir = process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
|
||||
if (exports.CLOUDRON) return homeDir;
|
||||
if (exports.TEST) return path.join(homeDir, '.cloudron_test');
|
||||
// cannot reach
|
||||
}
|
||||
|
||||
const cloudronConfigFileName = exports.CLOUDRON ? '/etc/cloudron/cloudron.conf' : path.join(baseDir(), 'cloudron.conf');
|
||||
|
||||
function saveSync() {
|
||||
// only save values we want to have in the cloudron.conf, see start.sh
|
||||
var conf = {
|
||||
apiServerOrigin: data.apiServerOrigin,
|
||||
webServerOrigin: data.webServerOrigin,
|
||||
adminDomain: data.adminDomain,
|
||||
adminFqdn: data.adminFqdn,
|
||||
adminLocation: data.adminLocation,
|
||||
provider: data.provider,
|
||||
isDemo: data.isDemo,
|
||||
edition: data.edition
|
||||
};
|
||||
|
||||
fs.writeFileSync(cloudronConfigFileName, JSON.stringify(conf, null, 4)); // functions are ignored by JSON.stringify
|
||||
}
|
||||
|
||||
function _reset(callback) {
|
||||
safe.fs.unlinkSync(cloudronConfigFileName);
|
||||
|
||||
initConfig();
|
||||
|
||||
if (callback) callback();
|
||||
}
|
||||
|
||||
function initConfig() {
|
||||
// setup defaults
|
||||
data.adminFqdn = '';
|
||||
data.adminDomain = '';
|
||||
data.adminLocation = 'my';
|
||||
data.port = 3000;
|
||||
data.apiServerOrigin = null;
|
||||
data.webServerOrigin = null;
|
||||
data.provider = 'generic';
|
||||
data.smtpPort = 2525; // this value comes from mail container
|
||||
data.sysadminPort = 3001;
|
||||
data.ldapPort = 3002;
|
||||
data.dockerProxyPort = 3003;
|
||||
data.edition = '';
|
||||
|
||||
// keep in sync with start.sh
|
||||
data.database = {
|
||||
hostname: '127.0.0.1',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
port: 3306,
|
||||
name: 'box'
|
||||
};
|
||||
|
||||
// overrides for local testings
|
||||
if (exports.TEST) {
|
||||
data.port = 5454;
|
||||
data.apiServerOrigin = 'http://localhost:6060'; // hock doesn't support https
|
||||
|
||||
// see setupTest script how the mysql-server is run
|
||||
data.database.hostname = require('child_process').execSync('docker inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" mysql-server').toString().trim();
|
||||
}
|
||||
|
||||
// overwrite defaults with saved config
|
||||
var existingData = safe.JSON.parse(safe.fs.readFileSync(cloudronConfigFileName, 'utf8'));
|
||||
_.extend(data, existingData);
|
||||
}
|
||||
|
||||
initConfig();
|
||||
|
||||
// set(obj) or set(key, value)
|
||||
function set(key, value) {
|
||||
if (typeof key === 'object') {
|
||||
var obj = key;
|
||||
for (var k in obj) {
|
||||
assert(k in data, 'config.js is missing key "' + k + '"');
|
||||
data[k] = obj[k];
|
||||
}
|
||||
} else {
|
||||
data = safe.set(data, key, value);
|
||||
}
|
||||
|
||||
saveSync();
|
||||
}
|
||||
|
||||
function get(key) {
|
||||
assert.strictEqual(typeof key, 'string');
|
||||
|
||||
return safe.query(data, key);
|
||||
}
|
||||
|
||||
function apiServerOrigin() {
|
||||
return get('apiServerOrigin');
|
||||
}
|
||||
|
||||
function webServerOrigin() {
|
||||
return get('webServerOrigin');
|
||||
}
|
||||
|
||||
function setAdminDomain(domain) {
|
||||
set('adminDomain', domain);
|
||||
}
|
||||
|
||||
function adminDomain() {
|
||||
return get('adminDomain');
|
||||
}
|
||||
|
||||
function mailLocation() {
|
||||
return get('adminLocation'); // not a typo! should be same as admin location until we figure out certificates
|
||||
}
|
||||
|
||||
function setAdminLocation(location) {
|
||||
set('adminLocation', location);
|
||||
}
|
||||
|
||||
|
||||
function adminLocation() {
|
||||
return get('adminLocation');
|
||||
}
|
||||
|
||||
function setAdminFqdn(adminFqdn) {
|
||||
set('adminFqdn', adminFqdn);
|
||||
}
|
||||
|
||||
function adminFqdn() {
|
||||
return get('adminFqdn');
|
||||
}
|
||||
|
||||
function mailFqdn() {
|
||||
return adminFqdn();
|
||||
}
|
||||
|
||||
function adminOrigin() {
|
||||
return 'https://' + adminFqdn();
|
||||
}
|
||||
|
||||
function internalAdminOrigin() {
|
||||
return 'http://127.0.0.1:' + get('port');
|
||||
}
|
||||
|
||||
function sysadminOrigin() {
|
||||
return 'http://127.0.0.1:' + get('sysadminPort');
|
||||
}
|
||||
|
||||
function version() {
|
||||
if (exports.TEST) return '3.0.0-test';
|
||||
return fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim();
|
||||
}
|
||||
|
||||
function database() {
|
||||
return get('database');
|
||||
}
|
||||
|
||||
function isDemo() {
|
||||
return get('isDemo') === true;
|
||||
}
|
||||
|
||||
function isSpacesEnabled() {
|
||||
return get('edition') === 'education';
|
||||
}
|
||||
|
||||
function provider() {
|
||||
return get('provider');
|
||||
}
|
||||
|
||||
function isManaged() {
|
||||
return edition() === 'hostingprovider';
|
||||
}
|
||||
|
||||
function hasIPv6() {
|
||||
const IPV6_PROC_FILE = '/proc/net/if_inet6';
|
||||
// on contabo, /proc/net/if_inet6 is an empty file. so just exists is not enough
|
||||
return fs.existsSync(IPV6_PROC_FILE) && fs.readFileSync(IPV6_PROC_FILE, 'utf8').trim().length !== 0;
|
||||
}
|
||||
|
||||
// it has to change with the adminLocation so that multiple cloudrons
|
||||
// can send out emails at the same time.
|
||||
function dkimSelector() {
|
||||
var loc = adminLocation();
|
||||
return loc === 'my' ? 'cloudron' : `cloudron-${loc.replace(/\./g, '')}`;
|
||||
}
|
||||
|
||||
function edition() {
|
||||
return get('edition');
|
||||
}
|
||||
+20
-2
@@ -1,7 +1,12 @@
|
||||
'use strict';
|
||||
|
||||
let fs = require('fs'),
|
||||
path = require('path');
|
||||
|
||||
const CLOUDRON = process.env.BOX_ENV === 'cloudron',
|
||||
TEST = process.env.BOX_ENV === 'test';
|
||||
|
||||
exports = module.exports = {
|
||||
API_LOCATION: 'api', // this is unused but reserved for future use (#403)
|
||||
SMTP_LOCATION: 'smtp',
|
||||
IMAP_LOCATION: 'imap',
|
||||
|
||||
@@ -19,6 +24,12 @@ exports = module.exports = {
|
||||
|
||||
ADMIN_LOCATION: 'my',
|
||||
|
||||
PORT: CLOUDRON ? 3000 : 5454,
|
||||
INTERNAL_SMTP_PORT: 2525, // this value comes from the mail container
|
||||
SYSADMIN_PORT: 3001, // unused
|
||||
LDAP_PORT: 3002,
|
||||
DOCKER_PROXY_PORT: 3003,
|
||||
|
||||
NGINX_DEFAULT_CONFIG_FILE_NAME: 'default.conf',
|
||||
|
||||
GHOST_USER_FILE: '/tmp/cloudron_ghost.json',
|
||||
@@ -29,6 +40,13 @@ exports = module.exports = {
|
||||
|
||||
DEMO_USERNAME: 'cloudron',
|
||||
|
||||
AUTOUPDATE_PATTERN_NEVER: 'never'
|
||||
AUTOUPDATE_PATTERN_NEVER: 'never',
|
||||
|
||||
SECRET_PLACEHOLDER: String.fromCharCode(0x25CF).repeat(8),
|
||||
|
||||
CLOUDRON: CLOUDRON,
|
||||
TEST: TEST,
|
||||
|
||||
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '4.2.0-test'
|
||||
};
|
||||
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
sendFailureLogs: sendFailureLogs
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
auditSource = require('./auditsource.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
safe = require('safetydance'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
util = require('util');
|
||||
|
||||
const COLLECT_LOGS_CMD = path.join(__dirname, 'scripts/collectlogs.sh');
|
||||
|
||||
const CRASH_LOG_TIMESTAMP_OFFSET = 1000 * 60 * 60; // 60 min
|
||||
const CRASH_LOG_TIMESTAMP_FILE = '/tmp/crashlog.timestamp';
|
||||
|
||||
function collectLogs(unitName, callback) {
|
||||
assert.strictEqual(typeof unitName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var logs = safe.child_process.execSync('sudo ' + COLLECT_LOGS_CMD + ' ' + unitName, { encoding: 'utf8' });
|
||||
if (!logs) return callback(safe.error);
|
||||
|
||||
callback(null, logs);
|
||||
}
|
||||
|
||||
function sendFailureLogs(unitName, callback) {
|
||||
assert.strictEqual(typeof unitName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// check if we already sent a mail in the last CRASH_LOG_TIME_OFFSET window
|
||||
const timestamp = safe.fs.readFileSync(CRASH_LOG_TIMESTAMP_FILE, 'utf8');
|
||||
if (timestamp && (parseInt(timestamp) + CRASH_LOG_TIMESTAMP_OFFSET) > Date.now()) {
|
||||
console.log('Crash log already sent within window');
|
||||
return callback();
|
||||
}
|
||||
|
||||
collectLogs(unitName, function (error, logs) {
|
||||
if (error) {
|
||||
console.error('Failed to collect logs.', error);
|
||||
logs = util.format('Failed to collect logs.', error);
|
||||
}
|
||||
|
||||
const crashId = `${new Date().toISOString()}`;
|
||||
console.log(`Creating crash log for ${unitName} with id ${crashId}`);
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.CRASH_LOG_DIR, `${crashId}.log`), logs)) console.log(`Failed to stash logs to ${crashId}.log:`, safe.error);
|
||||
|
||||
eventlog.add(eventlog.ACTION_PROCESS_CRASH, auditSource.HEALTH_MONITOR, { processName: unitName, crashId: crashId }, function (error) {
|
||||
if (error) console.log(`Error sending crashlog. Logs stashed at ${crashId}.log`);
|
||||
|
||||
safe.fs.writeFileSync(CRASH_LOG_TIMESTAMP_FILE, String(Date.now()));
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
+41
-56
@@ -1,24 +1,24 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
startPostActivationJobs: startPostActivationJobs,
|
||||
startPreActivationJobs: startPreActivationJobs,
|
||||
startJobs: startJobs,
|
||||
|
||||
stopJobs: stopJobs
|
||||
stopJobs: stopJobs,
|
||||
|
||||
handleSettingsChanged: handleSettingsChanged
|
||||
};
|
||||
|
||||
var appHealthMonitor = require('./apphealthmonitor.js'),
|
||||
apps = require('./apps.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
assert = require('assert'),
|
||||
auditSource = require('./auditsource.js'),
|
||||
backups = require('./backups.js'),
|
||||
caas = require('./caas.js'),
|
||||
cloudron = require('./cloudron.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
CronJob = require('cron').CronJob,
|
||||
debug = require('debug')('box:cron'),
|
||||
digest = require('./digest.js'),
|
||||
disks = require('./disks.js'),
|
||||
dyndns = require('./dyndns.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
janitor = require('./janitor.js'),
|
||||
@@ -34,13 +34,12 @@ var gJobs = {
|
||||
appUpdateChecker: null,
|
||||
backup: null,
|
||||
boxUpdateChecker: null,
|
||||
caasHeartbeat: null,
|
||||
systemChecks: null,
|
||||
diskSpaceChecker: null,
|
||||
certificateRenew: null,
|
||||
cleanupBackups: null,
|
||||
cleanupEventlog: null,
|
||||
cleanupTokens: null,
|
||||
digestEmail: null,
|
||||
dockerVolumeCleaner: null,
|
||||
dynamicDns: null,
|
||||
schedulerSync: null,
|
||||
@@ -48,7 +47,6 @@ var gJobs = {
|
||||
};
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
var AUDIT_SOURCE = { userId: null, username: 'cron' };
|
||||
|
||||
// cron format
|
||||
// Seconds: 0-59
|
||||
@@ -58,24 +56,7 @@ var AUDIT_SOURCE = { userId: null, username: 'cron' };
|
||||
// Months: 0-11
|
||||
// Day of Week: 0-6
|
||||
|
||||
function startPreActivationJobs(callback) {
|
||||
if (config.provider() === 'caas') {
|
||||
// hack: send the first heartbeat only after we are running for 60 seconds
|
||||
// required as we end up sending a heartbeat and then cloudron-setup reboots the server
|
||||
var seconds = (new Date()).getSeconds() - 1;
|
||||
if (seconds === -1) seconds = 59;
|
||||
|
||||
gJobs.caasHeartbeat = new CronJob({
|
||||
cronTime: `${seconds} */1 * * * *`, // every minute
|
||||
onTick: caas.sendHeartbeat,
|
||||
start: true
|
||||
});
|
||||
}
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function startPostActivationJobs(callback) {
|
||||
function startJobs(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var randomHourMinute = Math.floor(60*Math.random());
|
||||
@@ -85,11 +66,6 @@ function startPostActivationJobs(callback) {
|
||||
start: true
|
||||
});
|
||||
|
||||
settings.on(settings.TIME_ZONE_KEY, recreateJobs);
|
||||
settings.on(settings.APP_AUTOUPDATE_PATTERN_KEY, appAutoupdatePatternChanged);
|
||||
settings.on(settings.BOX_AUTOUPDATE_PATTERN_KEY, boxAutoupdatePatternChanged);
|
||||
settings.on(settings.DYNAMIC_DNS_KEY, dynamicDnsChanged);
|
||||
|
||||
settings.getAll(function (error, allSettings) {
|
||||
if (error) return callback(error);
|
||||
|
||||
@@ -102,6 +78,19 @@ function startPostActivationJobs(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function handleSettingsChanged(key, value) {
|
||||
assert.strictEqual(typeof key, 'string');
|
||||
|
||||
// value is a variant
|
||||
switch (key) {
|
||||
case settings.TIME_ZONE_KEY: recreateJobs(value); break;
|
||||
case settings.APP_AUTOUPDATE_PATTERN_KEY: appAutoupdatePatternChanged(value); break;
|
||||
case settings.BOX_AUTOUPDATE_PATTERN_KEY: boxAutoupdatePatternChanged(value); break;
|
||||
case settings.DYNAMIC_DNS_KEY: dynamicDnsChanged(value); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
|
||||
function recreateJobs(tz) {
|
||||
assert.strictEqual(typeof tz, 'string');
|
||||
|
||||
@@ -110,27 +99,36 @@ function recreateJobs(tz) {
|
||||
if (gJobs.backup) gJobs.backup.stop();
|
||||
gJobs.backup = new CronJob({
|
||||
cronTime: '00 00 */6 * * *', // check every 6 hours
|
||||
onTick: backups.ensureBackup.bind(null, AUDIT_SOURCE, NOOP_CALLBACK),
|
||||
onTick: backups.ensureBackup.bind(null, auditSource.CRON, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
if (gJobs.systemChecks) gJobs.systemChecks.stop();
|
||||
gJobs.systemChecks = new CronJob({
|
||||
cronTime: '00 30 * * * *', // every hour
|
||||
cronTime: '00 30 * * * *', // every 30 minutes. if you change this interval, change the notification messages with correct duration
|
||||
onTick: cloudron.runSystemChecks,
|
||||
start: true,
|
||||
runOnInit: true, // run system check immediately
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
if (gJobs.diskSpaceChecker) gJobs.diskSpaceChecker.stop();
|
||||
gJobs.diskSpaceChecker = new CronJob({
|
||||
cronTime: '00 30 * * * *', // every 30 minutes. if you change this interval, change the notification messages with correct duration
|
||||
onTick: () => disks.checkDiskSpace(NOOP_CALLBACK),
|
||||
start: true,
|
||||
runOnInit: true, // run system check immediately
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
// randomized pattern per cloudron every hour
|
||||
var randomMinute = Math.floor(60*Math.random());
|
||||
|
||||
if (gJobs.boxUpdateCheckerJob) gJobs.boxUpdateCheckerJob.stop();
|
||||
gJobs.boxUpdateCheckerJob = new CronJob({
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
onTick: updateChecker.checkBoxUpdates,
|
||||
onTick: () => updateChecker.checkBoxUpdates(NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -138,7 +136,7 @@ function recreateJobs(tz) {
|
||||
if (gJobs.appUpdateChecker) gJobs.appUpdateChecker.stop();
|
||||
gJobs.appUpdateChecker = new CronJob({
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
onTick: updateChecker.checkAppUpdates,
|
||||
onTick: () => updateChecker.checkAppUpdates(NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -154,7 +152,7 @@ function recreateJobs(tz) {
|
||||
if (gJobs.cleanupBackups) gJobs.cleanupBackups.stop();
|
||||
gJobs.cleanupBackups = new CronJob({
|
||||
cronTime: '00 45 */6 * * *', // every 6 hours. try not to overlap with ensureBackup job
|
||||
onTick: backups.startCleanupTask.bind(null, AUDIT_SOURCE, NOOP_CALLBACK),
|
||||
onTick: backups.startCleanupTask.bind(null, auditSource.CRON, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -177,7 +175,7 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gJobs.schedulerSync) gJobs.schedulerSync.stop();
|
||||
gJobs.schedulerSync = new CronJob({
|
||||
cronTime: config.TEST ? '*/10 * * * * *' : '00 */1 * * * *', // every minute
|
||||
cronTime: constants.TEST ? '*/10 * * * * *' : '00 */1 * * * *', // every minute
|
||||
onTick: scheduler.sync,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -186,15 +184,7 @@ function recreateJobs(tz) {
|
||||
if (gJobs.certificateRenew) gJobs.certificateRenew.stop();
|
||||
gJobs.certificateRenew = new CronJob({
|
||||
cronTime: '00 00 */12 * * *', // every 12 hours
|
||||
onTick: cloudron.renewCerts.bind(null, {}, AUDIT_SOURCE, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
if (gJobs.digestEmail) gJobs.digestEmail.stop();
|
||||
gJobs.digestEmail = new CronJob({
|
||||
cronTime: '00 00 00 * * 3', // every wednesday
|
||||
onTick: digest.maybeSend,
|
||||
onTick: cloudron.renewCerts.bind(null, {}, auditSource.CRON, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -224,7 +214,7 @@ function boxAutoupdatePatternChanged(pattern) {
|
||||
var updateInfo = updateChecker.getUpdateInfo();
|
||||
if (updateInfo.box) {
|
||||
debug('Starting autoupdate to %j', updateInfo.box);
|
||||
updater.updateToLatest(AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
updater.updateToLatest({ skipBackup: false }, auditSource.CRON, NOOP_CALLBACK);
|
||||
} else {
|
||||
debug('No box auto updates available');
|
||||
}
|
||||
@@ -250,7 +240,7 @@ function appAutoupdatePatternChanged(pattern) {
|
||||
var updateInfo = updateChecker.getUpdateInfo();
|
||||
if (updateInfo.apps) {
|
||||
debug('Starting app update to %j', updateInfo.apps);
|
||||
apps.autoupdateApps(updateInfo.apps, AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
apps.autoupdateApps(updateInfo.apps, auditSource.CRON, NOOP_CALLBACK);
|
||||
} else {
|
||||
debug('No app auto updates available');
|
||||
}
|
||||
@@ -268,8 +258,8 @@ function dynamicDnsChanged(enabled) {
|
||||
|
||||
if (enabled) {
|
||||
gJobs.dynamicDns = new CronJob({
|
||||
cronTime: '00 */10 * * * *',
|
||||
onTick: dyndns.sync,
|
||||
cronTime: '5 * * * * *', // we only update the records if the ip has changed.
|
||||
onTick: dyndns.sync.bind(null, auditSource.CRON, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: gJobs.boxUpdateCheckerJob.cronTime.zone // hack
|
||||
});
|
||||
@@ -282,11 +272,6 @@ function dynamicDnsChanged(enabled) {
|
||||
function stopJobs(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.removeListener(settings.TIME_ZONE_KEY, recreateJobs);
|
||||
settings.removeListener(settings.APP_AUTOUPDATE_PATTERN_KEY, appAutoupdatePatternChanged);
|
||||
settings.removeListener(settings.BOX_AUTOUPDATE_PATTERN_KEY, boxAutoupdatePatternChanged);
|
||||
settings.removeListener(settings.DYNAMIC_DNS_KEY, dynamicDnsChanged);
|
||||
|
||||
for (var job in gJobs) {
|
||||
if (!gJobs[job]) continue;
|
||||
gJobs[job].stop();
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
'use strict';
|
||||
|
||||
let debug = require('debug')('box:custom'),
|
||||
lodash = require('lodash'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
yaml = require('js-yaml');
|
||||
|
||||
exports = module.exports = {
|
||||
uiSpec: uiSpec,
|
||||
spec: spec
|
||||
};
|
||||
|
||||
const DEFAULT_SPEC = {
|
||||
appstore: {
|
||||
blacklist: [],
|
||||
whitelist: null // null imples, not set. this is an object and not an array
|
||||
},
|
||||
backups: {
|
||||
configurable: true
|
||||
},
|
||||
domains: {
|
||||
dynamicDns: true,
|
||||
changeDashboardDomain: true
|
||||
},
|
||||
subscription: {
|
||||
configurable: true
|
||||
},
|
||||
support: {
|
||||
email: 'support@cloudron.io',
|
||||
remoteSupport: true,
|
||||
ticketFormBody:
|
||||
'Use this form to open support tickets. You can also write directly to [support@cloudron.io](mailto:support@cloudron.io).\n\n'
|
||||
+ '* [Knowledge Base & App Docs](https://cloudron.io/documentation/apps/?support_view)\n'
|
||||
+ '* [Custom App Packaging & API](https://cloudron.io/developer/packaging/?support_view)\n'
|
||||
+ '* [Forum](https://forum.cloudron.io/)\n\n',
|
||||
submitTickets: true
|
||||
},
|
||||
alerts: {
|
||||
email: '',
|
||||
notifyCloudronAdmins: false
|
||||
},
|
||||
footer: {
|
||||
body: '© 2019 [Cloudron](https://cloudron.io) [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)'
|
||||
}
|
||||
};
|
||||
|
||||
const gSpec = (function () {
|
||||
try {
|
||||
if (!safe.fs.existsSync(paths.CUSTOM_FILE)) return DEFAULT_SPEC;
|
||||
const c = yaml.safeLoad(safe.fs.readFileSync(paths.CUSTOM_FILE, 'utf8'));
|
||||
return lodash.merge({}, DEFAULT_SPEC, c);
|
||||
} catch (e) {
|
||||
debug(`Error loading features file from ${paths.CUSTOM_FILE} : ${e.message}`);
|
||||
return DEFAULT_SPEC;
|
||||
}
|
||||
})();
|
||||
|
||||
// flags sent to the UI. this is separate because we have values that are secret to the backend
|
||||
function uiSpec() {
|
||||
return gSpec;
|
||||
}
|
||||
|
||||
function spec() {
|
||||
return gSpec;
|
||||
}
|
||||
+26
-12
@@ -15,7 +15,7 @@ exports = module.exports = {
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
child_process = require('child_process'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
mysql = require('mysql'),
|
||||
once = require('once'),
|
||||
util = require('util');
|
||||
@@ -23,24 +23,38 @@ var assert = require('assert'),
|
||||
var gConnectionPool = null,
|
||||
gDefaultConnection = null;
|
||||
|
||||
const gDatabase = {
|
||||
hostname: '127.0.0.1',
|
||||
username: 'root',
|
||||
password: 'password',
|
||||
port: 3306,
|
||||
name: 'box'
|
||||
};
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (gConnectionPool !== null) return callback(null);
|
||||
|
||||
if (constants.TEST) {
|
||||
// see setupTest script how the mysql-server is run
|
||||
gDatabase.hostname = require('child_process').execSync('docker inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" mysql-server').toString().trim();
|
||||
}
|
||||
|
||||
gConnectionPool = mysql.createPool({
|
||||
connectionLimit: 5, // this has to be > 1 since we store one connection as 'default'. the rest for transactions
|
||||
host: config.database().hostname,
|
||||
user: config.database().username,
|
||||
password: config.database().password,
|
||||
port: config.database().port,
|
||||
database: config.database().name,
|
||||
host: gDatabase.hostname,
|
||||
user: gDatabase.username,
|
||||
password: gDatabase.password,
|
||||
port: gDatabase.port,
|
||||
database: gDatabase.name,
|
||||
multipleStatements: false,
|
||||
ssl: false
|
||||
ssl: false,
|
||||
timezone: 'Z' // mysql follows the SYSTEM timezone. on Cloudron, this is UTC
|
||||
});
|
||||
|
||||
gConnectionPool.on('connection', function (connection) {
|
||||
connection.query('USE ' + config.database().name);
|
||||
connection.query('USE ' + gDatabase.name);
|
||||
connection.query('SET SESSION sql_mode = \'strict_all_tables\'');
|
||||
});
|
||||
|
||||
@@ -86,8 +100,8 @@ function clear(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var cmd = util.format('mysql --host="%s" --user="%s" --password="%s" -Nse "SHOW TABLES" %s | grep -v "^migrations$" | while read table; do mysql --host="%s" --user="%s" --password="%s" -e "SET FOREIGN_KEY_CHECKS = 0; TRUNCATE TABLE $table" %s; done',
|
||||
config.database().hostname, config.database().username, config.database().password, config.database().name,
|
||||
config.database().hostname, config.database().username, config.database().password, config.database().name);
|
||||
gDatabase.hostname, gDatabase.username, gDatabase.password, gDatabase.name,
|
||||
gDatabase.hostname, gDatabase.username, gDatabase.password, gDatabase.name);
|
||||
|
||||
async.series([
|
||||
child_process.exec.bind(null, cmd),
|
||||
@@ -177,7 +191,7 @@ function importFromFile(file, callback) {
|
||||
assert.strictEqual(typeof file, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var cmd = `/usr/bin/mysql -h "${config.database().hostname}" -u ${config.database().username} -p${config.database().password} ${config.database().name} < ${file}`;
|
||||
var cmd = `/usr/bin/mysql -h "${gDatabase.hostname}" -u ${gDatabase.username} -p${gDatabase.password} ${gDatabase.name} < ${file}`;
|
||||
|
||||
async.series([
|
||||
query.bind(null, 'CREATE DATABASE IF NOT EXISTS box'),
|
||||
@@ -189,7 +203,7 @@ function exportToFile(file, callback) {
|
||||
assert.strictEqual(typeof file, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var cmd = `/usr/bin/mysqldump -h "${config.database().hostname}" -u root -p${config.database().password} --single-transaction --routines --triggers ${config.database().name} > "${file}"`;
|
||||
var cmd = `/usr/bin/mysqldump -h "${gDatabase.hostname}" -u root -p${gDatabase.password} --single-transaction --routines --triggers ${gDatabase.name} > "${file}"`;
|
||||
|
||||
child_process.exec(cmd, callback);
|
||||
}
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var debug = require('debug')('box:digest'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
updatechecker = require('./updatechecker.js'),
|
||||
mailer = require('./mailer.js'),
|
||||
settings = require('./settings.js');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
exports = module.exports = {
|
||||
maybeSend: maybeSend
|
||||
};
|
||||
|
||||
function maybeSend(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getEmailDigest(function (error, enabled) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!enabled) {
|
||||
debug('Email digest is disabled');
|
||||
return callback();
|
||||
}
|
||||
|
||||
var updateInfo = updatechecker.getUpdateInfo();
|
||||
var pendingAppUpdates = updateInfo.apps || {};
|
||||
pendingAppUpdates = Object.keys(pendingAppUpdates).map(function (key) { return pendingAppUpdates[key]; });
|
||||
|
||||
eventlog.getByCreationTime(new Date(new Date() - 7*86400000), function (error, events) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var appUpdates = events.filter(function (e) { return e.action === eventlog.ACTION_APP_UPDATE; }).map(function (e) { return e.data; });
|
||||
var boxUpdates = events.filter(function (e) { return e.action === eventlog.ACTION_UPDATE; }).map(function (e) { return e.data; });
|
||||
var certRenewals = events.filter(function (e) { return e.action === eventlog.ACTION_CERTIFICATE_RENEWAL; }).map(function (e) { return e.data; });
|
||||
var usersAdded = events.filter(function (e) { return e.action === eventlog.ACTION_USER_ADD; }).map(function (e) { return e.data; });
|
||||
var usersRemoved = events.filter(function (e) { return e.action === eventlog.ACTION_USER_REMOVE; }).map(function (e) { return e.data; });
|
||||
var finishedBackups = events.filter(function (e) { return e.action === eventlog.ACTION_BACKUP_FINISH && !e.errorMessage; }).map(function (e) { return e.data; });
|
||||
|
||||
if (error) return callback(error);
|
||||
|
||||
var info = {
|
||||
pendingAppUpdates: pendingAppUpdates,
|
||||
pendingBoxUpdate: updateInfo.box || null,
|
||||
|
||||
finishedAppUpdates: appUpdates,
|
||||
finishedBoxUpdates: boxUpdates,
|
||||
|
||||
certRenewals: certRenewals,
|
||||
finishedBackups: finishedBackups, // only the successful backups
|
||||
usersAdded: usersAdded,
|
||||
usersRemoved: usersRemoved // unused because we don't have username to work with
|
||||
};
|
||||
|
||||
// always send digest for backup failure notification
|
||||
debug('maybeSend: sending digest email', info);
|
||||
mailer.sendDigest(info);
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
+118
@@ -0,0 +1,118 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
getDisks: getDisks,
|
||||
checkDiskSpace: checkDiskSpace
|
||||
};
|
||||
|
||||
const apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
debug = require('debug')('box:disks'),
|
||||
df = require('@sindresorhus/df'),
|
||||
docker = require('./docker.js'),
|
||||
notifications = require('./notifications.js'),
|
||||
paths = require('./paths.js'),
|
||||
util = require('util');
|
||||
|
||||
function DisksError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(DisksError, Error);
|
||||
DisksError.INTERNAL_ERROR = 'Internal Error';
|
||||
DisksError.EXTERNAL_ERROR = 'External Error';
|
||||
|
||||
function getDisks(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const dfAsync = async.asyncify(df), dfFileAsync = async.asyncify(df.file);
|
||||
|
||||
docker.info(function (error, info) {
|
||||
if (error) return callback(new DisksError(DisksError.INTERNAL_ERROR, error));
|
||||
|
||||
async.series([
|
||||
dfAsync,
|
||||
dfFileAsync.bind(null, paths.BOX_DATA_DIR),
|
||||
dfFileAsync.bind(null, paths.PLATFORM_DATA_DIR),
|
||||
dfFileAsync.bind(null, paths.APPS_DATA_DIR),
|
||||
dfFileAsync.bind(null, info.DockerRootDir)
|
||||
], function (error, values) {
|
||||
if (error) return callback(new DisksError(DisksError.INTERNAL_ERROR, error));
|
||||
|
||||
// filter by ext4 and then sort to make sure root disk is first
|
||||
const ext4Disks = values[0].filter((r) => r.type === 'ext4').sort((a, b) => a.mountpoint.localeCompare(b.mountpoint));
|
||||
|
||||
const disks = {
|
||||
disks: ext4Disks, // root disk is first
|
||||
boxDataDisk: values[1].filesystem,
|
||||
mailDataDisk: values[1].filesystem,
|
||||
platformDataDisk: values[2].filesystem,
|
||||
appsDataDisk: values[3].filesystem,
|
||||
dockerDataDisk: values[4].filesystem,
|
||||
apps: {}
|
||||
};
|
||||
|
||||
apps.getAll(function (error, allApps) {
|
||||
if (error) return callback(new DisksError(DisksError.INTERNAL_ERROR, error));
|
||||
|
||||
async.eachSeries(allApps, function (app, iteratorDone) {
|
||||
if (!app.dataDir) {
|
||||
disks.apps[app.id] = disks.appsDataDisk;
|
||||
return iteratorDone();
|
||||
}
|
||||
|
||||
dfFileAsync(app.dataDir, function (error, result) {
|
||||
disks.apps[app.id] = error ? disks.appsDataDisk : result.filesystem; // ignore any errors
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (error) {
|
||||
if (error) return callback(new DisksError(DisksError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, disks);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function checkDiskSpace(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Checking disk space');
|
||||
|
||||
getDisks(function (error, disks) {
|
||||
if (error) {
|
||||
debug('checkDiskSpace: error getting disks %s', error.message);
|
||||
return callback();
|
||||
}
|
||||
|
||||
var oos = disks.disks.some(function (entry) {
|
||||
// ignore other filesystems but where box, app and platform data is
|
||||
if (entry.filesystem !== disks.boxDataDisk
|
||||
&& entry.filesystem !== disks.platformDataDisk
|
||||
&& entry.filesystem !== disks.appsDataDisk
|
||||
&& entry.filesystem !== disks.dockerDataDisk) return false;
|
||||
|
||||
return (entry.available <= (1.25 * 1024 * 1024 * 1024)); // 1.5G
|
||||
});
|
||||
|
||||
debug('checkDiskSpace: disk space checked. ok: %s', !oos);
|
||||
|
||||
notifications.alert(notifications.ALERT_DISK_SPACE, 'Server is running out of disk space', oos ? JSON.stringify(disks.disks, null, 4) : '', callback);
|
||||
});
|
||||
}
|
||||
+4
-4
@@ -11,10 +11,10 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('../config.js'),
|
||||
debug = require('debug')('box:dns/caas'),
|
||||
domains = require('../domains.js'),
|
||||
DomainsError = require('../domains.js').DomainsError,
|
||||
settings = require('../settings.js'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
@@ -58,7 +58,7 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
};
|
||||
|
||||
superagent
|
||||
.post(config.apiServerOrigin() + '/api/v1/domains/' + fqdn)
|
||||
.post(settings.apiServerOrigin() + '/api/v1/caas/domains/' + fqdn)
|
||||
.query({ token: dnsConfig.token })
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
@@ -84,7 +84,7 @@ function get(domainObject, location, type, callback) {
|
||||
debug('get: zoneName: %s subdomain: %s type: %s fqdn: %s', domainObject.domain, location, type, fqdn);
|
||||
|
||||
superagent
|
||||
.get(config.apiServerOrigin() + '/api/v1/domains/' + fqdn)
|
||||
.get(settings.apiServerOrigin() + '/api/v1/caas/domains/' + fqdn)
|
||||
.query({ token: dnsConfig.token, type: type })
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
@@ -111,7 +111,7 @@ function del(domainObject, location, type, values, callback) {
|
||||
};
|
||||
|
||||
superagent
|
||||
.del(config.apiServerOrigin() + '/api/v1/domains/' + getFqdn(location, domainObject.domain))
|
||||
.del(settings.apiServerOrigin() + '/api/v1/caas/domains/' + getFqdn(location, domainObject.domain))
|
||||
.query({ token: dnsConfig.token })
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
|
||||
+21
-9
@@ -45,17 +45,20 @@ function getInternal(dnsConfig, zoneName, name, type, callback) {
|
||||
|
||||
var nextPage = null, matchingRecords = [];
|
||||
|
||||
debug(`getInternal: getting dns records of ${zoneName} with ${name} and type ${type}`);
|
||||
|
||||
async.doWhilst(function (iteratorDone) {
|
||||
var url = nextPage ? nextPage : DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records';
|
||||
|
||||
superagent.get(url)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
|
||||
if (result.statusCode === 404) return callback(new DomainsError(DomainsError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new DomainsError(DomainsError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (error && !error.response) return iteratorDone(new DomainsError(DomainsError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
|
||||
if (result.statusCode === 404) return iteratorDone(new DomainsError(DomainsError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorDone(new DomainsError(DomainsError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return iteratorDone(new DomainsError(DomainsError.EXTERNAL_ERROR, formatError(result)));
|
||||
|
||||
matchingRecords = matchingRecords.concat(result.body.domain_records.filter(function (record) {
|
||||
return (record.type === type && record.name === name);
|
||||
@@ -66,9 +69,9 @@ function getInternal(dnsConfig, zoneName, name, type, callback) {
|
||||
iteratorDone();
|
||||
});
|
||||
}, function () { return !!nextPage; }, function (error) {
|
||||
if (error) return callback(error);
|
||||
debug('getInternal:', error, JSON.stringify(matchingRecords));
|
||||
|
||||
debug('getInternal: %j', matchingRecords);
|
||||
if (error) return callback(error);
|
||||
|
||||
return callback(null, matchingRecords);
|
||||
});
|
||||
@@ -106,7 +109,7 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
name: name,
|
||||
data: value,
|
||||
priority: priority,
|
||||
ttl: 1
|
||||
ttl: 30 // Recent DO DNS API break means this value must atleast be 30
|
||||
};
|
||||
|
||||
if (i >= result.length) {
|
||||
@@ -114,6 +117,7 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return iteratorCallback(new DomainsError(DomainsError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return iteratorCallback(new DomainsError(DomainsError.ACCESS_DENIED, formatError(result)));
|
||||
@@ -129,6 +133,7 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.send(data)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
// increment, as we have consumed the record
|
||||
++i;
|
||||
@@ -143,7 +148,13 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
return iteratorCallback(null);
|
||||
});
|
||||
}
|
||||
}, callback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('upsert: completed with recordIds:%j', recordIds);
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -196,6 +207,7 @@ function del(domainObject, location, type, values, callback) {
|
||||
superagent.del(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + tmp[0].id)
|
||||
.set('Authorization', 'Bearer ' + dnsConfig.token)
|
||||
.timeout(30 * 1000)
|
||||
.retry(5)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
@@ -245,7 +257,7 @@ function verifyDnsConfig(domainObject, callback) {
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.digitalocean.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains DO NS', nameservers);
|
||||
return callback(new DomainsError(DomainsError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
|
||||
return callback(new DomainsError(DomainsError.BAD_FIELD, 'Domain nameservers are not set to DigitalOcean'));
|
||||
}
|
||||
|
||||
const location = 'cloudrontestdns';
|
||||
|
||||
+2
-2
@@ -15,7 +15,7 @@ var assert = require('assert'),
|
||||
dns = require('../native-dns.js'),
|
||||
domains = require('../domains.js'),
|
||||
DomainsError = require('../domains.js').DomainsError,
|
||||
GCDNS = require('@google-cloud/dns'),
|
||||
GCDNS = require('@google-cloud/dns').DNS,
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js'),
|
||||
_ = require('underscore');
|
||||
@@ -46,7 +46,7 @@ function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var gcdns = GCDNS(getDnsCredentials(dnsConfig));
|
||||
var gcdns = new GCDNS(getDnsCredentials(dnsConfig));
|
||||
|
||||
gcdns.getZones(function (error, zones) {
|
||||
if (error && error.message === 'invalid_grant') return callback(new DomainsError(DomainsError.ACCESS_DENIED, 'The key was probably revoked'));
|
||||
|
||||
+76
-41
@@ -15,14 +15,14 @@ var assert = require('assert'),
|
||||
dns = require('../native-dns.js'),
|
||||
domains = require('../domains.js'),
|
||||
DomainsError = require('../domains.js').DomainsError,
|
||||
Namecheap = require('namecheap'),
|
||||
safe = require('safetydance'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('../sysinfo.js'),
|
||||
util = require('util'),
|
||||
waitForDns = require('./waitfordns.js');
|
||||
waitForDns = require('./waitfordns.js'),
|
||||
xml2js = require('xml2js');
|
||||
|
||||
function formatError(response) {
|
||||
return util.format('NameCheap DNS error [%s] %j', response.code, response.message);
|
||||
}
|
||||
const ENDPOINT = 'https://api.namecheap.com/xml.response';
|
||||
|
||||
function removePrivateFields(domainObject) {
|
||||
domainObject.config.token = domains.SECRET_PLACEHOLDER;
|
||||
@@ -33,37 +33,19 @@ function injectPrivateFields(newConfig, currentConfig) {
|
||||
if (newConfig.token === domains.SECRET_PLACEHOLDER) newConfig.token = currentConfig.token;
|
||||
}
|
||||
|
||||
// Only send required fields - https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
|
||||
function mapHosts(hosts) {
|
||||
return hosts.map(function (host) {
|
||||
let tmp = {};
|
||||
|
||||
tmp.TTL = '300';
|
||||
tmp.RecordType = host.RecordType || host.Type;
|
||||
tmp.HostName = host.HostName || host.Name;
|
||||
tmp.Address = host.Address;
|
||||
|
||||
if (tmp.RecordType === 'MX') {
|
||||
tmp.EmailType = 'MX';
|
||||
if (host.MXPref) tmp.MXPref = host.MXPref;
|
||||
}
|
||||
|
||||
return tmp;
|
||||
});
|
||||
}
|
||||
|
||||
function getApi(dnsConfig, callback) {
|
||||
function getQuery(dnsConfig, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new DomainsError(DomainsError.INTERNAL_ERROR, error));
|
||||
|
||||
// Note that for all NameCheap calls to go through properly, the public IP returned by the getPublicIp method below must be whitelisted on NameCheap's API dashboard
|
||||
let namecheap = new Namecheap(dnsConfig.username, dnsConfig.token, ip);
|
||||
namecheap.setUsername(dnsConfig.username);
|
||||
|
||||
callback(null, namecheap);
|
||||
callback(null, {
|
||||
ApiUser: dnsConfig.username,
|
||||
ApiKey: dnsConfig.token,
|
||||
UserName: dnsConfig.username,
|
||||
ClientIp: ip
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -74,15 +56,36 @@ function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getApi(dnsConfig, function (error, namecheap) {
|
||||
getQuery(dnsConfig, function (error, query) {
|
||||
if (error) return callback(error);
|
||||
|
||||
namecheap.domains.dns.getHosts(zoneName, function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, formatError(error)));
|
||||
query.Command = 'namecheap.domains.dns.getHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
|
||||
debug('entire getInternal response: %j', result);
|
||||
superagent.get(ENDPOINT).query(query).end(function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error));
|
||||
|
||||
return callback(null, result['DomainDNSGetHostsResult']['host']);
|
||||
var parser = new xml2js.Parser();
|
||||
parser.parseString(result.text, function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error));
|
||||
|
||||
var tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
var errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') return callback(new DomainsError(DomainsError.ACCESS_DENIED, errorMessage));
|
||||
|
||||
return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, errorMessage));
|
||||
}
|
||||
if (!tmp.CommandResponse[0]) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (!tmp.CommandResponse[0].DomainDNSGetHostsResult[0]) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
|
||||
var hosts = result.ApiResponse.CommandResponse[0].DomainDNSGetHostsResult[0].host.map(function (h) {
|
||||
return h['$'];
|
||||
});
|
||||
|
||||
callback(null, hosts);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -93,15 +96,47 @@ function setInternal(dnsConfig, zoneName, hosts, callback) {
|
||||
assert(Array.isArray(hosts));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
let mappedHosts = mapHosts(hosts);
|
||||
|
||||
getApi(dnsConfig, function (error, namecheap) {
|
||||
getQuery(dnsConfig, function (error, query) {
|
||||
if (error) return callback(error);
|
||||
|
||||
namecheap.domains.dns.setHosts(zoneName, mappedHosts, function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, formatError(error)));
|
||||
query.Command = 'namecheap.domains.dns.setHosts';
|
||||
query.SLD = zoneName.split('.')[0];
|
||||
query.TLD = zoneName.split('.')[1];
|
||||
|
||||
return callback(null, result);
|
||||
// Map to query params https://www.namecheap.com/support/api/methods/domains-dns/set-hosts.aspx
|
||||
hosts.forEach(function (host, i) {
|
||||
var n = i+1; // api starts with 1 not 0
|
||||
query['TTL' + n] = '300'; // keep it low
|
||||
query['HostName' + n] = host.HostName || host.Name;
|
||||
query['RecordType' + n] = host.RecordType || host.Type;
|
||||
query['Address' + n] = host.Address;
|
||||
|
||||
if (host.Type === 'MX') {
|
||||
query['EmailType' + n] = 'MX';
|
||||
if (host.MXPref) query['MXPref' + n] = host.MXPref;
|
||||
}
|
||||
});
|
||||
|
||||
superagent.post(ENDPOINT).query(query).end(function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error));
|
||||
|
||||
var parser = new xml2js.Parser();
|
||||
parser.parseString(result.text, function (error, result) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error));
|
||||
|
||||
var tmp = result.ApiResponse;
|
||||
if (tmp['$'].Status !== 'OK') {
|
||||
var errorMessage = safe.query(tmp, 'Errors[0].Error[0]._', 'Invalid response');
|
||||
if (errorMessage === 'API Key is invalid or API access has not been enabled') return callback(new DomainsError(DomainsError.ACCESS_DENIED, errorMessage));
|
||||
|
||||
return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, errorMessage));
|
||||
}
|
||||
if (!tmp.CommandResponse[0]) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (!tmp.CommandResponse[0].DomainDNSSetHostsResult[0]) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
if (tmp.CommandResponse[0].DomainDNSSetHostsResult[0]['$'].IsSuccess !== 'true') return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, 'Invalid response'));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
+4
-4
@@ -131,7 +131,7 @@ function getInternal(dnsConfig, zoneName, name, type, callback) {
|
||||
|
||||
result.body.records.forEach(function (r) {
|
||||
// name.com api simply strips empty properties
|
||||
r.host = r.host || '@';
|
||||
r.host = r.host || '';
|
||||
});
|
||||
|
||||
var results = result.body.records.filter(function (r) {
|
||||
@@ -153,7 +153,7 @@ function upsert(domainObject, location, type, values, callback) {
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = domains.getName(domainObject, location, type) || '@';
|
||||
name = domains.getName(domainObject, location, type) || '';
|
||||
|
||||
debug(`upsert: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
@@ -174,7 +174,7 @@ function get(domainObject, location, type, callback) {
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = domains.getName(domainObject, location, type) || '@';
|
||||
name = domains.getName(domainObject, location, type) || '';
|
||||
|
||||
getInternal(dnsConfig, zoneName, name, type, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
@@ -196,7 +196,7 @@ function del(domainObject, location, type, values, callback) {
|
||||
|
||||
const dnsConfig = domainObject.config,
|
||||
zoneName = domainObject.zoneName,
|
||||
name = domains.getName(domainObject, location, type) || '@';
|
||||
name = domains.getName(domainObject, location, type) || '';
|
||||
|
||||
debug(`del: ${name} in zone ${zoneName} of type ${type} with values ${JSON.stringify(values)}`);
|
||||
|
||||
|
||||
+89
-74
@@ -1,13 +1,12 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
DockerError: DockerError,
|
||||
|
||||
connection: connectionInstance(),
|
||||
setRegistryConfig: setRegistryConfig,
|
||||
|
||||
ping: ping,
|
||||
|
||||
info: info,
|
||||
downloadImage: downloadImage,
|
||||
createContainer: createContainer,
|
||||
startContainer: startContainer,
|
||||
@@ -22,6 +21,7 @@ exports = module.exports = {
|
||||
getContainerIdByIp: getContainerIdByIp,
|
||||
inspect: inspect,
|
||||
inspectByName: inspect,
|
||||
getEvents: getEvents,
|
||||
memoryUsage: memoryUsage,
|
||||
execContainer: execContainer,
|
||||
createVolume: createVolume,
|
||||
@@ -32,31 +32,22 @@ exports = module.exports = {
|
||||
// timeout is optional
|
||||
function connectionInstance(timeout) {
|
||||
var Docker = require('dockerode');
|
||||
var docker;
|
||||
|
||||
if (process.env.BOX_ENV === 'test') {
|
||||
// test code runs a docker proxy on this port
|
||||
docker = new Docker({ host: 'http://localhost', port: 5687, timeout: timeout });
|
||||
|
||||
// proxy code uses this to route to the real docker
|
||||
docker.options = { socketPath: '/var/run/docker.sock' };
|
||||
} else {
|
||||
docker = new Docker({ socketPath: '/var/run/docker.sock', timeout: timeout });
|
||||
}
|
||||
|
||||
var docker = new Docker({ socketPath: '/var/run/docker.sock', timeout: timeout });
|
||||
return docker;
|
||||
}
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
async = require('async'),
|
||||
assert = require('assert'),
|
||||
BoxError = require('./boxerror.js'),
|
||||
child_process = require('child_process'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:docker.js'),
|
||||
once = require('once'),
|
||||
path = require('path'),
|
||||
settings = require('./settings.js'),
|
||||
shell = require('./shell.js'),
|
||||
safe = require('safetydance'),
|
||||
spawn = child_process.spawn,
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
@@ -64,30 +55,7 @@ var addons = require('./addons.js'),
|
||||
const CLEARVOLUME_CMD = path.join(__dirname, 'scripts/clearvolume.sh'),
|
||||
MKDIRVOLUME_CMD = path.join(__dirname, 'scripts/mkdirvolume.sh');
|
||||
|
||||
function DockerError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(DockerError, Error);
|
||||
DockerError.INTERNAL_ERROR = 'Internal Error';
|
||||
DockerError.NOT_FOUND = 'Not found';
|
||||
DockerError.BAD_FIELD = 'Bad field';
|
||||
|
||||
function debugApp(app, args) {
|
||||
function debugApp(app) {
|
||||
assert(typeof app === 'object');
|
||||
|
||||
debug(app.fqdn + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
|
||||
@@ -102,8 +70,8 @@ function setRegistryConfig(auth, callback) {
|
||||
// currently, auth info is not stashed in the db but maybe it should for restore to work?
|
||||
const cmd = isLogin ? `docker login ${auth.serveraddress} --username ${auth.username} --password ${auth.password}` : `docker logout ${auth.serveraddress}`;
|
||||
|
||||
child_process.exec(cmd, { }, function (error, stdout, stderr) {
|
||||
if (error) return callback(new DockerError(DockerError.BAD_FIELD, stderr));
|
||||
child_process.exec(cmd, { }, function (error /*, stdout, stderr */) {
|
||||
if (error) return callback(new BoxError(BoxError.ACCESS_DENIED, error.message));
|
||||
|
||||
callback();
|
||||
});
|
||||
@@ -116,8 +84,8 @@ function ping(callback) {
|
||||
var docker = connectionInstance(1000);
|
||||
|
||||
docker.ping(function (error, result) {
|
||||
if (error) return callback(new DockerError(DockerError.INTERNAL_ERROR, error));
|
||||
if (result !== 'OK') return callback(new DockerError(DockerError.INTERNAL_ERROR, 'Unable to ping the docker daemon'));
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
if (result !== 'OK') return callback(new BoxError(BoxError.DOCKER_ERROR, 'Unable to ping the docker daemon'));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
@@ -128,23 +96,32 @@ function pullImage(manifest, callback) {
|
||||
|
||||
// Use docker CLI here to support downloading of private repos. for dockerode, we have to use
|
||||
// https://github.com/apocas/dockerode#pull-from-private-repos
|
||||
shell.spawn('pullImage', '/usr/bin/docker', [ 'pull', manifest.dockerImage ], {}, function (error) {
|
||||
if (error) {
|
||||
debug(`pullImage: Error pulling image ${manifest.dockerImage} of ${manifest.id}: ${error.message}`);
|
||||
return callback(new Error('Failed to pull image'));
|
||||
}
|
||||
docker.pull(manifest.dockerImage, function (error, stream) {
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Unable to pull image. statusCode: ' + error.statusCode));
|
||||
|
||||
var image = docker.getImage(manifest.dockerImage);
|
||||
// https://github.com/dotcloud/docker/issues/1074 says each status message
|
||||
// is emitted as a chunk
|
||||
stream.on('data', function (chunk) {
|
||||
var data = safe.JSON.parse(chunk) || { };
|
||||
debug('pullImage %s: %j', manifest.id, data);
|
||||
|
||||
image.inspect(function (err, data) {
|
||||
if (err) return callback(new Error('Error inspecting image:' + err.message));
|
||||
if (!data || !data.Config) return callback(new Error('Missing Config in image:' + JSON.stringify(data, null, 4)));
|
||||
if (!data.Config.Entrypoint && !data.Config.Cmd) return callback(new Error('Only images with entry point are allowed'));
|
||||
// The data.status here is useless because this is per layer as opposed to per image
|
||||
if (!data.status && data.error) {
|
||||
debug('pullImage error %s: %s', manifest.id, data.errorDetail.message);
|
||||
}
|
||||
});
|
||||
|
||||
if (data.Config.ExposedPorts) debug('This image of %s exposes ports: %j', manifest.id, data.Config.ExposedPorts);
|
||||
stream.on('end', function () {
|
||||
debug('downloaded image %s of %s successfully', manifest.dockerImage, manifest.id);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
|
||||
stream.on('error', function (error) {
|
||||
debug('error pulling image %s of %s: %j', manifest.dockerImage, manifest.id, error);
|
||||
|
||||
callback(new BoxError(BoxError.DOCKER_ERROR, error.message));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -175,19 +152,24 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var docker = exports.connection,
|
||||
isAppContainer = !cmd; // non app-containers are like scheduler containers
|
||||
isAppContainer = !cmd; // non app-containers are like scheduler and exec (terminal) containers
|
||||
|
||||
var manifest = app.manifest;
|
||||
var exposedPorts = {}, dockerPortBindings = { };
|
||||
var domain = app.fqdn;
|
||||
// TODO: these should all have the CLOUDRON_ prefix
|
||||
var stdEnv = [
|
||||
const hostname = isAppContainer ? app.id : name;
|
||||
|
||||
const envPrefix = manifest.manifestVersion <= 1 ? '' : 'CLOUDRON_';
|
||||
|
||||
let stdEnv = [
|
||||
'CLOUDRON=1',
|
||||
'CLOUDRON_PROXY_IP=172.18.0.1',
|
||||
'WEBADMIN_ORIGIN=' + config.adminOrigin(),
|
||||
'API_ORIGIN=' + config.adminOrigin(),
|
||||
'APP_ORIGIN=https://' + domain,
|
||||
'APP_DOMAIN=' + domain
|
||||
`CLOUDRON_APP_HOSTNAME=${app.id}`,
|
||||
`CLOUDRON_ADMIN_EMAIL=${app.adminEmail}`,
|
||||
`${envPrefix}WEBADMIN_ORIGIN=${settings.adminOrigin()}`,
|
||||
`${envPrefix}API_ORIGIN=${settings.adminOrigin()}`,
|
||||
`${envPrefix}APP_ORIGIN=https://${domain}`,
|
||||
`${envPrefix}APP_DOMAIN=${domain}`
|
||||
];
|
||||
|
||||
// docker portBindings requires ports to be exposed
|
||||
@@ -234,9 +216,9 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
// Note that Hostname has no effect on DNS. We have to use the --net-alias for dns.
|
||||
// Hostname cannot be set with container NetworkMode
|
||||
var containerOptions = {
|
||||
name: name, // used for filtering logs
|
||||
name: name, // for referencing containers
|
||||
Tty: isAppContainer,
|
||||
Hostname: app.id, // set to something 'constant' so app containers can use this to communicate (across app updates)
|
||||
Hostname: hostname,
|
||||
Image: app.manifest.dockerImage,
|
||||
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
|
||||
Env: stdEnv.concat(addonEnv).concat(portEnv).concat(appEnv),
|
||||
@@ -272,10 +254,17 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
},
|
||||
CpuShares: 512, // relative to 1024 for system processes
|
||||
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
|
||||
NetworkMode: 'cloudron',
|
||||
NetworkMode: 'cloudron', // user defined bridge network
|
||||
Dns: ['172.18.0.1'], // use internal dns
|
||||
DnsSearch: ['.'], // use internal dns
|
||||
SecurityOpt: [ 'apparmor=docker-cloudron-app' ]
|
||||
},
|
||||
NetworkingConfig: {
|
||||
EndpointsConfig: {
|
||||
cloudron: {
|
||||
Aliases: [ name ] // this allows sub-containers reach app containers by name
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -308,7 +297,8 @@ function startContainer(containerId, callback) {
|
||||
debug('Starting container %s', containerId);
|
||||
|
||||
container.start(function (error) {
|
||||
if (error && error.statusCode !== 304) return callback(new Error('Error starting container :' + error));
|
||||
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
|
||||
if (error && error.statusCode !== 304) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -384,7 +374,7 @@ function deleteContainers(appId, options, callback) {
|
||||
if (options.managedOnly) labels.push('isCloudronManaged=true');
|
||||
|
||||
docker.listContainers({ all: 1, filters: JSON.stringify({ label: labels }) }, function (error, containers) {
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
async.eachSeries(containers, function (container, iteratorDone) {
|
||||
deleteContainer(container.Id, iteratorDone);
|
||||
@@ -401,7 +391,7 @@ function stopContainers(appId, callback) {
|
||||
debug('stopping containers of %s', appId);
|
||||
|
||||
docker.listContainers({ all: 1, filters: JSON.stringify({ label: [ 'appId=' + appId ] }) }, function (error, containers) {
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
async.eachSeries(containers, function (container, iteratorDone) {
|
||||
stopContainer(container.Id, iteratorDone);
|
||||
@@ -445,7 +435,7 @@ function getContainerIdByIp(ip, callback) {
|
||||
|
||||
docker.getNetwork('cloudron').inspect(function (error, bridge) {
|
||||
if (error && error.statusCode === 404) return callback(new Error('Unable to find the cloudron network'));
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
var containerId;
|
||||
for (var id in bridge.Containers) {
|
||||
@@ -467,13 +457,26 @@ function inspect(containerId, callback) {
|
||||
var container = exports.connection.getContainer(containerId);
|
||||
|
||||
container.inspect(function (error, result) {
|
||||
if (error && error.statusCode === 404) return callback(new DockerError(DockerError.NOT_FOUND));
|
||||
if (error) return callback(new DockerError(DockerError.INTERNAL_ERROR, error));
|
||||
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
|
||||
function getEvents(options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
let docker = exports.connection;
|
||||
|
||||
docker.getEvents(options, function (error, stream) {
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
callback(null, stream);
|
||||
});
|
||||
}
|
||||
|
||||
function memoryUsage(containerId, callback) {
|
||||
assert.strictEqual(typeof containerId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -481,8 +484,8 @@ function memoryUsage(containerId, callback) {
|
||||
var container = exports.connection.getContainer(containerId);
|
||||
|
||||
container.stats({ stream: false }, function (error, result) {
|
||||
if (error && error.statusCode === 404) return callback(new DockerError(DockerError.NOT_FOUND));
|
||||
if (error) return callback(new DockerError(DockerError.INTERNAL_ERROR, error));
|
||||
if (error && error.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
@@ -546,7 +549,7 @@ function createVolume(app, name, volumeDataDir, callback) {
|
||||
if (error) return callback(new Error(`Error creating app data dir: ${error.message}`));
|
||||
|
||||
docker.createVolume(volumeOptions, function (error) {
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
callback();
|
||||
});
|
||||
@@ -563,7 +566,7 @@ function clearVolume(app, name, options, callback) {
|
||||
let volume = docker.getVolume(name);
|
||||
volume.inspect(function (error, v) {
|
||||
if (error && error.statusCode === 404) return callback();
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, error));
|
||||
|
||||
const volumeDataDir = v.Options.device;
|
||||
shell.sudo('clearVolume', [ CLEARVOLUME_CMD, options.removeDirectory ? 'rmdir' : 'clear', volumeDataDir ], {}, callback);
|
||||
@@ -585,3 +588,15 @@ function removeVolume(app, name, callback) {
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function info(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
let docker = exports.connection;
|
||||
|
||||
docker.info(function (error, result) {
|
||||
if (error) return callback(new BoxError(BoxError.DOCKER_ERROR, 'Error connecting to docker'));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
|
||||
+6
-6
@@ -8,7 +8,7 @@ exports = module.exports = {
|
||||
var apps = require('./apps.js'),
|
||||
AppsError = apps.AppsError,
|
||||
assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
express = require('express'),
|
||||
debug = require('debug')('box:dockerproxy'),
|
||||
http = require('http'),
|
||||
@@ -29,7 +29,7 @@ function authorizeApp(req, res, next) {
|
||||
// - only allow managing and inspection of containers belonging to the app
|
||||
|
||||
// make the tests pass for now
|
||||
if (config.TEST) {
|
||||
if (constants.TEST) {
|
||||
req.app = { id: 'testappid' };
|
||||
return next();
|
||||
}
|
||||
@@ -120,7 +120,7 @@ function start(callback) {
|
||||
|
||||
let proxyServer = express();
|
||||
|
||||
if (config.TEST) {
|
||||
if (constants.TEST) {
|
||||
proxyServer.use(function (req, res, next) {
|
||||
debug('proxying: ' + req.method, req.url);
|
||||
next();
|
||||
@@ -135,9 +135,9 @@ function start(callback) {
|
||||
.use(middleware.lastMile());
|
||||
|
||||
gHttpServer = http.createServer(proxyServer);
|
||||
gHttpServer.listen(config.get('dockerProxyPort'), '0.0.0.0', callback);
|
||||
gHttpServer.listen(constants.DOCKER_PROXY_PORT, '0.0.0.0', callback);
|
||||
|
||||
debug(`startDockerProxy: started proxy on port ${config.get('dockerProxyPort')}`);
|
||||
debug(`startDockerProxy: started proxy on port ${constants.DOCKER_PROXY_PORT}`);
|
||||
|
||||
gHttpServer.on('upgrade', function (req, client, head) {
|
||||
// Create a new tcp connection to the TCP server
|
||||
@@ -150,7 +150,7 @@ function start(callback) {
|
||||
if (req.headers['content-type'] === 'application/json') {
|
||||
// TODO we have to parse the immediate upgrade request body, but I don't know how
|
||||
let plainBody = '{"Detach":false,"Tty":false}\r\n';
|
||||
upgradeMessage += `Content-Type: application/json\r\n`;
|
||||
upgradeMessage += 'Content-Type: application/json\r\n';
|
||||
upgradeMessage += `Content-Length: ${Buffer.byteLength(plainBody)}\r\n`;
|
||||
upgradeMessage += '\r\n';
|
||||
upgradeMessage += plainBody;
|
||||
|
||||
+31
-9
@@ -26,6 +26,8 @@ module.exports = exports = {
|
||||
|
||||
parentDomain: parentDomain,
|
||||
|
||||
checkDnsRecords: checkDnsRecords,
|
||||
|
||||
prepareDashboardDomain: prepareDashboardDomain,
|
||||
|
||||
DomainsError: DomainsError,
|
||||
@@ -35,7 +37,6 @@ module.exports = exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:domains'),
|
||||
@@ -44,6 +45,7 @@ var assert = require('assert'),
|
||||
reverseProxy = require('./reverseproxy.js'),
|
||||
ReverseProxyError = reverseProxy.ReverseProxyError,
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
@@ -76,7 +78,7 @@ DomainsError.BAD_FIELD = 'Bad Field';
|
||||
DomainsError.STILL_BUSY = 'Still busy';
|
||||
DomainsError.IN_USE = 'In Use';
|
||||
DomainsError.INTERNAL_ERROR = 'Internal error';
|
||||
DomainsError.ACCESS_DENIED = 'Access denied';
|
||||
DomainsError.ACCESS_DENIED = 'Access Denied';
|
||||
DomainsError.INVALID_PROVIDER = 'provider must be route53, gcdns, digitalocean, gandi, cloudflare, namecom, noop, wildcard, manual or caas';
|
||||
|
||||
// choose which subdomain backend we use for test purpose we use route53
|
||||
@@ -145,13 +147,12 @@ function validateHostname(location, domainObject) {
|
||||
const hostname = fqdn(location, domainObject);
|
||||
|
||||
const RESERVED_LOCATIONS = [
|
||||
constants.API_LOCATION,
|
||||
constants.SMTP_LOCATION,
|
||||
constants.IMAP_LOCATION
|
||||
];
|
||||
if (RESERVED_LOCATIONS.indexOf(location) !== -1) return new DomainsError(DomainsError.BAD_FIELD, location + ' is reserved');
|
||||
|
||||
if (hostname === config.adminFqdn()) return new DomainsError(DomainsError.BAD_FIELD, location + ' is reserved');
|
||||
if (hostname === settings.adminFqdn()) return new DomainsError(DomainsError.BAD_FIELD, location + ' is reserved');
|
||||
|
||||
// workaround https://github.com/oncletom/tld.js/issues/73
|
||||
var tmp = hostname.replace('_', '-');
|
||||
@@ -344,7 +345,7 @@ function del(domain, auditSource, callback) {
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (domain === config.adminDomain()) return callback(new DomainsError(DomainsError.IN_USE));
|
||||
if (domain === settings.adminDomain()) return callback(new DomainsError(DomainsError.IN_USE));
|
||||
|
||||
domaindb.del(domain, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new DomainsError(DomainsError.NOT_FOUND));
|
||||
@@ -395,7 +396,7 @@ function getDnsRecords(location, domain, type, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
get(domain, function (error, domainObject) {
|
||||
if (error) return callback(new DomainsError(DomainsError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
api(domainObject.provider).get(domainObject, location, type, function (error, values) {
|
||||
if (error) return callback(error);
|
||||
@@ -405,6 +406,25 @@ function getDnsRecords(location, domain, type, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function checkDnsRecords(location, domain, callback) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getDnsRecords(location, domain, 'A', function (error, values) {
|
||||
if (error) return callback(error);
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
if (values.length === 0) return callback(null, { needsOverwrite: false }); // does not exist
|
||||
if (values[0] === ip) return callback(null, { needsOverwrite: false }); // exists but in sync
|
||||
|
||||
callback(null, { needsOverwrite: true });
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// note: for TXT records the values must be quoted
|
||||
function upsertDnsRecords(location, domain, type, values, callback) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
@@ -494,15 +514,17 @@ function prepareDashboardDomain(domain, auditSource, progressCallback, callback)
|
||||
get(domain, function (error, domainObject) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const adminFqdn = fqdn(constants.ADMIN_LOCATION, domainObject);
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new DomainsError(DomainsError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
async.series([
|
||||
(done) => { progressCallback({ percent: 10, message: 'Updating DNS' }); done(); },
|
||||
(done) => { progressCallback({ percent: 10, message: `Updating DNS of ${adminFqdn}` }); done(); },
|
||||
upsertDnsRecords.bind(null, constants.ADMIN_LOCATION, domain, 'A', [ ip ]),
|
||||
(done) => { progressCallback({ percent: 40, message: 'Waiting for DNS' }); done(); },
|
||||
(done) => { progressCallback({ percent: 40, message: `Waiting for DNS of ${adminFqdn}` }); done(); },
|
||||
waitForDnsRecord.bind(null, constants.ADMIN_LOCATION, domain, 'A', ip, { interval: 30000, times: 50000 }),
|
||||
(done) => { progressCallback({ percent: 70, message: 'Getting certificate' }); done(); },
|
||||
(done) => { progressCallback({ percent: 70, message: `Getting certificate of ${adminFqdn}` }); done(); },
|
||||
reverseProxy.ensureCertificate.bind(null, fqdn(constants.ADMIN_LOCATION, domainObject), domain, auditSource)
|
||||
], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
+10
-10
@@ -4,22 +4,22 @@ exports = module.exports = {
|
||||
sync: sync
|
||||
};
|
||||
|
||||
var appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
let apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:dyndns'),
|
||||
domains = require('./domains.js'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
settings = require('./settings.js'),
|
||||
sysinfo = require('./sysinfo.js');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
// called for dynamic dns setups where we have to update the IP
|
||||
function sync(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
function sync(auditSource, callback) {
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
@@ -32,7 +32,7 @@ function sync(callback) {
|
||||
|
||||
debug(`refreshDNS: updating ip from ${info.ip} to ${ip}`);
|
||||
|
||||
domains.upsertDnsRecords(config.adminLocation(), config.adminDomain(), 'A', [ ip ], function (error) {
|
||||
domains.upsertDnsRecords(constants.ADMIN_LOCATION, settings.adminDomain(), 'A', [ ip ], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('refreshDNS: updated admin location');
|
||||
@@ -42,7 +42,7 @@ function sync(callback) {
|
||||
|
||||
async.each(result, function (app, callback) {
|
||||
// do not change state of installing apps since apptask will error if dns record already exists
|
||||
if (app.installationState !== appdb.ISTATE_INSTALLED) return callback();
|
||||
if (app.installationState !== apps.ISTATE_INSTALLED) return callback();
|
||||
|
||||
domains.upsertDnsRecords(app.location, app.domain, 'A', [ ip ], callback);
|
||||
}, function (error) {
|
||||
@@ -50,7 +50,7 @@ function sync(callback) {
|
||||
|
||||
debug('refreshDNS: updated apps');
|
||||
|
||||
eventlog.add(eventlog.ACTION_DYNDNS_UPDATE, { userId: null, username: 'cron' }, { fromIp: info.ip, toIp: ip });
|
||||
eventlog.add(eventlog.ACTION_DYNDNS_UPDATE, auditSource, { fromIp: info.ip, toIp: ip });
|
||||
info.ip = ip;
|
||||
safe.fs.writeFileSync(paths.DYNDNS_INFO_FILE, JSON.stringify(info), 'utf8');
|
||||
|
||||
|
||||
+8
-23
@@ -13,19 +13,20 @@ exports = module.exports = {
|
||||
ACTION_ACTIVATE: 'cloudron.activate',
|
||||
ACTION_APP_CLONE: 'app.clone',
|
||||
ACTION_APP_CONFIGURE: 'app.configure',
|
||||
ACTION_APP_REPAIR: 'app.repair',
|
||||
ACTION_APP_INSTALL: 'app.install',
|
||||
ACTION_APP_RESTORE: 'app.restore',
|
||||
ACTION_APP_UNINSTALL: 'app.uninstall',
|
||||
ACTION_APP_UPDATE: 'app.update',
|
||||
ACTION_APP_UPDATE_FINISH: 'app.update.finish',
|
||||
ACTION_APP_LOGIN: 'app.login',
|
||||
ACTION_APP_OOM: 'app.oom',
|
||||
ACTION_APP_UP: 'app.up',
|
||||
ACTION_APP_DOWN: 'app.down',
|
||||
ACTION_APP_TASK_CRASH: 'app.task.crash',
|
||||
|
||||
ACTION_BACKUP_FINISH: 'backup.finish',
|
||||
ACTION_BACKUP_START: 'backup.start',
|
||||
ACTION_BACKUP_CLEANUP_START: 'backup.cleanup.start',
|
||||
ACTION_BACKUP_CLEANUP_START: 'backup.cleanup.start', // obsolete
|
||||
ACTION_BACKUP_CLEANUP_FINISH: 'backup.cleanup.finish',
|
||||
|
||||
ACTION_CERTIFICATE_NEW: 'certificate.new',
|
||||
@@ -48,6 +49,7 @@ exports = module.exports = {
|
||||
ACTION_RESTORE: 'cloudron.restore', // unused
|
||||
ACTION_START: 'cloudron.start',
|
||||
ACTION_UPDATE: 'cloudron.update',
|
||||
ACTION_UPDATE_FINISH: 'cloudron.update.finish',
|
||||
|
||||
ACTION_USER_ADD: 'user.add',
|
||||
ACTION_USER_LOGIN: 'user.login',
|
||||
@@ -105,28 +107,11 @@ function add(action, source, data, callback) {
|
||||
api(uuid.v4(), action, source, data, function (error, id) {
|
||||
if (error) return callback(new EventLogError(EventLogError.INTERNAL_ERROR, error));
|
||||
|
||||
// decide if we want to add notifications as well
|
||||
if (action === exports.ACTION_USER_ADD) {
|
||||
notifications.userAdded(source.userId, id, data.user);
|
||||
} else if (action === exports.ACTION_USER_REMOVE) {
|
||||
notifications.userRemoved(source.userId, id, data.user);
|
||||
} else if (action === exports.ACTION_USER_UPDATE && data.adminStatusChanged) {
|
||||
notifications.adminChanged(source.userId, id, data.user);
|
||||
} else if (action === exports.ACTION_APP_OOM) {
|
||||
notifications.oomEvent(id, data.app ? data.app.id : data.containerId, { app: data.app, details: data });
|
||||
} else if (action === exports.ACTION_APP_DOWN) {
|
||||
notifications.appDied(id, data.app);
|
||||
} else if (action === exports.ACTION_APP_UP) {
|
||||
notifications.appUp(id, data.app);
|
||||
} else if (action === exports.ACTION_APP_TASK_CRASH) {
|
||||
notifications.apptaskCrash(id, data.appId, data.crashLogFile);
|
||||
} else if (action === exports.ACTION_PROCESS_CRASH) {
|
||||
notifications.processCrash(id, data.processName, data.crashLogFile);
|
||||
} else {
|
||||
// no notification
|
||||
}
|
||||
notifications.onEvent(id, action, source, data, function (error) {
|
||||
if (error) return callback(new EventLogError(EventLogError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, { id: id });
|
||||
callback(null, { id: id });
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
+12
-12
@@ -145,7 +145,7 @@ function clear(callback) {
|
||||
database.query('DELETE FROM eventlog', function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(error);
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -153,19 +153,19 @@ function delByCreationTime(creationTime, callback) {
|
||||
assert(util.isDate(creationTime));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// since notifications reference eventlog items, we have to clean them up as well
|
||||
database.query('SELECT * FROM eventlog WHERE creationTime < ?', [ creationTime ], function (error, result) {
|
||||
// remove notifications that reference the events as well
|
||||
database.query('SELECT * FROM eventlog WHERE creationTime <= ?', [ creationTime ], function (error, result) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
async.eachSeries(result, function (item, callback) {
|
||||
database.query('DELETE FROM notifications WHERE eventId=?', [ item.id ], function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
async.eachSeries(result, function (item, iteratorCallback) {
|
||||
async.series([
|
||||
database.query.bind(null, 'DELETE FROM notifications WHERE eventId=?', [ item.id ]),
|
||||
database.query.bind(null, 'DELETE FROM eventlog WHERE id=?', [ item.id ])
|
||||
], iteratorCallback);
|
||||
}, function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
database.query('DELETE FROM eventlog WHERE id=?', [ item.id ], function (error) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}, callback);
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -0,0 +1,231 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
ExternalLdapError: ExternalLdapError,
|
||||
|
||||
verifyPassword: verifyPassword,
|
||||
|
||||
testConfig: testConfig,
|
||||
startSyncer: startSyncer,
|
||||
|
||||
sync: sync
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
auditsource = require('./auditsource.js'),
|
||||
debug = require('debug')('box:externalldap'),
|
||||
ldap = require('ldapjs'),
|
||||
settings = require('./settings.js'),
|
||||
tasks = require('./tasks.js'),
|
||||
users = require('./users.js'),
|
||||
UserError = users.UsersError,
|
||||
util = require('util');
|
||||
|
||||
function ExternalLdapError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(ExternalLdapError, Error);
|
||||
ExternalLdapError.EXTERNAL_ERROR = 'external error';
|
||||
ExternalLdapError.INTERNAL_ERROR = 'internal error';
|
||||
ExternalLdapError.INVALID_CREDENTIALS = 'invalid credentials';
|
||||
ExternalLdapError.BAD_STATE = 'bad state';
|
||||
ExternalLdapError.BAD_FIELD = 'bad field';
|
||||
ExternalLdapError.NOT_FOUND = 'not found';
|
||||
|
||||
// performs service bind if required
|
||||
function getClient(externalLdapConfig, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// basic validation to not crash
|
||||
try { ldap.parseDN(externalLdapConfig.baseDn); } catch (e) { return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'invalid baseDn')); }
|
||||
try { ldap.parseFilter(externalLdapConfig.filter); } catch (e) { return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'invalid filter')); }
|
||||
if (externalLdapConfig.bindDn) try { ldap.parseFilter(externalLdapConfig.bindDn); } catch (e) { return callback(new ExternalLdapError(ExternalLdapError.INVALID_CREDENTIALS)); }
|
||||
|
||||
var client;
|
||||
try {
|
||||
client = ldap.createClient({ url: externalLdapConfig.url });
|
||||
} catch (e) {
|
||||
if (e instanceof ldap.ProtocolError) return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'url protocol is invalid'));
|
||||
return callback(new ExternalLdapError(ExternalLdapError.INTERNAL_ERROR, e));
|
||||
}
|
||||
|
||||
if (!externalLdapConfig.bindDn) return callback(null, client);
|
||||
|
||||
client.bind(externalLdapConfig.bindDn, externalLdapConfig.bindPassword, function (error) {
|
||||
if (error instanceof ldap.InvalidCredentialsError) return callback(new ExternalLdapError(ExternalLdapError.INVALID_CREDENTIALS));
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null, client, externalLdapConfig);
|
||||
});
|
||||
}
|
||||
|
||||
function testConfig(config, callback) {
|
||||
assert.strictEqual(typeof config, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (!config.enabled) return callback();
|
||||
|
||||
if (!config.url) return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'url must not be empty'));
|
||||
if (!config.baseDn) return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'basedn must not be empty'));
|
||||
if (!config.filter) return callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'filter must not be empty'));
|
||||
|
||||
getClient(config, function (error, client) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var opts = {
|
||||
filter: config.filter,
|
||||
scope: 'sub'
|
||||
};
|
||||
|
||||
client.search(config.baseDn, opts, function (error, result) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, error));
|
||||
|
||||
result.on('searchEntry', function (entry) {});
|
||||
result.on('error', function (error) { callback(new ExternalLdapError(ExternalLdapError.BAD_FIELD, 'Unable to search directory')); });
|
||||
result.on('end', function (result) { callback(); });
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function verifyPassword(user, password, callback) {
|
||||
assert.strictEqual(typeof user, 'object');
|
||||
assert.strictEqual(typeof password, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getExternalLdapConfig(function (error, externalLdapConfig) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.INTERNAL_ERROR, error));
|
||||
if (!externalLdapConfig.enabled) return callback(new ExternalLdapError(ExternalLdapError.BAD_STATE, 'not enabled'));
|
||||
|
||||
getClient(externalLdapConfig, function (error, client) {
|
||||
if (error) return callback(error);
|
||||
|
||||
const dn = `uid=${user.username},${externalLdapConfig.baseDn}`;
|
||||
|
||||
client.bind(dn, password, function (error) {
|
||||
if (error instanceof ldap.InvalidCredentialsError) return callback(new ExternalLdapError(ExternalLdapError.INVALID_CREDENTIALS));
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function startSyncer(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getExternalLdapConfig(function (error, externalLdapConfig) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.INTERNAL_ERROR, error));
|
||||
if (!externalLdapConfig.enabled) return callback(new ExternalLdapError(ExternalLdapError.BAD_STATE, 'not enabled'));
|
||||
|
||||
tasks.add(tasks.TASK_SYNC_EXTERNAL_LDAP, [], function (error, taskId) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.INTERNAL_ERROR, error));
|
||||
|
||||
tasks.startTask(taskId, {}, function (error, result) {
|
||||
debug('sync: done', error, result);
|
||||
});
|
||||
|
||||
callback(null, taskId);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function sync(progressCallback, callback) {
|
||||
assert.strictEqual(typeof progressCallback, 'function');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Start user syncing ...');
|
||||
|
||||
settings.getExternalLdapConfig(function (error, externalLdapConfig) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.INTERNAL_ERROR, error));
|
||||
if (!externalLdapConfig.enabled) return callback(new ExternalLdapError(ExternalLdapError.BAD_STATE, 'not enabled'));
|
||||
|
||||
getClient(externalLdapConfig, function (error, client) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var opts = {
|
||||
paged: true,
|
||||
filter: externalLdapConfig.filter,
|
||||
scope: 'sub' // We may have to make this configurable
|
||||
};
|
||||
|
||||
debug(`Listing users at ${externalLdapConfig.baseDn} with filter ${externalLdapConfig.filter}`);
|
||||
|
||||
client.search(externalLdapConfig.baseDn, opts, function (error, result) {
|
||||
if (error) return callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, error));
|
||||
|
||||
var ldapUsers = [];
|
||||
|
||||
result.on('searchEntry', function (entry) {
|
||||
ldapUsers.push(entry.object);
|
||||
});
|
||||
|
||||
result.on('error', function (error) {
|
||||
callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, error));
|
||||
});
|
||||
|
||||
result.on('end', function (result) {
|
||||
if (result.status !== 0) return callback(new ExternalLdapError(ExternalLdapError.EXTERNAL_ERROR, 'Server returned status ' + result.status));
|
||||
|
||||
debug(`Found ${ldapUsers.length} users`);
|
||||
|
||||
// we ignore all errors here and just log them for now
|
||||
async.eachSeries(ldapUsers, function (user, callback) {
|
||||
|
||||
// ignore the bindDn user if any
|
||||
if (user.dn === externalLdapConfig.bindDn) return callback();
|
||||
|
||||
users.getByUsername(user.uid, function (error, result) {
|
||||
if (error && error.reason !== UserError.NOT_FOUND) {
|
||||
console.error(error);
|
||||
return callback();
|
||||
}
|
||||
|
||||
if (error) {
|
||||
debug('[adding user] ', user.uid, user.mail, user.cn);
|
||||
|
||||
users.create(user.uid, null, user.mail, user.cn, { source: 'ldap' }, auditsource.EXTERNAL_LDAP_TASK, function (error) {
|
||||
if (error) console.error('Failed to create user', user, error);
|
||||
callback();
|
||||
});
|
||||
} else if (result.source !== 'ldap') {
|
||||
debug('[conflicting user]', user.uid, user.mail, user.cn);
|
||||
|
||||
callback();
|
||||
} else if (result.email !== user.mail || result.displayName !== user.cn) {
|
||||
debug('[updating user] ', user.uid, user.mail, user.cn);
|
||||
|
||||
users.update(result.id, { email: user.mail, fallbackEmail: user.mail, displayName: user.cn }, auditsource.EXTERNAL_LDAP_TASK, function (error) {
|
||||
if (error) console.error('Failed to update user', user, error);
|
||||
callback();
|
||||
});
|
||||
} else {
|
||||
// user known and up-to-date
|
||||
callback();
|
||||
}
|
||||
});
|
||||
}, function () {
|
||||
debug('User sync done.');
|
||||
callback();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -19,6 +19,7 @@ function startGraphite(existingInfra, callback) {
|
||||
if (existingInfra.version === infra.version && infra.images.graphite.tag === existingInfra.images.graphite.tag) return callback();
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="graphite" \
|
||||
--hostname graphite \
|
||||
--net cloudron \
|
||||
--net-alias graphite \
|
||||
--log-driver syslog \
|
||||
|
||||
+13
-1
@@ -20,7 +20,9 @@ exports = module.exports = {
|
||||
getGroups: getGroups,
|
||||
|
||||
setMembership: setMembership,
|
||||
getMembership: getMembership
|
||||
getMembership: getMembership,
|
||||
|
||||
count: count
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
@@ -268,3 +270,13 @@ function getGroups(userId, callback) {
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function count(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
groupdb.count(function (error, count) {
|
||||
if (error) return callback(new GroupsError(GroupsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, count);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
exports = module.exports = {
|
||||
// a version change recreates all containers with latest docker config
|
||||
'version': '48.12.1',
|
||||
'version': '48.16.0',
|
||||
|
||||
'baseImages': [
|
||||
{ repo: 'cloudron/base', tag: 'cloudron/base:1.0.0@sha256:147a648a068a2e746644746bbfb42eb7a50d682437cead3c67c933c546357617' }
|
||||
@@ -15,11 +15,12 @@ exports = module.exports = {
|
||||
// a major version bump in the db containers will trigger the restore logic that uses the db dumps
|
||||
// docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
|
||||
'images': {
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:2.0.1@sha256:5a13360da4a2085c7d474bea6b1090c5eb24732d4f73459942af7612d4993d7f' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:2.0.2@sha256:a28320f313785816be60e3f865e09065504170a3d20ed37de675c719b32b01eb' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:2.0.2@sha256:6dcee0731dfb9b013ed94d56205eee219040ee806c7e251db3b3886eaa4947ff' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:2.0.2@sha256:95e006390ddce7db637e1672eb6f3c257d3c2652747424f529b1dee3cbe6728c' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:2.1.0@sha256:6d1bf221cfe6124957e2c58b57c0a47214353496009296acb16adf56df1da9d5' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:2.0.0@sha256:8a88dd334b62b578530a014ca1a2425a54cb9df1e475f5d3a36806e5cfa22121' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:2.1.0@sha256:131db42dcb90111f679ab1f0f37c552f93f797d9b803b2346c7c202daf86ac36' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.0.2@sha256:454f035d60b768153d4f31210380271b5ba1c09367c9d95c7fa37f9e39d2f59c' }
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:2.4.0@sha256:209f76833ff8cce58be8a09897c378e3b706e1e318249870afb7294a4ee83cad' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.2.0@sha256:fc9ca69d16e6ebdbd98ed53143d4a0d2212eef60cb638dc71219234e6f427a2c' },
|
||||
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:0.1.0@sha256:e177c5bf5f38c84ce1dea35649c22a1b05f96eec67a54a812c5a35e585670f0f' }
|
||||
}
|
||||
};
|
||||
|
||||
+1
-1
@@ -63,7 +63,7 @@ function cleanupTmpVolume(containerInfo, callback) {
|
||||
assert.strictEqual(typeof containerInfo, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var cmd = 'find /tmp -mtime +10 -exec rm -rf {} +'.split(' '); // 10 days old
|
||||
var cmd = 'find /tmp -type f -mtime +10 -exec rm -rf {} +'.split(' '); // 10 day old files
|
||||
|
||||
debug('cleanupTmpVolume %j', containerInfo.Names);
|
||||
|
||||
|
||||
+93
-24
@@ -9,17 +9,18 @@ var assert = require('assert'),
|
||||
appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:ldap'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
users = require('./users.js'),
|
||||
UsersError = users.UsersError,
|
||||
ldap = require('ldapjs'),
|
||||
mail = require('./mail.js'),
|
||||
MailError = mail.MailError,
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
safe = require('safetydance');
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
users = require('./users.js'),
|
||||
UsersError = users.UsersError;
|
||||
|
||||
var gServer = null;
|
||||
|
||||
@@ -95,9 +96,9 @@ function finalSend(results, req, res, next) {
|
||||
|
||||
var resultCookie;
|
||||
if (last < max) {
|
||||
resultCookie = new Buffer(last.toString());
|
||||
resultCookie = Buffer.from(last.toString());
|
||||
} else {
|
||||
resultCookie = new Buffer('');
|
||||
resultCookie = Buffer.from('');
|
||||
}
|
||||
|
||||
res.controls.push(new ldap.PagedResultsControl({
|
||||
@@ -134,7 +135,7 @@ function userSearch(req, res, next) {
|
||||
var dn = ldap.parseDN('cn=' + entry.id + ',ou=users,dc=cloudron');
|
||||
|
||||
var groups = [ GROUP_USERS_DN ];
|
||||
if (entry.admin || req.app.ownerId === entry.id) groups.push(GROUP_ADMINS_DN);
|
||||
if (entry.admin) groups.push(GROUP_ADMINS_DN);
|
||||
|
||||
var displayName = entry.displayName || entry.username || ''; // displayName can be empty and username can be null
|
||||
var nameParts = displayName.split(' ');
|
||||
@@ -154,7 +155,7 @@ function userSearch(req, res, next) {
|
||||
givenName: firstName,
|
||||
username: entry.username,
|
||||
samaccountname: entry.username, // to support ActiveDirectory clients
|
||||
isadmin: (entry.admin || req.app.ownerId === entry.id) ? 1 : 0,
|
||||
isadmin: entry.admin,
|
||||
memberof: groups
|
||||
}
|
||||
};
|
||||
@@ -194,7 +195,7 @@ function groupSearch(req, res, next) {
|
||||
|
||||
groups.forEach(function (group) {
|
||||
var dn = ldap.parseDN('cn=' + group.name + ',ou=groups,dc=cloudron');
|
||||
var members = group.admin ? result.filter(function (entry) { return entry.admin || req.app.ownerId === entry.id; }) : result;
|
||||
var members = group.admin ? result.filter(function (entry) { return entry.admin; }) : result;
|
||||
|
||||
var obj = {
|
||||
dn: dn.toString(),
|
||||
@@ -243,7 +244,7 @@ function groupAdminsCompare(req, res, next) {
|
||||
// we only support memberuid here, if we add new group attributes later add them here
|
||||
if (req.attribute === 'memberuid') {
|
||||
var found = result.find(function (u) { return u.id === req.value; });
|
||||
if (found && (found.admin || req.app.ownerId == found.id)) return res.end(true);
|
||||
if (found && found.admin) return res.end(true);
|
||||
}
|
||||
|
||||
res.end(false);
|
||||
@@ -270,11 +271,7 @@ function mailboxSearch(req, res, next) {
|
||||
objectcategory: 'mailbox',
|
||||
cn: `${mailbox.name}@${mailbox.domain}`,
|
||||
uid: `${mailbox.name}@${mailbox.domain}`,
|
||||
mail: `${mailbox.name}@${mailbox.domain}`,
|
||||
displayname: 'Max Mustermann',
|
||||
givenName: 'Max',
|
||||
username: 'mmustermann',
|
||||
samaccountname: 'mmustermann'
|
||||
mail: `${mailbox.name}@${mailbox.domain}`
|
||||
}
|
||||
};
|
||||
|
||||
@@ -374,7 +371,7 @@ function mailingListSearch(req, res, next) {
|
||||
var parts = email.split('@');
|
||||
if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
mailboxdb.getGroup(parts[0], parts[1], function (error, group) {
|
||||
mailboxdb.getList(parts[0], parts[1], function (error, list) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
|
||||
@@ -385,9 +382,9 @@ function mailingListSearch(req, res, next) {
|
||||
attributes: {
|
||||
objectclass: ['mailGroup'],
|
||||
objectcategory: 'mailGroup',
|
||||
cn: `${group.name}@${group.domain}`, // fully qualified
|
||||
mail: `${group.name}@${group.domain}`,
|
||||
mgrpRFC822MailMember: group.members.map(function (m) { return `${m}@${group.domain}`; })
|
||||
cn: `${list.name}@${list.domain}`, // fully qualified
|
||||
mail: `${list.name}@${list.domain}`,
|
||||
mgrpRFC822MailMember: list.members // fully qualified
|
||||
}
|
||||
};
|
||||
|
||||
@@ -481,6 +478,71 @@ function authenticateUserMailbox(req, res, next) {
|
||||
});
|
||||
}
|
||||
|
||||
function authenticateSftp(req, res, next) {
|
||||
debug('sftp auth: %s (from %s)', req.dn.toString(), req.connection.ldap.id);
|
||||
|
||||
if (!req.dn.rdns[0].attrs.cn) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
var email = req.dn.rdns[0].attrs.cn.value.toLowerCase();
|
||||
var parts = email.split('@');
|
||||
if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
// actual user bind
|
||||
users.verifyWithUsername(parts[0], req.credentials, function (error) {
|
||||
if (error) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
|
||||
|
||||
debug('sftp auth: success');
|
||||
|
||||
res.end();
|
||||
});
|
||||
}
|
||||
|
||||
function userSearchSftp(req, res, next) {
|
||||
debug('sftp user search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
|
||||
|
||||
if (req.filter.attribute !== 'username' || !req.filter.value) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
var parts = req.filter.value.split('@');
|
||||
if (parts.length !== 2) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
var username = parts[0];
|
||||
var appFqdn = parts[1];
|
||||
|
||||
apps.getByFqdn(appFqdn, function (error, app) {
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
|
||||
// only allow apps which specify "ftp" support in the localstorage addon
|
||||
if (!safe.query(app.manifest.addons, 'localstorage.ftp.uid')) return next(new ldap.UnavailableError('Not supported'));
|
||||
if (typeof app.manifest.addons.localstorage.ftp.uid !== 'number') return next(new ldap.UnavailableError('Bad uid, must be a number'));
|
||||
|
||||
const uidNumber = app.manifest.addons.localstorage.ftp.uid;
|
||||
|
||||
users.getByUsername(username, function (error, user) {
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
|
||||
apps.hasAccessTo(app, user, function (error, hasAccess) {
|
||||
if (error) return next(new ldap.OperationsError(error.toString()));
|
||||
if (!hasAccess) return next(new ldap.InsufficientAccessRightsError('Not authorized'));
|
||||
|
||||
var obj = {
|
||||
dn: ldap.parseDN(`cn=${username}@${appFqdn},ou=sftp,dc=cloudron`).toString(),
|
||||
attributes: {
|
||||
homeDirectory: path.join('/app/data', app.id, 'data'),
|
||||
objectclass: ['user'],
|
||||
objectcategory: 'person',
|
||||
cn: user.id,
|
||||
uid: `${username}@${appFqdn}`, // for bind after search
|
||||
uidNumber: uidNumber, // unix uid for ftp access
|
||||
gidNumber: uidNumber // unix gid for ftp access
|
||||
}
|
||||
};
|
||||
|
||||
finalSend([ obj ], req, res, next);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function authenticateMailAddon(req, res, next) {
|
||||
debug('mail addon auth: %s (from %s)', req.dn.toString(), req.connection.ldap.id);
|
||||
|
||||
@@ -498,13 +560,13 @@ function authenticateMailAddon(req, res, next) {
|
||||
|
||||
if (addonId === 'recvmail' && !domain.enabled) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
|
||||
let name;
|
||||
if (addonId === 'sendmail') name = 'MAIL_SMTP_PASSWORD';
|
||||
else if (addonId === 'recvmail') name = 'MAIL_IMAP_PASSWORD';
|
||||
let namePattern; // manifest v2 has a CLOUDRON_ prefix for names
|
||||
if (addonId === 'sendmail') namePattern = '%MAIL_SMTP_PASSWORD';
|
||||
else if (addonId === 'recvmail') namePattern = '%MAIL_IMAP_PASSWORD';
|
||||
else return next(new ldap.OperationsError('Invalid DN'));
|
||||
|
||||
// note: with sendmail addon, apps can send mail without a mailbox (unlike users)
|
||||
appdb.getAppIdByAddonConfigValue(addonId, name, req.credentials || '', function (error, appId) {
|
||||
appdb.getAppIdByAddonConfigValue(addonId, namePattern, req.credentials || '', function (error, appId) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return next(new ldap.OperationsError(error.message));
|
||||
if (appId) { // matched app password
|
||||
eventlog.add(eventlog.ACTION_APP_LOGIN, { authType: 'ldap', mailboxId: email }, { appId: appId, addonId: addonId });
|
||||
@@ -542,6 +604,10 @@ function start(callback) {
|
||||
|
||||
gServer = ldap.createServer({ log: logger });
|
||||
|
||||
gServer.on('error', function (error) {
|
||||
console.error('LDAP:', error);
|
||||
});
|
||||
|
||||
gServer.search('ou=users,dc=cloudron', authenticateApp, userSearch);
|
||||
gServer.search('ou=groups,dc=cloudron', authenticateApp, groupSearch);
|
||||
gServer.bind('ou=users,dc=cloudron', authenticateApp, authenticateUser, authorizeUserForApp);
|
||||
@@ -555,6 +621,9 @@ function start(callback) {
|
||||
gServer.bind('ou=recvmail,dc=cloudron', authenticateMailAddon); // dovecot
|
||||
gServer.bind('ou=sendmail,dc=cloudron', authenticateMailAddon); // haraka
|
||||
|
||||
gServer.bind('ou=sftp,dc=cloudron', authenticateSftp); // sftp
|
||||
gServer.search('ou=sftp,dc=cloudron', userSearchSftp);
|
||||
|
||||
gServer.compare('cn=users,ou=groups,dc=cloudron', authenticateApp, groupUsersCompare);
|
||||
gServer.compare('cn=admins,ou=groups,dc=cloudron', authenticateApp, groupAdminsCompare);
|
||||
|
||||
@@ -571,7 +640,7 @@ function start(callback) {
|
||||
res.end();
|
||||
});
|
||||
|
||||
gServer.listen(config.get('ldapPort'), '0.0.0.0', callback);
|
||||
gServer.listen(constants.LDAP_PORT, '0.0.0.0', callback);
|
||||
}
|
||||
|
||||
function stop(callback) {
|
||||
|
||||
+5
-1
@@ -22,7 +22,11 @@ Locker.prototype.OP_APPTASK = 'apptask';
|
||||
Locker.prototype.lock = function (operation) {
|
||||
assert.strictEqual(typeof operation, 'string');
|
||||
|
||||
if (this._operation !== null) return new Error('Already locked for ' + this._operation);
|
||||
if (this._operation !== null) {
|
||||
let error = new Error(`Locked for ${this._operation}`);
|
||||
error.operation = this._operation;
|
||||
return error;
|
||||
}
|
||||
|
||||
this._operation = operation;
|
||||
++this._lockDepth;
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
sendFailureLogs: sendFailureLogs
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
safe = require('safetydance'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
util = require('util');
|
||||
|
||||
var COLLECT_LOGS_CMD = path.join(__dirname, 'scripts/collectlogs.sh');
|
||||
|
||||
var CRASH_LOG_TIMESTAMP_OFFSET = 1000 * 60 * 60; // 60 min
|
||||
var CRASH_LOG_TIMESTAMP_FILE = '/tmp/crashlog.timestamp';
|
||||
var CRASH_LOG_STASH_FILE = '/tmp/crashlog';
|
||||
|
||||
const AUDIT_SOURCE = { userId: null, username: 'healthmonitor' };
|
||||
|
||||
function collectLogs(unitName, callback) {
|
||||
assert.strictEqual(typeof unitName, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var logs = safe.child_process.execSync('sudo ' + COLLECT_LOGS_CMD + ' ' + unitName, { encoding: 'utf8' });
|
||||
if (!logs) return callback(safe.error);
|
||||
|
||||
logs = logs + '\n\n=====================================\n\n';
|
||||
|
||||
// special case for box since the real logs are at path.join(paths.LOG_DIR, 'box.log')
|
||||
if (unitName === 'box.service') {
|
||||
logs += safe.child_process.execSync('tail --lines=500 ' + path.join(paths.LOG_DIR, 'box.log'), { encoding: 'utf8' });
|
||||
}
|
||||
|
||||
callback(null, logs);
|
||||
}
|
||||
|
||||
function sendFailureLogs(unitName) {
|
||||
assert.strictEqual(typeof unitName, 'string');
|
||||
|
||||
collectLogs(unitName, function (error, logs) {
|
||||
if (error) {
|
||||
console.error('Failed to collect logs.', error);
|
||||
logs = util.format('Failed to collect logs.', error);
|
||||
}
|
||||
|
||||
console.log('Sending failure logs for', unitName);
|
||||
|
||||
if (!safe.fs.writeFileSync(CRASH_LOG_STASH_FILE, logs)) console.log(`Failed to stash logs to ${CRASH_LOG_STASH_FILE}`);
|
||||
|
||||
var timestamp = safe.fs.readFileSync(CRASH_LOG_TIMESTAMP_FILE, 'utf8');
|
||||
|
||||
// check if we already sent a mail in the last CRASH_LOG_TIME_OFFSET window
|
||||
if (timestamp && (parseInt(timestamp) + CRASH_LOG_TIMESTAMP_OFFSET) > Date.now()) {
|
||||
console.log('Crash log already sent within window. Stashing logs.');
|
||||
return;
|
||||
}
|
||||
|
||||
eventlog.add(eventlog.ACTION_PROCESS_CRASH, AUDIT_SOURCE, { processName: unitName, crashLogFile: CRASH_LOG_STASH_FILE }, function (error) {
|
||||
if (error) console.log(`Error sending crashlog. Logs stashed at ${CRASH_LOG_STASH_FILE}`);
|
||||
|
||||
// write the new timestamp file and delete stash file
|
||||
safe.fs.writeFileSync(CRASH_LOG_TIMESTAMP_FILE, String(Date.now()));
|
||||
});
|
||||
});
|
||||
}
|
||||
+20
-8
@@ -1,11 +1,23 @@
|
||||
# Generated by apptask for the /run mount
|
||||
# Generated by apptask
|
||||
|
||||
# keep upto 7 rotated logs. rotation triggered daily or ahead of time if size is > 1M
|
||||
<%= volumePath %>/*.log <%= volumePath %>/*/*.log <%= volumePath %>/*/*/*.log {
|
||||
rotate 7
|
||||
daily
|
||||
compress
|
||||
maxsize=1M
|
||||
missingok
|
||||
delaycompress
|
||||
copytruncate
|
||||
rotate 7
|
||||
daily
|
||||
compress
|
||||
maxsize 1M
|
||||
missingok
|
||||
delaycompress
|
||||
copytruncate
|
||||
}
|
||||
|
||||
/home/yellowtent/platformdata/logs/<%= appId %>/*.log {
|
||||
# only keep one rotated file, we currently do not send that over the api
|
||||
rotate 1
|
||||
size 10M
|
||||
missingok
|
||||
# we never compress so we can simply tail the files
|
||||
nocompress
|
||||
copytruncate
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user