Compare commits
1327 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 7a63512d21 | |||
| 7ed18b3016 | |||
| 0d3d7d2b88 | |||
| 457f76367c | |||
| 74b3d06cd7 | |||
| d22f385402 | |||
| 08f920afee | |||
| b1109ba6ea | |||
| 7700d236a5 | |||
| b10abb1944 | |||
| dd6eeac000 | |||
| 7b8bb5dac4 | |||
| bf444a722d | |||
| a954a23add | |||
| 98aa785ad0 | |||
| ee485d8b2a | |||
| 081b596ebf | |||
| 56f4cbe44a | |||
| ab5b754c22 | |||
| f030aa95ba | |||
| bad947e2ac | |||
| 02b43382c8 | |||
| 4ed35c25a5 | |||
| 0d4f963756 | |||
| 1139c077b0 | |||
| 84afdb2e3a | |||
| 115f9b408f | |||
| d6ce51dabd | |||
| 54bc4b32c8 | |||
| 6537cf700f | |||
| c5e0b45b22 | |||
| cbfd7cf1a6 | |||
| e96199927d | |||
| a67d690291 | |||
| 30ddda723d | |||
| d9bf2f1724 | |||
| 915cfbe7dd | |||
| aeb883623b | |||
| 2d163c1e76 | |||
| 74e79c00fc | |||
| f87f92708b | |||
| b2ff16eb1e | |||
| 0c9f557d21 | |||
| f7dd8c0a23 | |||
| 3067e0940d | |||
| 969fba83ea | |||
| 70a15d01c9 | |||
| efc0a3b68d | |||
| c108cd2d5f | |||
| e67f023a56 | |||
| 208e4267df | |||
| 92b6464cd7 | |||
| ab66c8cb81 | |||
| 2ac12de204 | |||
| 373c003223 | |||
| f236bd3316 | |||
| 9d386bd071 | |||
| 665aa2ad3d | |||
| e8ca423ac4 | |||
| a53214cb29 | |||
| af4296e40c | |||
| 50d396725e | |||
| e0c894d333 | |||
| 044c25311f | |||
| d56575facf | |||
| 05775a843d | |||
| 5261831ca2 | |||
| b0c967ba57 | |||
| 2902c6ca7a | |||
| 0c5aea2fb2 | |||
| de2999cb56 | |||
| 28c1a70ae1 | |||
| ff4d3de1b1 | |||
| ac4f12447b | |||
| 325814e7ca | |||
| 00728dc833 | |||
| c95684af1e | |||
| 0a80bff055 | |||
| 9e7b10860d | |||
| 41eab11641 | |||
| b7abf404f3 | |||
| dc644570f7 | |||
| c4cb6b5819 | |||
| b7e9f0ed12 | |||
| 46df1d694a | |||
| 3efe8e3393 | |||
| e4b12f0c4e | |||
| 61b56d4679 | |||
| 051ac21fed | |||
| 892bd86810 | |||
| 5c4ae6066d | |||
| a35e048665 | |||
| 48f6c39ae5 | |||
| be03bd2c5b | |||
| f108376b25 | |||
| 70e23ed394 | |||
| 5fbfb7365f | |||
| 678865fa2a | |||
| 943dc14bf0 | |||
| c3919592ff | |||
| 442eb8a518 | |||
| 192e4f0a75 | |||
| 921550e3ed | |||
| 7d0cf1a754 | |||
| 6dec02e1bd | |||
| 14fc066af7 | |||
| 8fbad34716 | |||
| 75a344a316 | |||
| 3b8d500636 | |||
| a83bce021b | |||
| 725cf297ab | |||
| 5a2de0bcbb | |||
| cb814a50d7 | |||
| 5d34559f0a | |||
| 91ede59241 | |||
| 778342906e | |||
| c42f3341ca | |||
| a838b4c521 | |||
| 44d4934546 | |||
| 49db0d3641 | |||
| 2bebed2c19 | |||
| 2cf2dddcee | |||
| 306e11ae88 | |||
| 568397ec19 | |||
| 459314df17 | |||
| 693bc094cc | |||
| 9cdd2df696 | |||
| e9b308bb95 | |||
| 432a369bff | |||
| 76312495fd | |||
| 126d8b9bec | |||
| d001647704 | |||
| 8701b36123 | |||
| c56a24d4fb | |||
| e6eb54d572 | |||
| 68c26c1d12 | |||
| 437312811d | |||
| 68d4e70823 | |||
| 74f3a4dd6f | |||
| 3a74babcf4 | |||
| ab2f2c9aab | |||
| 8b11692e37 | |||
| abe04d7d10 | |||
| efe75f0c4e | |||
| b6c20877ea | |||
| 172d5bbdff | |||
| 6ed7a91cf9 | |||
| 61a7f1a126 | |||
| ba49c1e30c | |||
| ca5b69a07d | |||
| 998f736e6f | |||
| 969f8ad11f | |||
| 34ec09588a | |||
| 4091315589 | |||
| 91fb45584f | |||
| 180a455299 | |||
| a77bf54df7 | |||
| 74abce99ac | |||
| b2d27ee26a | |||
| 1466104681 | |||
| 4acd0bcdac | |||
| f9f2bd5c28 | |||
| a752b7139f | |||
| 2becf674ee | |||
| ef2c44ee2f | |||
| a5e5324f97 | |||
| 479261bcec | |||
| ac94a0b7f2 | |||
| 0f191324fa | |||
| b507ccaa33 | |||
| 9f6bc0b779 | |||
| 7306f1ddea | |||
| dc1d10837b | |||
| f58d6c04cc | |||
| f9dda85a38 | |||
| 8773c0f6e1 | |||
| 72a96c0d6a | |||
| 136ee363a8 | |||
| 9c5965311f | |||
| 78bd819a36 | |||
| 48df8b713d | |||
| 0e15fabf88 | |||
| ed83a11248 | |||
| 8d69e5f3b9 | |||
| 5dab697fd6 | |||
| a94d5d1b3e | |||
| 9c0af8b13e | |||
| a08ff89b78 | |||
| 2e06724927 | |||
| f7c7a36fc1 | |||
| 748d1b8471 | |||
| 032200b20f | |||
| 4cbb751d82 | |||
| 27e4f0cb82 | |||
| 321bfc6130 | |||
| 635426c37e | |||
| 33e7c8e904 | |||
| 616b4b86d8 | |||
| e3e6fd2bc9 | |||
| 07626dacb5 | |||
| bf711c6ebb | |||
| a4a3e19a92 | |||
| 16db4ac901 | |||
| 78d6b6d632 | |||
| 009b8abf1b | |||
| 4edd874695 | |||
| dda403caa9 | |||
| de44796b6f | |||
| 53e3626e51 | |||
| 9aa4fdc829 | |||
| 1ccc3b84b8 | |||
| d4b6768464 | |||
| 6e07a4ec08 | |||
| 1cee0f3831 | |||
| a52747cde0 | |||
| 14d575f514 | |||
| e43e904622 | |||
| 1dfa689d1c | |||
| 293e401852 | |||
| c565d0789e | |||
| 59ae1ac012 | |||
| 4cf2978088 | |||
| 707d34cb89 | |||
| 20a37030b6 | |||
| e1be8b669f | |||
| c723b289dc | |||
| 7c51c380ae | |||
| d75959772c | |||
| 37e23c9465 | |||
| 21c8f63dc1 | |||
| ca3b6e542a | |||
| 3e4466a41e | |||
| c1b5f56ac6 | |||
| 28c3ef772e | |||
| f1b23005c9 | |||
| 143ba831f4 | |||
| 5ca31f2484 | |||
| 5c272fe5d9 | |||
| 155877534f | |||
| a2a1d842fa | |||
| 260ac0afb7 | |||
| fb9372d93e | |||
| eb65f9e758 | |||
| 3265d7151c | |||
| 597af2e034 | |||
| 0b8f0bf731 | |||
| a7e10cead0 | |||
| 0e74a6df35 | |||
| 3fbaa385c4 | |||
| 29637bb4f4 | |||
| 9dba816711 | |||
| 9155f49d4c | |||
| 0e62780f55 | |||
| 998bc36673 | |||
| c2dbc40473 | |||
| cd5a14ce47 | |||
| 917122c812 | |||
| 21b8b8deba | |||
| 44c2aedb57 | |||
| 7e6a83df84 | |||
| ec4910a45e | |||
| 6558c78094 | |||
| 5df92d1903 | |||
| 05affa7d26 | |||
| 46c6c5a5a8 | |||
| 75da751c72 | |||
| b84f60671e | |||
| 8dcb06cb02 | |||
| 83bf739081 | |||
| 48a52fae2e | |||
| 0ddbda6068 | |||
| 360fa058ea | |||
| 489d2022e6 | |||
| f762d0c0a1 | |||
| 98cad0678d | |||
| 92acb2954f | |||
| 00a6e4c982 | |||
| bf9eb4bd87 | |||
| 2f4940acbd | |||
| 9f7ca552a6 | |||
| 4272d5be8a | |||
| 1babfb6e87 | |||
| 5663cf45f8 | |||
| d8cb2d1d25 | |||
| 174a60bb07 | |||
| 3d7094bf28 | |||
| 4d6616930a | |||
| 24875ba292 | |||
| c58b2677b6 | |||
| 25146e1134 | |||
| c0c35964fe | |||
| 0bf9ab0a2b | |||
| 6d86f4cbda | |||
| d2741bbeb9 | |||
| 690d02a353 | |||
| c629db9597 | |||
| 994f771d4d | |||
| 67fcf85abb | |||
| 527eace8f8 | |||
| e65230b833 | |||
| 3e8334040b | |||
| 2bcd3a8e4d | |||
| e75b85fc3a | |||
| c4362d3339 | |||
| 85e492a632 | |||
| b8d4b67043 | |||
| ffacd31259 | |||
| 19f6da88da | |||
| c0faae4e27 | |||
| a19c566eea | |||
| 3ec806452c | |||
| 0c73cd5219 | |||
| 9b6bf719ff | |||
| 25431d3cc4 | |||
| e0805df3b1 | |||
| 8392fec570 | |||
| 1c173ca83f | |||
| 05a67db761 | |||
| bb24d5cf9e | |||
| 8d2fbe931f | |||
| 0a8adaac9f | |||
| fa6d151325 | |||
| a7296a0339 | |||
| a6aee53ec2 | |||
| 963ab2e791 | |||
| ca724b8b03 | |||
| 88a929c85e | |||
| 2bc0270880 | |||
| 014b77b7aa | |||
| 06f8aa8f29 | |||
| a8c64bf9f7 | |||
| 41ef16fbec | |||
| 2a848a481b | |||
| 3963d76a80 | |||
| 8ede37a43d | |||
| 36534f6bb2 | |||
| 7eddcaf708 | |||
| 2cad93dfd2 | |||
| 9b1f8febf1 | |||
| d8d2572aa1 | |||
| 96a98a74ac | |||
| d0a244e392 | |||
| f09c89e33f | |||
| d53f0679e5 | |||
| 527093ebcb | |||
| bd5835b866 | |||
| 51ca1c7384 | |||
| 6dd70c0ef2 | |||
| acc90e16d7 | |||
| 4b3aca7413 | |||
| 8daee764d2 | |||
| 8d14832c6a | |||
| 051d04890b | |||
| 3dedda32d4 | |||
| d127b25f0f | |||
| 6a2b0eedb3 | |||
| 8c81a97a4b | |||
| d9ab1a78d5 | |||
| 593df8ed49 | |||
| b30def3620 | |||
| 9c02785d49 | |||
| f747343159 | |||
| 2971910ccf | |||
| 56534b9647 | |||
| a8d26067ee | |||
| 4212e4bb00 | |||
| 7b27ace7bf | |||
| d8944da68d | |||
| 433d797cb7 | |||
| 0b1d940128 | |||
| 6016024026 | |||
| e199293229 | |||
| 2ebe92fec3 | |||
| 628cf1e3de | |||
| 9e9aaf68f0 | |||
| b595ca422c | |||
| 9273a6c726 | |||
| 76d00d4e65 | |||
| 668c03a11b | |||
| 1e72d2d651 | |||
| 89fc8efc67 | |||
| 241dbf160e | |||
| e46bdc2caa | |||
| e1cb91ca76 | |||
| 709c742c46 | |||
| ecad9c499c | |||
| ed0879ffcd | |||
| 61e2878b08 | |||
| d97034bfb2 | |||
| 21942552d6 | |||
| dd68c8f91f | |||
| 28ce5f41e3 | |||
| 5694e676bd | |||
| db8c5a116f | |||
| fa39f0fbf3 | |||
| 1444bb038f | |||
| ac9e421ecf | |||
| b60cbe5a55 | |||
| 56d794745b | |||
| fd3b73bea2 | |||
| 78807782df | |||
| 754b29b263 | |||
| 9f97f48634 | |||
| 815e5d9d9a | |||
| 91ec2eaaf5 | |||
| f8d3a7cadd | |||
| d04a09b015 | |||
| 5d997bcc89 | |||
| f0dd90a1f5 | |||
| ee8ee8e786 | |||
| ee1a4411f8 | |||
| df6e6cb071 | |||
| ba5645a20e | |||
| ca502a2d55 | |||
| ecd53b48db | |||
| b9efb0b50b | |||
| 3fb5034ebd | |||
| afed3f3725 | |||
| b4f14575d7 | |||
| f437a1f48c | |||
| c3d7d867be | |||
| 96c16cd5d2 | |||
| af182e3df6 | |||
| d70ff7cd5b | |||
| 38331e71e2 | |||
| 322a9a18d7 | |||
| 423ef546a9 | |||
| e3f3241966 | |||
| eaef384ea5 | |||
| b85bc3aa01 | |||
| 01154d0ae6 | |||
| 6494050d66 | |||
| 8c7223ceed | |||
| 21afc71d89 | |||
| 7bf70956a1 | |||
| 9e9b8b095e | |||
| 0f543e6703 | |||
| f9973e765c | |||
| e089851ae9 | |||
| c524d68c2f | |||
| 5cccb50a31 | |||
| 3d375b687a | |||
| a93d453963 | |||
| f8ac2d4628 | |||
| d5ba73716b | |||
| 954224dafb | |||
| 8b341e2bf8 | |||
| 78fb9401ee | |||
| 4a5cbab194 | |||
| 19999abc50 | |||
| 5123b669d7 | |||
| 565c8445e1 | |||
| 404a019c56 | |||
| 24dee80aa6 | |||
| ce6df4bf96 | |||
| f8f6c7d93e | |||
| bafc6dce98 | |||
| 56ee4d8e25 | |||
| eeef221b4e | |||
| 4674653982 | |||
| a34180c27b | |||
| aa8ce2c62e | |||
| b3c6b8aa15 | |||
| 44a7a2579c | |||
| 39f0e476f2 | |||
| 003dc0dbaf | |||
| e39329218d | |||
| 8d3fbc5432 | |||
| 2780de631e | |||
| 399c756735 | |||
| 859311f9e5 | |||
| a9e89b57d9 | |||
| 4e68abe51d | |||
| 12083f5608 | |||
| d1efb2db56 | |||
| adde28523f | |||
| f122f46fe2 | |||
| ad7fadb4a9 | |||
| be383582e0 | |||
| 0a60365143 | |||
| 2f6cb3e913 | |||
| b0f85678d4 | |||
| e43413e063 | |||
| e39a5c8872 | |||
| fb4b75dd2a | |||
| 3c1ccc5cf4 | |||
| abd66d6524 | |||
| b61b7f80b5 | |||
| efa850614d | |||
| 21c534c806 | |||
| 7e4ff2440c | |||
| f415e19f6f | |||
| 97da8717ca | |||
| cbddb79d15 | |||
| bffb935f0f | |||
| e50e0f730b | |||
| 26f33a8e9b | |||
| 952b1f6304 | |||
| a3293c4c35 | |||
| 4892473eff | |||
| 221d5f95e1 | |||
| 84649b9471 | |||
| 44435559ab | |||
| c351660a9a | |||
| 0a24130fd4 | |||
| ea13f8f97e | |||
| d00801d020 | |||
| 8ced0aa78e | |||
| f5d32a9178 | |||
| 7fc45b3215 | |||
| 9bed14a3e8 | |||
| 71233ecd95 | |||
| 02097298c6 | |||
| be03dd0821 | |||
| 5b77d2f0cf | |||
| 781f543e87 | |||
| 6525a467a2 | |||
| 6cddd61a24 | |||
| b0ee116004 | |||
| 867a59d5d8 | |||
| 6f5085ebc3 | |||
| e8a93dcb1b | |||
| 09fe957cc7 | |||
| 020ccc8a99 | |||
| 7ed304bed8 | |||
| db1e39be11 | |||
| f163577264 | |||
| 9c7080aea1 | |||
| c05a7c188f | |||
| 72e912770a | |||
| 28c06d0a72 | |||
| 9805daa835 | |||
| a920fd011c | |||
| 1b979ee1e9 | |||
| 70eae477dc | |||
| c16f7c7891 | |||
| 63b8a5b658 | |||
| c0bf51b79f | |||
| 3d4178b35c | |||
| 34878bbc6a | |||
| e78d976c8f | |||
| ba9662f3fa | |||
| c8750a3bed | |||
| 9710f74250 | |||
| 52095cb8ab | |||
| c612966b41 | |||
| 90cf4f0784 | |||
| ec93d564e9 | |||
| 37f9e60978 | |||
| ca199961d5 | |||
| fd811ac334 | |||
| 609c1d3b78 | |||
| 9906ed37ae | |||
| dcdce6d995 | |||
| 9026c555f9 | |||
| 547a80f17b | |||
| 300d3dd545 | |||
| 6fce729ed2 | |||
| d233ee2a83 | |||
| 3240a71feb | |||
| 322be9e5ba | |||
| e67ecae2d2 | |||
| 75b3e7fc78 | |||
| 74c8d8cc6b | |||
| 51659a8d2d | |||
| 70acf1a719 | |||
| 8d2f3b0217 | |||
| e498678488 | |||
| 513517b15e | |||
| a96f8abaca | |||
| f7bcd54ef5 | |||
| d58e4f58c7 | |||
| 45f0f2adbe | |||
| 36c72dd935 | |||
| df9e2a7856 | |||
| 2b043aa95f | |||
| c0a09d1494 | |||
| 1c5c4b5705 | |||
| b56dcaac68 | |||
| fd91ccc844 | |||
| fca1a70eaa | |||
| ed81b7890c | |||
| cb8dcbf3dd | |||
| 4bdbf1f62e | |||
| 47a8b4fdc2 | |||
| 5720e90580 | |||
| f98e13d701 | |||
| d5d924861b | |||
| b81a92d407 | |||
| 22b0100354 | |||
| 6eb6eab3f4 | |||
| 57d5c2cc47 | |||
| 6a9eac7a24 | |||
| e4760a07f0 | |||
| 257e594de0 | |||
| 6fea022a04 | |||
| f34840d127 | |||
| f9706d6a05 | |||
| 61f7c1af48 | |||
| 00786dda05 | |||
| 8b9f44addc | |||
| 56c7dbb6e4 | |||
| c47f878203 | |||
| 8a2107e6eb | |||
| cd9f0f69d8 | |||
| 1da91b64f6 | |||
| a87dd65c1d | |||
| 7c63d9e758 | |||
| 329bf596ac | |||
| 2a57c4269a | |||
| ca8813dce3 | |||
| 3aebf51360 | |||
| 103f8db8cb | |||
| 04c127b78d | |||
| 9bef1bcf64 | |||
| 718413c089 | |||
| a34691df44 | |||
| 795e38fe82 | |||
| 1d348fb0f3 | |||
| 91f3318879 | |||
| c61808f4c6 | |||
| 991b2dad28 | |||
| f3d9a70de7 | |||
| 60758de10a | |||
| 6a0ef7a1c1 | |||
| 7cb451c157 | |||
| 3c31c96ad4 | |||
| 5d73f58631 | |||
| 4ca7cccdae | |||
| 82380b6b7c | |||
| 979c4e77e3 | |||
| e318fb0c01 | |||
| 77d2fb97e5 | |||
| 24e6c4d963 | |||
| 064c5cf7f2 | |||
| 891542bfb9 | |||
| 599702d410 | |||
| 3cb39754fd | |||
| f04345a99a | |||
| 3d59b8a5b0 | |||
| cf518b0285 | |||
| 52832c881a | |||
| 537fbff4aa | |||
| e3040b334d | |||
| 6c2879d567 | |||
| 595c89076f | |||
| c85f5b15c6 | |||
| 8fbed7e84b | |||
| ee3c5f67af | |||
| 52db28e876 | |||
| 65bc3491f6 | |||
| 82f512dc27 | |||
| 4b41378d08 | |||
| 1fd4e27d92 | |||
| 2420fef6b1 | |||
| 50074b936a | |||
| f98e68edc1 | |||
| 83e5daf08c | |||
| 53b43ca36b | |||
| d11842a7f8 | |||
| 6746781b46 | |||
| 78ec8e5c0c | |||
| 67a2ba957e | |||
| 9e558924bb | |||
| afcb3dd237 | |||
| 054de4813d | |||
| 57891c64b5 | |||
| 26361c037d | |||
| 2048b03431 | |||
| c12aba6c00 | |||
| 0bd0857189 | |||
| 978893250f | |||
| d0f4a76ca2 | |||
| 755c87b079 | |||
| 1da073c9bf | |||
| 96ead77520 | |||
| 178b04fead | |||
| 335631ac28 | |||
| 42778cb84d | |||
| 2f51088e67 | |||
| 378d7aee91 | |||
| ac53f8c747 | |||
| 5fe73c5a46 | |||
| a6f13eee14 | |||
| 86d23a4d35 | |||
| b25bb76792 | |||
| 7ba5d1e0d6 | |||
| f17bde2d97 | |||
| 93cafebfdb | |||
| 5538a91585 | |||
| 09cb468290 | |||
| 3b98eb0543 | |||
| 59936c6fbf | |||
| 5a7e636f2d | |||
| 401dc37a50 | |||
| 9f1af572a0 | |||
| 96e2fa159c | |||
| bc49a3e18a | |||
| ae19d8d754 | |||
| 13b067eb88 | |||
| af08f4e7b6 | |||
| 534e5781ba | |||
| 737e266729 | |||
| 07a133ebe9 | |||
| b0444edf7e | |||
| bcf37d833f | |||
| e7db2ab137 | |||
| 125b416463 | |||
| 800468fbe6 | |||
| 0c1e3ec6a0 | |||
| d97ee5d425 | |||
| a1be30c35a | |||
| ba3cb3b646 | |||
| daadefe6b9 | |||
| 12c849398e | |||
| 392492be04 | |||
| 4fd0c3c66c | |||
| 7d2e6d8d4d | |||
| f3e7249bdc | |||
| 53afb2606a | |||
| fbce71d031 | |||
| 1adde7d8e8 | |||
| d23599ba24 | |||
| ac35bcf9f0 | |||
| e4c5dfda60 | |||
| 99cfe564ae | |||
| 70a3cdc9bc | |||
| bd52068695 | |||
| ae54b57ca7 | |||
| 0eb3c26c05 | |||
| ca007ff979 | |||
| 2eb5c39388 | |||
| 014ce9df66 | |||
| a4dff215f1 | |||
| 0db4387013 | |||
| d81abfb2f0 | |||
| 0d880cf1e3 | |||
| b24d600b31 | |||
| 2ac52fc64f | |||
| 3bf07a3143 | |||
| cf883046b3 | |||
| 5e9808ad79 | |||
| 83ddf0a62c | |||
| cb7fea97af | |||
| 3a4ee3ec8c | |||
| 96dbda3949 | |||
| 7facf17ac6 | |||
| a939367ab6 | |||
| fd52f0ded4 | |||
| 84ba20493e | |||
| 4f9a9906c9 | |||
| 204340eac0 | |||
| d72fffb61f | |||
| cf4f0af0be | |||
| 07d0601342 | |||
| 4cd0e4d38d | |||
| 4f1a596123 | |||
| d3990eff39 | |||
| 6eab8bbdce | |||
| 61e130fb71 | |||
| 0c2267f9b4 | |||
| a4e822f1c0 | |||
| e9c5837059 | |||
| 17406e4560 | |||
| eb99f8b844 | |||
| 4fec2fe124 | |||
| 4045eb7a33 | |||
| 99d8baf36f | |||
| db7a4b75ae | |||
| dcd8c82a75 | |||
| d577756851 | |||
| 1e9c1e6ed0 | |||
| ecc76ed368 | |||
| 9e61f76aad | |||
| 11c2cecc9e | |||
| b5aed7b00a | |||
| 4d177e0d29 | |||
| f070082586 | |||
| f3483e6a92 | |||
| 91e56223ce | |||
| 631b830f4c | |||
| 63364ae017 | |||
| 3b162c6648 | |||
| b4fb73934b | |||
| 8f04163262 | |||
| 10b6664134 | |||
| 454ca86507 | |||
| 34020064bc | |||
| 67486b8177 | |||
| 6a4be98f19 | |||
| d5fb048364 | |||
| 6dd4d40692 | |||
| 04d6f94108 | |||
| 8d49f5a229 | |||
| f80713ba2f | |||
| 91f25465a4 | |||
| aa5cc68301 | |||
| acd00222e5 | |||
| 5697bcf43f | |||
| 5b7cc6642a | |||
| ee528470a7 | |||
| 3eed481d22 | |||
| 97b37cb45c | |||
| 49de39a1f3 | |||
| 6fe390b957 | |||
| 1a68467ff2 | |||
| 40de715f20 | |||
| 8d9fbb9cea | |||
| e3910d6587 | |||
| 50e712a93e | |||
| 1c8ddc10db | |||
| 1007a85fde | |||
| a0903f0890 | |||
| 1c40e51999 | |||
| c07df68558 | |||
| fd5a05db6c | |||
| 19d825db48 | |||
| 2862fec819 | |||
| 2df74ebe96 | |||
| 5794aaee0a | |||
| 229ca7f86b | |||
| 7edf43c627 | |||
| ae1dff980a | |||
| 01d0e56332 | |||
| 00990b6837 | |||
| 5886671fba | |||
| 5088cb47d9 | |||
| 60ae4972b0 | |||
| ad8ddf80f5 | |||
| c4d313a2c0 | |||
| 140e9fdd94 | |||
| 82b5c11374 | |||
| 3307b581af | |||
| 45e68ef6da | |||
| 4d7f9ba9a5 | |||
| 6d0cdc36b2 | |||
| 79541a68a5 | |||
| 845d386478 | |||
| 8771de5c12 | |||
| 76246b2952 | |||
| f994b68701 | |||
| 77558c823c | |||
| dd6a19ea85 | |||
| 16978f8c1a | |||
| f311c3da1c | |||
| 423e355fd6 | |||
| 8fadb3badc | |||
| 3845065085 | |||
| 801d848908 | |||
| e6eda1283c | |||
| a553755f4a | |||
| cd52459f05 | |||
| 1802201e9e | |||
| 2d72f49261 | |||
| cd42a6c2ea | |||
| 65f949e669 | |||
| f3fec9a33c | |||
| 13182de57f | |||
| c33566b553 | |||
| 4faf247898 | |||
| 9952a986eb | |||
| 40aaffe365 | |||
| 3745e96a6f | |||
| 157ce06f93 | |||
| 822dfb8af5 | |||
| 9ead482dc6 | |||
| 865c0a7aa7 | |||
| c760c42f92 | |||
| ded31b977e | |||
| 4781c4e364 | |||
| 8e123b017e | |||
| 658cbcdab9 | |||
| 0cc980f539 | |||
| da7648fe3f | |||
| 8db1073980 | |||
| f74f17af02 | |||
| 87ca05281d | |||
| 9780f77fa8 | |||
| 0bddd5a2c6 | |||
| 20f2a6e4c6 | |||
| 6d47737de7 | |||
| e8f9552ff9 | |||
| 9c76c5fc27 | |||
| f9d5f92397 | |||
| 3a2a05dfce | |||
| 5a291fa2a4 | |||
| 84d34ec004 | |||
| 63fca38f0b | |||
| e3b2799230 | |||
| 2efe72519e | |||
| eb3ae2c34f | |||
| eba79cd859 | |||
| d7d8cf97ed | |||
| 089f7301b8 | |||
| fb4f13eb13 | |||
| 89878ff9ad | |||
| ba62f577fa | |||
| 4c5bd2d318 | |||
| 3c318a72f7 | |||
| 23532eafea | |||
| 5b7a080d98 | |||
| 0a44b8c23b | |||
| c0c07c2839 | |||
| 96d2b32a9f | |||
| 795c2ad91c | |||
| fc9a9c3f87 | |||
| d141d6ba21 | |||
| 479da5393a | |||
| 307334ef81 | |||
| c1ec7a06bf | |||
| 1126a0fc1e | |||
| b5f678613b | |||
| b7e3447a46 | |||
| 32fa3b8a51 | |||
| fe0e4000a6 | |||
| 9ceeb70fc2 | |||
| aa8b4f1fba | |||
| 95ba51dfb2 | |||
| c74fb07ff7 | |||
| 03f1326073 | |||
| daa4c66e7f | |||
| 571abc56fe | |||
| 4aaeccecbd | |||
| 4287d69397 | |||
| de328e34d8 | |||
| 8d45ce6971 | |||
| fa3f173e8a | |||
| 414e9bdf05 | |||
| c342e52e7d | |||
| 78aa9c66f7 | |||
| 986ec02ac6 | |||
| 4e0bb9187a | |||
| 9c8a8571b4 | |||
| 7f30b8de9d | |||
| d1bfa4875a | |||
| 0250e1ea59 | |||
| 924fb337e8 | |||
| 0c9dce0c9f | |||
| 9e9470c6af | |||
| 471539d64b | |||
| 95127a868d | |||
| f34d429052 | |||
| 82e53bce36 | |||
| b04a417cfc | |||
| 77641f4b51 | |||
| 765d20c8be | |||
| d2420de594 | |||
| 8e9da38451 | |||
| ddb69eb25c | |||
| 11697f11cf | |||
| 35a2a656d3 | |||
| 6fc69c05ca | |||
| 65cff35be6 | |||
| 7467907c09 | |||
| d6c32a2632 | |||
| 7dc277a80c | |||
| 4881d090f0 | |||
| 48330423c6 | |||
| 88e844b545 | |||
| f45da2efc4 | |||
| f422614e7b | |||
| d164f881ca | |||
| 4994a5da49 | |||
| 393317d114 | |||
| 8de940ae36 | |||
| 374130d12a | |||
| 05fcdb0a67 | |||
| 23827974d8 | |||
| ae2c0f3503 | |||
| cbb93ef7ad | |||
| 4d3c6f7caa | |||
| 4f3c846e2b | |||
| 6ef2f974ae | |||
| 180cafad0c | |||
| f707f59765 | |||
| 969ef3fb11 | |||
| 7af3f85d7c | |||
| ffc0a75545 | |||
| d5b5bdb104 | |||
| 8ae65661dd | |||
| 423c4446de | |||
| 53cffd5133 | |||
| 15ff1fb093 | |||
| 195d388990 | |||
| d008e871da | |||
| 3e6295de92 | |||
| 788004245a | |||
| be5221d5b8 | |||
| dacc66bb35 | |||
| 5f26c3a2c1 | |||
| 228af62c39 | |||
| b531922175 | |||
| dad58efc94 | |||
| 7a3d3a3c74 | |||
| e5c42f2b90 | |||
| 6cbf64b88e | |||
| 9635f9aa24 | |||
| 893f9d87bc | |||
| bfda0d4891 | |||
| 65a62f9fbf | |||
| 6d74f7e26f | |||
| 14ca0c1623 | |||
| 3f6e8273a7 | |||
| 287b96925a | |||
| 608cc1e036 | |||
| 5fa27c4954 | |||
| 8deadece05 | |||
| 797dc26f47 | |||
| ddf7823b19 | |||
| 923e1d0524 | |||
| 339bc71435 | |||
| 863612356d | |||
| 56cdaefecc | |||
| 9e611b6ae3 | |||
| 7e26b4091b | |||
| d7702b96e5 | |||
| 41edd3778d | |||
| 0ac69cc6c9 | |||
| fbb01b1ce7 | |||
| a723203b28 | |||
| a995037f0a | |||
| 5ad4600fd4 | |||
| 8ddb670445 | |||
| ca5723bbc7 | |||
| 1b0a81cb3f | |||
| d92a2b070c | |||
| 4703f1afda | |||
| 3fad5e856c | |||
| cc66830a2d | |||
| 880f7b4cd3 | |||
| 5b9d4daafe | |||
| 410420e9d5 | |||
| ca7f80414e | |||
| 81b705b25b | |||
| 11c7ba1957 | |||
| f79e1993cb | |||
| fe71dc22fc | |||
| e3c72fa6ce | |||
| 27a542daec | |||
| aeba8e8fd2 | |||
| a0e122e578 | |||
| 29ae2cf8ca | |||
| abe72442ae | |||
| 8e134f3ae8 | |||
| 70042021aa | |||
| 6cc708136e | |||
| 00ac78c839 | |||
| 6f0c271e6e | |||
| ef3a125ce4 | |||
| d91e8bb87b | |||
| a7d7935451 | |||
| 8c011ea9b0 | |||
| c41b2c32f5 | |||
| 1e90ec95d3 | |||
| 1cca0aee6e | |||
| be73ec4b66 | |||
| 6c8b9b8799 | |||
| 0aea7cc347 | |||
| e15c3f05c2 | |||
| f516dddf30 | |||
| 8fb1bc29d1 | |||
| cc8f8b2339 | |||
| f7338c8210 | |||
| c04e8f33c5 | |||
| 8e1f190079 | |||
| 019cff8851 | |||
| 8a76788e7a | |||
| 33492333c7 | |||
| 710cdc7bb8 | |||
| 0471a14894 | |||
| e4b1b73408 | |||
| a6c2c608e4 | |||
| 1cd36319ff | |||
| a65611a37b | |||
| 4769d14414 | |||
| 64c2f3d8c3 | |||
| 2083efdef8 | |||
| a5efdb067d | |||
| 0584ace954 | |||
| 77d23d6c15 | |||
| a0c3a531b3 | |||
| 31ea1e677c | |||
| 2479af23ab | |||
| 83f36981f7 | |||
| 851e70be6e | |||
| f0ba126156 | |||
| 9dd51575ab | |||
| cf701b8eb0 | |||
| 9b4e81b476 | |||
| 788873e858 | |||
| 926224bd5d | |||
| d9a0bf457d | |||
| 6a5b0c194f | |||
| 9f117fcfdc | |||
| fe01d1bf28 | |||
| a94d44da75 | |||
| 8ded006dea | |||
| 5424a658f3 | |||
| b268a18695 | |||
| 06f1b9dd1b | |||
| e82bdfc996 | |||
| 40f5d0412b | |||
| 65af062c33 | |||
| 68a1efe3d3 | |||
| 3cb4d4b1ab | |||
| 0c706cffc0 | |||
| 5f888341ea | |||
| cf69a8c4ce | |||
| a3ece64273 | |||
| 4bda11edcf | |||
| 3913a8367b | |||
| bac0ea17c2 | |||
| d7f77de6c6 | |||
| d2d2818b0b | |||
| b58fe9edd6 | |||
| dee8bec2dc | |||
| d5db9657ca | |||
| 9378b949fb | |||
| ad9cb00f13 | |||
| 5ccca76b17 | |||
| cec52e14f6 | |||
| 977936018f | |||
| 261d15f0f7 | |||
| 14fe1dde58 | |||
| 737bbd26ee | |||
| f5db7c974f | |||
| 7303a09f2f | |||
| e3cfaabb74 | |||
| 3cae400b63 | |||
| 5dd10e7cd2 | |||
| f12358a10c | |||
| 23e3b0bd91 | |||
| 7a39cdda97 | |||
| 5460027a49 | |||
| 4cfee06297 | |||
| d0147a5e67 | |||
| a1dfc2b47b | |||
| 5eaade1079 | |||
| 19d8b90a12 | |||
| 6bc764090c | |||
| d64c4927aa | |||
| 9c45dec8b0 | |||
| a21750a4c9 | |||
| dda16331f6 | |||
| 7b93150047 | |||
| a98177fe71 | |||
| d95e68926b | |||
| ff3a748398 | |||
| 9354784f01 | |||
| e021a4b377 | |||
| 4fac5a785f | |||
| 4d42c116ce | |||
| 3879b55642 | |||
| 9d61ccaa45 | |||
| 6d8cf8456e | |||
| 5e1ad4ad93 | |||
| b29a6014d5 | |||
| bd7625031e | |||
| 9e881d1934 | |||
| 31f93f0255 | |||
| 67a7624da0 | |||
| 7fdf491815 | |||
| 798c2ff921 | |||
| 42c138e134 | |||
| b1b389dd7d | |||
| 8911081f85 | |||
| 9605fe3842 | |||
| bb91faf23c | |||
| ba56f7d15d | |||
| 6e73761983 | |||
| 588812a13a | |||
| b6d8721aed | |||
| da835afde1 | |||
| 5e22caa6e7 | |||
| 937931e885 | |||
| c2b140208e | |||
| f9a4d00b3c | |||
| eb2ef47df1 | |||
| cdb5dc2c53 | |||
| f6a2406091 | |||
| c7134d2da3 | |||
| 1692842bf0 | |||
| 8d78f06a34 | |||
| 1694a1536c | |||
| e0b9dc3623 | |||
| 644bc54a0d | |||
| b2d062bdf8 | |||
| 894d7a6e72 | |||
| fee513594f | |||
| 456c183622 | |||
| 0488aada9f | |||
| 54f7cf5f64 | |||
| e1740a0d4b | |||
| bac7d3ad84 | |||
| 6402b0c221 | |||
| 1f55bb52fc | |||
| 1029402d1e | |||
| abb371d81e | |||
| 779c9d79b3 | |||
| 832c11d785 | |||
| 85fb63298e | |||
| 25b9d5a746 | |||
| 6dc900bbd8 | |||
| e32b313cf2 | |||
| a01dea3932 | |||
| aa0e820605 | |||
| 13db61a0e2 | |||
| fce2cdce7f | |||
| 89bb690152 | |||
| 5c203dc759 | |||
| 4d737d535a | |||
| 558acf27a3 | |||
| 3da503ab8e | |||
| 299e8aceeb | |||
| b422a27be8 | |||
| f2312a6768 | |||
| 178ffe20a8 | |||
| 3b8edd4896 | |||
| f16aab7f80 | |||
| 09118d6b06 | |||
| bd57ee9461 | |||
| 1fbbe036ce | |||
| 94d7bc5328 | |||
| d709a5cfe4 | |||
| 188f000507 | |||
| 51d5b96fa1 | |||
| 11d12c591e | |||
| 245d17ad25 | |||
| e05e9c3ead | |||
| 8102d431e8 | |||
| 0f76cbbb95 | |||
| 2a45a9bbd4 | |||
| e68d627f72 | |||
| 1a3e3638ff | |||
| 8f912d8a1b | |||
| d891058f8c | |||
| 71fe094be1 | |||
| da857f520b | |||
| 39ff21bdf4 | |||
| 72dd7c74d5 | |||
| 7c7ef15e1c | |||
| b320e15ea7 | |||
| aa22ab8847 | |||
| 3e23c3efce | |||
| c4f96bbd6b | |||
| 649092ecb0 | |||
| 128a3b03c9 | |||
| 847ef6626f | |||
| 4643daeeec | |||
| 38178afd31 | |||
| 9c6324631d | |||
| 3a17bf9a0f | |||
| 602f8bcd04 | |||
| 785ae765a4 | |||
| c85120834c | |||
| 89d36b8ad4 | |||
| b9711d7b47 | |||
| 4f9273819a | |||
| e0d7850135 | |||
| 2c871705c7 | |||
| 2bb99db2c7 | |||
| 3fc5757e97 | |||
| 92ff19ffce | |||
| e9456f70f9 | |||
| ffbda22145 | |||
| b92ae44578 | |||
| b6ffc966cd | |||
| b42bc52093 | |||
| 806b458ff1 | |||
| d5d4e237bd | |||
| 956fe86250 | |||
| 4d000e377f | |||
| 39e827be04 | |||
| e50b4cb7ec | |||
| 1938ec635b | |||
| 03a3d367a4 | |||
| 38c2f75b5e | |||
| 9d98b55881 | |||
| 18e59c4754 | |||
| 64cb951206 | |||
| 77df520b07 | |||
| 32f94a03ce | |||
| fc6ce4945f | |||
| 17b7d89db9 | |||
| 6ea741e92f | |||
| 790ad4e74d | |||
| f92297cc99 | |||
| 0c6c835a39 | |||
| 514341172c | |||
| e535ffa778 | |||
| b86cfabd17 | |||
| b44f0b78a1 | |||
| 76d234d0bf | |||
| a694acba44 | |||
| 046120befc | |||
| b65fee4b73 | |||
| 153dcc1826 | |||
| fa4725176c | |||
| e42607fec6 | |||
| 297c1ff266 | |||
| 5afe75f137 | |||
| 4cfc85f6d3 | |||
| b03f901bbf | |||
| b9dfac94ed | |||
| c905adde1e | |||
| 0e7efa77a5 | |||
| 875ca0307f | |||
| 543c9843ba | |||
| 83254a16f9 | |||
| 466dfdf81f | |||
| 3d60a04b36 | |||
| 103cb10cad | |||
| 29ef079a83 | |||
| a55645770e | |||
| 132ddd2671 | |||
| fa5891b149 | |||
| d01929debc | |||
| 7c01ee58b5 | |||
| ec89f8719c | |||
| 9145022a2c | |||
| 9ae8ce3296 | |||
| eabf27f0c9 | |||
| 3102a15dff | |||
| 7747c482d4 | |||
| 444ca1888b | |||
| 86ccf5ea84 | |||
| ef088293b6 |
@@ -805,3 +805,382 @@
|
||||
* (mail) Set maximum email size to 25MB
|
||||
* Remove SimpleAuth addon
|
||||
|
||||
[0.107.0]
|
||||
* Support CSP for webinterface and OAuth views
|
||||
* (mail) Fix issue where Cloudron is only used to send emails
|
||||
|
||||
[0.108.0]
|
||||
* Redirect to /setupdns.html when restoring
|
||||
* Fix setting custom avatar
|
||||
* Do not allocate more than 4GB swap
|
||||
* Generate real passwords for sendmail/recvmail addons
|
||||
* Rate limit all authentication routes to prevent password brute force
|
||||
* Generate 128 byte password for MySQL multi-db addon
|
||||
|
||||
[0.109.0]
|
||||
* Add Referrer-policy
|
||||
* Add tooltip for admin email field explaining it is local & private
|
||||
* Verify AMI instance id during DNS setup instead of admin account setup
|
||||
* Split platform and app data folders and get rid of btrfs volumes
|
||||
|
||||
[0.110.0]
|
||||
* Fix disk usage graphs
|
||||
* Add --data-dir to cloudron-setup that allows customizing data location
|
||||
* Add UI to restore from any app backup
|
||||
* (mysql) Use utf8mb4 encoding for databases and backups
|
||||
* Allow installing a new app from a backup
|
||||
* Fix download of large files (> 1GB)
|
||||
* Fix app backup regression
|
||||
|
||||
[0.120.0]
|
||||
* Update Docker to 17.03.1-ce
|
||||
* Rework backup backend logic
|
||||
* Add UI to download logs
|
||||
* Fix crash when checking mail dns settings
|
||||
* Allow backup retention duration to be configured
|
||||
* Add minio backend for backups
|
||||
* Fix issue where Cloudrons with errored apps won't back up when using fs backend
|
||||
* Fix DNS check issue where PTR records were read from hosts file
|
||||
|
||||
[0.120.1]
|
||||
* Fix managed Cloudron backup cleanup
|
||||
|
||||
[0.130.0]
|
||||
* Use Cloudron DNS server only for containers created by Cloudron
|
||||
* Make Cloudron always start even if DNS credentials are invalid
|
||||
* Show warning if DNS configuration is not valid
|
||||
* Drop the '.enc' extension for non-encrypted backups
|
||||
* Do not encrypt backups when the backup key is empty
|
||||
* Do a multipart S3 download for slow internet connections
|
||||
* Support naked domains as external location
|
||||
|
||||
[0.130.1]
|
||||
* Fix app configure dialog regression
|
||||
|
||||
[0.130.2]
|
||||
* Fix app configure dialog regression and dns setup screen
|
||||
|
||||
[0.130.3]
|
||||
* Show error message if setup fails due to reserved username
|
||||
* (security) Do not print password in the logs in the configure route
|
||||
* Fix restore of unencrypted backups
|
||||
* Fix bug where FS backups have incorrect extension for unencrypted backups
|
||||
|
||||
[0.140.0]
|
||||
* HTTP2 support
|
||||
* Condense the dns checks in the settings view
|
||||
* Document new app store submission guidelines
|
||||
|
||||
[0.150.0]
|
||||
* Disable dnsmasq on OVH
|
||||
* Scale redis memory based on the app's memory limit
|
||||
* (security) Do not print the ssl cert in debug logs
|
||||
* Add noop storage backend to temporarily disable backups
|
||||
* Replace native-dns module with dig to prevent spurious crashes
|
||||
* Cleanup unfinished and errored backups
|
||||
* Set a timelimit of 4 hours for backup to finish
|
||||
|
||||
[0.160.0]
|
||||
* Fix disk graphs when using device mapper
|
||||
* Prevent email view from flickering
|
||||
* Prepare for 1.0
|
||||
|
||||
[1.0.0]
|
||||
* Make selfhosting great again
|
||||
|
||||
[1.0.1]
|
||||
* Notification improvements
|
||||
|
||||
[1.0.2]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.0]
|
||||
* Add support for email catch-all
|
||||
* Support Cloudrons on subdomains
|
||||
|
||||
[1.1.1]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.2]
|
||||
* Notification improvements
|
||||
|
||||
[1.1.3]
|
||||
* Notification improvements
|
||||
|
||||
[1.2.0]
|
||||
* Relay emails optionally via external SMTP server email (mailgun, sendgrid etc)
|
||||
* (experimental) Preserve the docker storage driver across updates
|
||||
* Reduce mysql password length to 48
|
||||
|
||||
[1.2.1]
|
||||
* Set max ttl of unbound to 5 minutes
|
||||
* Fix issue where mail container does not cleanup LDAP connections properly
|
||||
* Update node to 6.11.1
|
||||
|
||||
[1.3.0]
|
||||
* Add option to configure robots.txt for each app from the web interface
|
||||
* Make sure zoneName is not lost across updates
|
||||
* Save manually triggered app backups under a datetime prefix
|
||||
* Optionally disable FROM validation check in the mail container. This will allow apps to send emails with arbitrary FROM addresses
|
||||
* Set X-Forwarded-Port in the reverse proxy. This fixes a problem with plugins of certain apps (like Jetpack)
|
||||
* Send a weekly activity digest about pending and applied Cloudron and app updates
|
||||
|
||||
[1.4.0]
|
||||
* (mail) Update Haraka to 2.8.14. Contains many stability fixes
|
||||
* Exoscale SOS can now be used for backup storage
|
||||
* Fix cron pattern that made Cloudron erroneously send out weekly digest mails every hour on wednesday
|
||||
* Add Cloudflare DNS backend (thanks @abhishek)
|
||||
* Ensure Cloudron is only installed on an EXT4 root file system (required by Docker)
|
||||
* Mark app package major releases as blocking and require approval by Cloudron admin
|
||||
|
||||
[1.4.1]
|
||||
* Do not display backup region when using minio and exoscale SOS
|
||||
* Fix javascript error in email view
|
||||
* Add html version of the digest email
|
||||
* Fix issue where collectd was collecting information about devicemapper mounts
|
||||
|
||||
[1.5.0]
|
||||
* Update node to 6.11.2
|
||||
* Add a new view to display platform and app logs
|
||||
* Rework web UI to use flexbox
|
||||
* Add motd message to warn admins not to run 'apt upgrade'
|
||||
* Switch default storage backend for new Cloudrons to overlay2
|
||||
* Add a custom graphite plugin to collect disk usage statistics
|
||||
* Rotate logs of all apps automatically
|
||||
|
||||
[1.6.0]
|
||||
* Allow apps to have 'network' capability (thanks @mehdi)
|
||||
* Fix crash in collectd disk usage collection script
|
||||
* Fix layout issues in update and oauth views
|
||||
* Use maxsize rule instead of size in logrotate configs
|
||||
* Make it possible to skip backups per-app
|
||||
* Hide restore button for noop backend
|
||||
* Add popups and warnings for noop backend
|
||||
* Add webterminal to shell into apps from the admin UI
|
||||
* Update Haraka for a few crash fixes
|
||||
|
||||
[1.6.1]
|
||||
* Patch release for 1.6.0 to fix regressions
|
||||
* Allow apps to have 'network' capability (thanks @mehdi)
|
||||
* Fix crash in collectd disk usage collection script
|
||||
* Fix layout issues in update and oauth views
|
||||
* Use maxsize rule instead of size in logrotate configs
|
||||
* Make it possible to skip backups per-app
|
||||
* Hide restore button for noop backend
|
||||
* Add popups and warnings for noop backend
|
||||
* Add webterminal to shell into apps from the admin UI
|
||||
* Update Haraka for a few crash fixes
|
||||
|
||||
[1.6.2]
|
||||
* Allow apps to have 'network' capability (thanks @mehdi)
|
||||
* Fix crash in collectd disk usage collection script
|
||||
* Fix layout issues in update and oauth views
|
||||
* Use maxsize rule instead of size in logrotate configs
|
||||
* Make it possible to skip backups per-app
|
||||
* Hide restore button for noop backend
|
||||
* Add popups and warnings for noop backend
|
||||
* Add webterminal to shell into apps from the admin UI
|
||||
* Update Haraka for a few crash fixes
|
||||
|
||||
[1.6.3]
|
||||
* Fixes selection issue while clicking on empty flexbox space
|
||||
* Indicate directories can be downloaded in the web terminal
|
||||
* Do not show app update indicator for normal users
|
||||
* Display email notice when using Cloudflare DNS
|
||||
* Set MX records correctly when using Cloudflare DNS
|
||||
* Fix bug where webterminal can incorrectly appear in main view
|
||||
* Do not crash if DNS credentials are invalid
|
||||
|
||||
[1.6.4]
|
||||
* More descriptive Postmark email relay form
|
||||
* Fix file upload in chrome
|
||||
* Support Ctrl/Cmd+v webterminal pasting
|
||||
* Ensure unbound always starts up
|
||||
* Add option to run app in repair mode
|
||||
|
||||
[1.6.5]
|
||||
* DigitalOcean DNS: Add pagination
|
||||
* Cloudflare DNS: Optimize listing of DNS entries
|
||||
* Update node to 6.11.3
|
||||
* App volumes can now be symlinked individually to external storage
|
||||
* Periodically check if IP is blacklisted and notify admins
|
||||
* Do not ask password when re-configuring app (since it is non-destructive)
|
||||
* Move mail data inside boxdata directory. This makes the no-op backend more useful
|
||||
* Remove collectd stats when app is uninstalled
|
||||
|
||||
[1.7.0]
|
||||
* Add rsync format for backups. This feature allows incremental backups
|
||||
* Add Google DNS backend (thanks @syn)
|
||||
* Add DigitalOcean spaces backup storage backend
|
||||
* Add Cloudscale and Exoscale as supported VPS providers
|
||||
* Display backup progress and status in the web interface
|
||||
* Preliminary IPv6 support
|
||||
* Add IP RBL status to web interface
|
||||
* Add auto-update pattern `Every wednesday night`
|
||||
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
|
||||
* Do not overwrite existing subdomain when app's location is changed
|
||||
* Add button to send test email
|
||||
* Fix crash in carbon which made graphs disappear on some Cloudrons
|
||||
|
||||
[1.7.1]
|
||||
* Add rsync format for backups. This feature allows incremental backups
|
||||
* Add Google DNS backend (thanks @syn)
|
||||
* Add DigitalOcean spaces backup storage backend
|
||||
* Add Cloudscale and Exoscale as supported VPS providers
|
||||
* Display backup progress and status in the web interface
|
||||
* Preliminary IPv6 support
|
||||
* Add IP RBL status to web interface
|
||||
* Add auto-update pattern `Every wednesday night`
|
||||
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
|
||||
* Do not overwrite existing subdomain when app's location is changed
|
||||
* Add button to send test email
|
||||
* Fix crash in carbon which made graphs disappear on some Cloudrons
|
||||
|
||||
[1.7.2]
|
||||
* Add rsync format for backups. This feature allows incremental backups
|
||||
* Add Google DNS backend (thanks @syn)
|
||||
* Add Cloudscale and Exoscale as supported VPS providers
|
||||
* Display backup progress and status in the web interface
|
||||
* Preliminary IPv6 support
|
||||
* Add IP RBL status to web interface
|
||||
* Add auto-update pattern `Every wednesday night`
|
||||
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
|
||||
* Do not overwrite existing subdomain when app's location is changed
|
||||
* Add button to send test email
|
||||
* Fix crash in carbon which made graphs disappear on some Cloudrons
|
||||
* Fix issue where OAuth SSO did not work when alternate domain was used
|
||||
|
||||
[1.7.3]
|
||||
* Add rsync format for backups. This feature allows incremental backups
|
||||
* Add Google DNS backend (thanks @syn)
|
||||
* Add Cloudscale and Exoscale as supported VPS providers
|
||||
* Display backup progress and status in the web interface
|
||||
* Preliminary IPv6 support
|
||||
* Add IP RBL status to web interface
|
||||
* Add auto-update pattern `Every wednesday night`
|
||||
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
|
||||
* Do not overwrite existing subdomain when app's location is changed
|
||||
* Add button to send test email
|
||||
* Fix crash in carbon which made graphs disappear on some Cloudrons
|
||||
* Fix issue where OAuth SSO did not work when alternate domain was used
|
||||
|
||||
[1.7.4]
|
||||
* Add rsync format for backups. This feature allows incremental backups
|
||||
* Add Google DNS backend (thanks @syn)
|
||||
* Add DigitalOcean spaces backup storage backend
|
||||
* Add Cloudscale and Exoscale as supported VPS providers
|
||||
* Display backup progress and status in the web interface
|
||||
* Preliminary IPv6 support
|
||||
* Add IP RBL status to web interface
|
||||
* Add auto-update pattern `Every wednesday night`
|
||||
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
|
||||
* Do not overwrite existing subdomain when app's location is changed
|
||||
* Add button to send test email
|
||||
* Fix crash in carbon which made graphs disappear on some Cloudrons
|
||||
* Fix issue where OAuth SSO did not work when alternate domain was used
|
||||
* Changelog is now rendered in markdown format
|
||||
|
||||
[1.7.5]
|
||||
* Expose a TLS relay port from mail container for Go applications
|
||||
|
||||
[1.7.6]
|
||||
* Port bindings cannot be configured in update route anymore
|
||||
* Implement LDAP group compare
|
||||
* Pre-releases are now offered by appstore and not handled in box code anymore
|
||||
* LDAP pagination support. This will fix the warnings in NextCloud and Rocket.Chat
|
||||
* Check if directories can be created in the backup directory
|
||||
* Do not set the HTTPS agent when using HTTP with minio backup backend
|
||||
* Fix regression where a new domain config could not be set in the UI
|
||||
* New mail container release that fixes email sending with SOGo
|
||||
* Show 404 page for unknown domains
|
||||
|
||||
[1.7.7]
|
||||
* Allow setting app memory till memory limit
|
||||
* Make the dkim selector dynamic
|
||||
* Fix issue where app update dialog did not close
|
||||
* Fix LE cert renewal failures
|
||||
* Send user and cert info in digest emails
|
||||
* Send oom, app failures and other important mails to cloudron owner's alt mail
|
||||
|
||||
[1.8.0]
|
||||
* Fix group email bounce when a group has users that have not signed up yet
|
||||
* Do not restrict app memory limit to 4GB
|
||||
* Fix display of the latest backup in the weekly digest
|
||||
* Add UI to select users for access restriction
|
||||
* Update docker to 17.09
|
||||
* Update node to 6.11.5
|
||||
* Display package version of installed apps in the info dialog
|
||||
|
||||
[1.8.1]
|
||||
* Update node modules
|
||||
* Allow a restore operation if app is already restoring
|
||||
* Remove pre-install bundle support since it was hardly used
|
||||
* Make the test email mail address configurable
|
||||
* Allow admins to access all apps
|
||||
* Send feedback via appstore API (instead of email)
|
||||
* Show documentation URL in the app info dialog
|
||||
* Update Let's Encrypt agreement URL (https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf)
|
||||
|
||||
[1.8.2]
|
||||
* Update node modules
|
||||
* Allow a restore operation if app is already restoring
|
||||
* Remove pre-install bundle support since it was hardly used
|
||||
* Make the test email mail address configurable
|
||||
* Allow admins to access all apps
|
||||
* Send feedback via appstore API (instead of email)
|
||||
* Show documentation URL in the app info dialog
|
||||
* Update Let's Encrypt agreement URL (https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf)
|
||||
|
||||
[1.8.3]
|
||||
* Ensure domain database record exists
|
||||
|
||||
[1.8.4]
|
||||
* Fix issue where internal email was not delivered when email relay is enabled
|
||||
* Fix display of DNS records when email relay is enabled
|
||||
|
||||
[1.8.5]
|
||||
* Fix issues where unused addons were not cleaned on an app update causing uninstall to fail
|
||||
* Change UI text from 'Waiting' to 'Pending'
|
||||
|
||||
[1.9.0]
|
||||
* Prepare Cloudron for supporting multiple domains
|
||||
* Add Cloudron restore UI
|
||||
* Do not put app in errored state if backup fails
|
||||
* Display backup progress in CaaS
|
||||
* Add Google Cloud Storage backend for backups
|
||||
* Update node to 8.9.3 LTS
|
||||
* Set max email recipient limit (in outgoing emails) to 500
|
||||
|
||||
[1.9.1]
|
||||
* Prepare Cloudron for supporting multiple domains
|
||||
* Add Cloudron restore UI
|
||||
* Do not put app in errored state if backup fails
|
||||
* Display backup progress in CaaS
|
||||
* Add Google Cloud Storage backend for backups
|
||||
* Update node to 8.9.3 LTS
|
||||
* Set max email recipient limit (in outgoing emails) to 500
|
||||
* Put terminal and app logs viewer to separate window
|
||||
|
||||
[1.9.2]
|
||||
* Prepare Cloudron for supporting multiple domains
|
||||
* Add Cloudron restore UI
|
||||
* Do not put app in errored state if backup fails
|
||||
* Display backup progress in CaaS
|
||||
* Add Google Cloud Storage backend for backups
|
||||
* Update node to 8.9.3 LTS
|
||||
* Set max email recipient limit (in outgoing emails) to 500
|
||||
* Put terminal and app logs viewer to separate window
|
||||
|
||||
[1.9.3]
|
||||
* Prepare Cloudron for supporting multiple domains
|
||||
* Add Cloudron restore UI
|
||||
* Do not put app in errored state if backup fails
|
||||
* Display backup progress in CaaS
|
||||
* Add Google Cloud Storage backend for backups
|
||||
* Update node to 8.9.3 LTS
|
||||
* Set max email recipient limit (in outgoing emails) to 500
|
||||
* Put terminal and app logs viewer to separate window
|
||||
|
||||
[1.9.4]
|
||||
* Fix typo causing LE cert renewals to fail
|
||||
|
||||
|
||||
@@ -630,7 +630,7 @@ state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
box
|
||||
Copyright (C) 2016 Cloudron UG
|
||||
Copyright (C) 2016,2017 Cloudron UG
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
|
||||
@@ -9,10 +9,6 @@ a complex task.
|
||||
We are building the ultimate platform for self-hosting web apps. The Cloudron allows
|
||||
anyone to effortlessly host web applications on their server on their own terms.
|
||||
|
||||
Support us on
|
||||
[](https://flattr.com/submit/auto?user_id=cloudron&url=https://cloudron.io&title=Cloudron&tags=opensource&category=software)
|
||||
or [pay us a coffee](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=8982CKNM46D8U)
|
||||
|
||||
## Features
|
||||
|
||||
* Single click install for apps. Check out the [App Store](https://cloudron.io/appstore.html).
|
||||
@@ -33,9 +29,9 @@ or [pay us a coffee](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_
|
||||
* Trivially migrate to another server keeping your apps and data (for example, switch your
|
||||
infrastructure provider or move to a bigger server).
|
||||
|
||||
* Comprehensive [REST API](https://cloudron.io/references/api.html).
|
||||
* Comprehensive [REST API](https://cloudron.io/documentation/developer/api/).
|
||||
|
||||
* [CLI](https://git.cloudron.io/cloudron/cloudron-cli) to configure apps.
|
||||
* [CLI](https://cloudron.io/documentation/cli/) to configure apps.
|
||||
|
||||
* Alerts, audit logs, graphs, dns management ... and much more
|
||||
|
||||
@@ -46,32 +42,21 @@ Try our demo at https://my-demo.cloudron.me (username: cloudron password: cloudr
|
||||
## Installing
|
||||
|
||||
You can install the Cloudron platform on your own server or get a managed server
|
||||
from cloudron.io.
|
||||
from cloudron.io. In either case, the Cloudron platform will keep your server and
|
||||
apps up-to-date and secure.
|
||||
|
||||
* [Selfhosting](https://cloudron.io/references/selfhosting.html)
|
||||
* [Managed Hosting](https://cloudron.io/pricing.html)
|
||||
* [Selfhosting](https://cloudron.io/documentation/installation/) - [Pricing](https://cloudron.io/pricing.html)
|
||||
* [Managed Hosting](https://cloudron.io/managed.html)
|
||||
|
||||
## Documentation
|
||||
|
||||
* [User manual](https://cloudron.io/references/usermanual.html)
|
||||
* [Developer docs](https://cloudron.io/documentation.html)
|
||||
* [Architecture](https://cloudron.io/references/architecture.html)
|
||||
* [Documentation](https://cloudron.io/documentation/)
|
||||
|
||||
## Related repos
|
||||
|
||||
The [base image repo](https://git.cloudron.io/cloudron/docker-base-image) is the parent image of all
|
||||
the containers in the Cloudron.
|
||||
|
||||
The [graphite repo](https://git.cloudron.io/cloudron/docker-graphite) contains the graphite code
|
||||
that collects metrics for graphs.
|
||||
|
||||
The addons are located in separate repositories
|
||||
* [Redis](https://git.cloudron.io/cloudron/redis-addon)
|
||||
* [Postgresql](https://git.cloudron.io/cloudron/postgresql-addon)
|
||||
* [MySQL](https://git.cloudron.io/cloudron/mysql-addon)
|
||||
* [Mongodb](https://git.cloudron.io/cloudron/mongodb-addon)
|
||||
* [Mail](https://git.cloudron.io/cloudron/mail-addon)
|
||||
|
||||
## Community
|
||||
|
||||
* [Chat](https://chat.cloudron.io/)
|
||||
|
||||
@@ -26,7 +26,6 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
|
||||
apt-get -y install \
|
||||
acl \
|
||||
awscli \
|
||||
btrfs-tools \
|
||||
build-essential \
|
||||
cron \
|
||||
curl \
|
||||
@@ -40,62 +39,36 @@ apt-get -y install \
|
||||
rcconf \
|
||||
swaks \
|
||||
unattended-upgrades \
|
||||
unbound
|
||||
unbound \
|
||||
xfsprogs
|
||||
|
||||
# this ensures that unattended upgrades are enabled, if it was disabled during ubuntu install time (see #346)
|
||||
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
|
||||
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
|
||||
|
||||
echo "==> Installing node.js"
|
||||
mkdir -p /usr/local/node-6.9.2
|
||||
curl -sL https://nodejs.org/dist/v6.9.2/node-v6.9.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.9.2
|
||||
ln -sf /usr/local/node-6.9.2/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-6.9.2/bin/npm /usr/bin/npm
|
||||
mkdir -p /usr/local/node-8.9.3
|
||||
curl -sL https://nodejs.org/dist/v8.9.3/node-v8.9.3-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-8.9.3
|
||||
ln -sf /usr/local/node-8.9.3/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-8.9.3/bin/npm /usr/bin/npm
|
||||
apt-get install -y python # Install python which is required for npm rebuild
|
||||
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
docker_key="-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
|
||||
ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
|
||||
mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
|
||||
TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
|
||||
dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
|
||||
X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
|
||||
HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
|
||||
NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
|
||||
hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
|
||||
65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
|
||||
zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
|
||||
tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
|
||||
Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
|
||||
AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
|
||||
Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
|
||||
1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
|
||||
uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
|
||||
0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
|
||||
L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
|
||||
YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
|
||||
7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
|
||||
jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
|
||||
HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
|
||||
MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
|
||||
TvBR8Q==
|
||||
=Fm3p
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
"
|
||||
echo "$docker_key" | apt-key add -
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
|
||||
apt-get -y update
|
||||
|
||||
# create systemd drop-in file
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/docker.deb
|
||||
rm /tmp/docker.deb
|
||||
|
||||
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
apt-mark hold docker-engine # do not update docker
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "devicemapper" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of devicemapper"
|
||||
if [[ "${storage_driver}" != "overlay2" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of overlay2"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -124,3 +97,11 @@ if ! apt-get install -y collectd collectd-utils; then
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflicts with unbound)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
|
||||
|
Before Width: | Height: | Size: 33 KiB |
|
Before Width: | Height: | Size: 22 KiB |
|
Before Width: | Height: | Size: 9.0 KiB |
|
Before Width: | Height: | Size: 28 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 18 KiB |
|
Before Width: | Height: | Size: 74 KiB |
|
Before Width: | Height: | Size: 30 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 51 KiB |
|
Before Width: | Height: | Size: 132 KiB |
|
Before Width: | Height: | Size: 71 KiB |
|
Before Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 19 KiB |
|
Before Width: | Height: | Size: 5.7 KiB |
|
Before Width: | Height: | Size: 16 KiB |
|
Before Width: | Height: | Size: 49 KiB |
|
Before Width: | Height: | Size: 11 KiB |
|
Before Width: | Height: | Size: 36 KiB |
|
Before Width: | Height: | Size: 9.4 KiB |
|
Before Width: | Height: | Size: 9.7 KiB |
@@ -1,320 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Addons are services like database, authentication, email, caching that are part of the
|
||||
Cloudron runtime. Setup, provisioning, scaling and maintenance of addons is taken care of
|
||||
by the runtime.
|
||||
|
||||
The fundamental idea behind addons is to allow sharing of Cloudron resources across applications.
|
||||
For example, a single MySQL server instance can be used across multiple apps. The Cloudron
|
||||
runtime sets up addons in such a way that apps are isolated from each other.
|
||||
|
||||
# Using Addons
|
||||
|
||||
Addons are opt-in and must be specified in the [Cloudron Manifest](/references/manifest.html).
|
||||
When the app runs, environment variables contain the necessary information to access the addon.
|
||||
For example, the mysql addon sets the `MYSQL_URL` environment variable which is the
|
||||
connection string that can be used to connect to the database.
|
||||
|
||||
When working with addons, developers need to remember the following:
|
||||
* Environment variables are subject to change every time the app restarts. This can happen if the
|
||||
Cloudron is rebooted or restored or the app crashes or an addon is re-provisioned. For this reason,
|
||||
applications must never cache the value of environment variables across restarts.
|
||||
|
||||
* Addons must be setup or updated on each application start up. Most applications use DB migration frameworks
|
||||
for this purpose to setup and update the DB schema.
|
||||
|
||||
* Addons are configured in the [addons section](/references/manifest.html#addons) of the manifest as below:
|
||||
```
|
||||
{
|
||||
...
|
||||
"addons": {
|
||||
"oauth": { },
|
||||
"redis" : { }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# All addons
|
||||
|
||||
## email
|
||||
|
||||
This addon allows an app to send and receive emails on behalf of the user. The intended use case is webmail applications.
|
||||
|
||||
If an app wants to send mail (e.g notifications), it must use the [sendmail](/references/addons#sendmail)
|
||||
addon. If the app wants to receive email (e.g user replying to notification), it must use the
|
||||
[recvmail](/references/addons#recvmail) addon instead.
|
||||
|
||||
Apps using the IMAP and ManageSieve services below must be prepared to accept self-signed certificates (this is not a problem
|
||||
because these are addresses internal to the Cloudron).
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_SMTP_SERVER= # SMTP server IP or hostname. Supports STARTTLS (TLS upgrade is enforced).
|
||||
MAIL_SMTP_PORT= # SMTP server port
|
||||
MAIL_IMAP_SERVER= # IMAP server IP or hostname. TLS required.
|
||||
MAIL_IMAP_PORT= # IMAP server port
|
||||
MAIL_SIEVE_SERVER= # ManageSieve server IP or hostname. TLS required.
|
||||
MAIL_SIEVE_PORT= # ManageSieve server port
|
||||
MAIL_DOMAIN= # Domain of the mail server
|
||||
```
|
||||
|
||||
## ldap
|
||||
|
||||
This addon provides LDAP based authentication via LDAP version 3.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
LDAP_SERVER= # ldap server IP
|
||||
LDAP_PORT= # ldap server port
|
||||
LDAP_URL= # ldap url of the form ldap://ip:port
|
||||
LDAP_USERS_BASE_DN= # ldap users base dn of the form ou=users,dc=cloudron
|
||||
LDAP_GROUPS_BASE_DN= # ldap groups base dn of the form ou=groups,dc=cloudron
|
||||
LDAP_BIND_DN= # DN to perform LDAP requests
|
||||
LDAP_BIND_PASSWORD= # Password to perform LDAP requests
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `ldapsearch` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
# list users
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}"
|
||||
|
||||
# list users with authentication (Substitute username and password below)
|
||||
> ldapsearch -x -D cn=<username>,${LDAP_USERS_BASE_DN} -w <password> -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}"
|
||||
|
||||
# list admins
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_USERS_BASE_DN}" "memberof=cn=admins,${LDAP_GROUPS_BASE_DN}"
|
||||
|
||||
# list groups
|
||||
> ldapsearch -x -h "${LDAP_SERVER}" -p "${LDAP_PORT}" -b "${LDAP_GROUPS_BASE_DN}"
|
||||
```
|
||||
|
||||
## localstorage
|
||||
|
||||
Since all Cloudron apps run within a read-only filesystem, this addon provides a writeable folder under `/app/data/`.
|
||||
All contents in that folder are included in the backup. On first run, this folder will be empty. Files added in this path
|
||||
as part of the app's image (Dockerfile) won't be present. A common pattern is to create the directory structure required by
|
||||
the app as part of the app's startup script.
|
||||
|
||||
The permissions and ownership of data within that directory are not guaranteed to be preserved. For this reason, each app
|
||||
has to restore permissions as required by the app as part of the app's startup script.
|
||||
|
||||
If the app is running under the recommended `cloudron` user, this can be achieved with:
|
||||
```
|
||||
chown -R cloudron:cloudron /app/data
|
||||
```
|
||||
|
||||
## mongodb
|
||||
|
||||
By default, this addon provides MongoDB 2.6.3.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MONGODB_URL= # mongodb url
|
||||
MONGODB_USERNAME= # username
|
||||
MONGODB_PASSWORD= # password
|
||||
MONGODB_HOST= # server IP/hostname
|
||||
MONGODB_PORT= # server port
|
||||
MONGODB_DATABASE= # database name
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `mongo` shell within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
# mongo -u "${MONGODB_USERNAME}" -p "${MONGODB_PASSWORD}" ${MONGODB_HOST}:${MONGODB_PORT}/${MONGODB_DATABASE}
|
||||
|
||||
```
|
||||
## mysql
|
||||
|
||||
By default, this addon provides a single database on MySQL 5.6.19. The database is already created and the application
|
||||
only needs to create the tables.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MYSQL_URL= # the mysql url (only set when using a single database, see below)
|
||||
MYSQL_USERNAME= # username
|
||||
MYSQL_PASSWORD= # password
|
||||
MYSQL_HOST= # server IP/hostname
|
||||
MYSQL_PORT= # server port
|
||||
MYSQL_DATABASE= # database name (only set when using a single database, see below)
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `mysql` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> mysql --user=${MYSQL_USERNAME} --password=${MYSQL_PASSWORD} --host=${MYSQL_HOST} ${MYSQL_DATABASE}
|
||||
|
||||
```
|
||||
|
||||
The `multipleDatabases` option can be set to `true` if the app requires more than one database. When enabled,
|
||||
the following environment variables are injected:
|
||||
|
||||
```
|
||||
MYSQL_DATABASE_PREFIX= # prefix to use to create databases
|
||||
```
|
||||
|
||||
## oauth
|
||||
|
||||
The Cloudron OAuth 2.0 provider can be used in an app to implement Single Sign-On.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
OAUTH_CLIENT_ID= # client id
|
||||
OAUTH_CLIENT_SECRET= # client secret
|
||||
```
|
||||
|
||||
The callback url required for the OAuth transaction can be constructed from the environment variables below:
|
||||
|
||||
```
|
||||
APP_DOMAIN= # hostname of the app
|
||||
APP_ORIGIN= # origin of the app of the form https://domain
|
||||
API_ORIGIN= # origin of the OAuth provider of the form https://my-cloudrondomain
|
||||
```
|
||||
|
||||
OAuth2 URLs can be constructed as follows:
|
||||
|
||||
```
|
||||
AuthorizationURL = ${API_ORIGIN}/api/v1/oauth/dialog/authorize # see above for API_ORIGIN
|
||||
TokenURL = ${API_ORIGIN}/api/v1/oauth/token
|
||||
```
|
||||
|
||||
The token obtained via OAuth has a restricted scope wherein they can only access the [profile API](/references/api.html#profile). This restriction
|
||||
is so that apps cannot make undesired changes to the user's Cloudron.
|
||||
|
||||
We currently provide OAuth2 integration for Ruby [omniauth](https://github.com/cloudron-io/omniauth-cloudron) and Node.js [passport](https://github.com/cloudron-io/passport-cloudron).
|
||||
|
||||
## postgresql
|
||||
|
||||
By default, this addon provides PostgreSQL 9.4.4.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
POSTGRESQL_URL= # the postgresql url
|
||||
POSTGRESQL_USERNAME= # username
|
||||
POSTGRESQL_PASSWORD= # password
|
||||
POSTGRESQL_HOST= # server name
|
||||
POSTGRESQL_PORT= # server port
|
||||
POSTGRESQL_DATABASE= # database name
|
||||
```
|
||||
|
||||
The postgresql addon whitelists the hstore and pg_trgm extensions to be installable by the database owner.
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `psql` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d ${POSTGRESQL_DATABASE}
|
||||
```
|
||||
|
||||
## recvmail
|
||||
|
||||
The recvmail addon can be used to receive email for the application.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_IMAP_SERVER= # the IMAP server. this can be an IP or DNS name
|
||||
MAIL_IMAP_PORT= # the IMAP server port
|
||||
MAIL_IMAP_USERNAME= # the username to use for authentication
|
||||
MAIL_IMAP_PASSWORD= # the password to use for authentication
|
||||
MAIL_TO= # the "To" address to use
|
||||
MAIL_DOMAIN=             # the domain for which email will be received
|
||||
```
|
||||
|
||||
The IMAP server only accepts TLS connections. The app must be prepared to accept self-signed certs (this is not a problem because the
|
||||
imap address is internal to the Cloudron).
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `openssl` tool within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> openssl s_client -connect "${MAIL_IMAP_SERVER}:${MAIL_IMAP_PORT}" -crlf
|
||||
```
|
||||
|
||||
The IMAP command `? LOGIN username password` can then be used to test the authentication.
|
||||
|
||||
## redis
|
||||
|
||||
By default, this addon provides redis 2.8.13. The redis is configured to be persistent and data is preserved across updates
|
||||
and restarts.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
REDIS_URL= # the redis url
|
||||
REDIS_HOST= # server name
|
||||
REDIS_PORT= # server port
|
||||
REDIS_PASSWORD= # password
|
||||
```
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `redis-cli` client within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> redis-cli -h "${REDIS_HOST}" -p "${REDIS_PORT}" -a "${REDIS_PASSWORD}"
|
||||
```
|
||||
|
||||
## scheduler
|
||||
|
||||
The scheduler addon can be used to run tasks at periodic intervals (cron).
|
||||
|
||||
Scheduler can be configured as below:
|
||||
```
|
||||
"scheduler": {
|
||||
"update_feeds": {
|
||||
"schedule": "*/5 * * * *",
|
||||
"command": "/app/code/update_feed.sh"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
In the above example, `update_feeds` is the name of the task and is an arbitrary string.
|
||||
|
||||
`schedule` values must fall within the following ranges:
|
||||
|
||||
* Minutes: 0-59
|
||||
* Hours: 0-23
|
||||
* Day of Month: 1-31
|
||||
* Months: 0-11
|
||||
* Day of Week: 0-6
|
||||
|
||||
_NOTE_: scheduler does not support seconds
|
||||
|
||||
`schedule` supports ranges (like standard cron):
|
||||
|
||||
* Asterisk. E.g. *
|
||||
* Ranges. E.g. 1-3,5
|
||||
* Steps. E.g. */2
|
||||
|
||||
`command` is executed through a shell (sh -c). The command runs in the same launch environment
|
||||
as the application. Environment variables, volumes (`/tmp` and `/run`) are all
|
||||
shared with the main application.
|
||||
|
||||
If a task is still running when a new instance of the task is scheduled to be started, the previous
|
||||
task instance is killed.
|
||||
|
||||
|
||||
## sendmail
|
||||
|
||||
The sendmail addon can be used to send email from the application.
|
||||
|
||||
Exported environment variables:
|
||||
```
|
||||
MAIL_SMTP_SERVER= # the mail server (relay) that apps can use. this can be an IP or DNS name
|
||||
MAIL_SMTP_PORT= # the mail server port
|
||||
MAIL_SMTP_USERNAME= # the username to use for authentication as well as the `from` username when sending emails
|
||||
MAIL_SMTP_PASSWORD= # the password to use for authentication
|
||||
MAIL_FROM= # the "From" address to use
|
||||
MAIL_DOMAIN= # the domain name to use for email sending (i.e username@domain)
|
||||
```
|
||||
|
||||
The SMTP server does not require STARTTLS. If STARTTLS is used, the app must be prepared to accept self-signed certs.
|
||||
|
||||
For debugging, [cloudron exec](https://www.npmjs.com/package/cloudron) can be used to run the `swaks` tool within the context of the app:
|
||||
```
|
||||
cloudron exec
|
||||
|
||||
> swaks --server "${MAIL_SMTP_SERVER}" -p "${MAIL_SMTP_PORT}" --from "${MAIL_SMTP_USERNAME}@${MAIL_DOMAIN}" --body "Test mail from cloudron app at $(hostname -f)" --auth-user "${MAIL_SMTP_USERNAME}" --auth-password "${MAIL_SMTP_PASSWORD}"
|
||||
```
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
# Introduction
|
||||
|
||||
The Cloudron platform is designed to easily install and run web applications.
|
||||
The application architecture is designed to let the Cloudron take care of system
|
||||
operations like updates, backups, firewalls, domain management, certificate management
|
||||
etc. This allows app developers to focus on their application logic instead of deployment.
|
||||
|
||||
At a high level, an application provides an `image` and a `manifest`. The image is simply
|
||||
a docker image that is a bundle of the application code and its dependencies. The manifest
|
||||
file specifies application runtime requirements like database type and authentication scheme.
|
||||
It also provides meta information for display purposes in the [Cloudron Store](/appstore.html)
|
||||
like the title, icon and pricing.
|
||||
|
||||
Web applications like blogs, wikis, password managers, code hosting, document editing,
|
||||
file syncers, notes, email, forums are a natural fit for the Cloudron. Decentralized "social"
|
||||
networks are also good app candidates for the Cloudron.
|
||||
|
||||
# Image
|
||||
|
||||
Application images are created using [Docker](https://www.docker.io). Docker provides a way
|
||||
to package (and containerize) the application as a filesystem which contains its code, system libraries
|
||||
and just about anything the app requires. This flexible approach allows the application to use just
|
||||
about any language or framework.
|
||||
|
||||
Application images are instantiated as `containers`. Cloudron can run one or more isolated instances
|
||||
of the same application as one or more containers.
|
||||
|
||||
Containerizing your application provides the following benefits:
|
||||
* Apps run in the familiar environment that they were packaged for and can have libraries
|
||||
and packages that are independent of the host OS.
|
||||
* Containers isolate applications from one another.
|
||||
|
||||
The [base image](/references/baseimage.html) is the parent of all app images.
|
||||
|
||||
# Cloudron Manifest
|
||||
|
||||
Each app provides a `CloudronManifest.json` that specifies information required for the
|
||||
`Cloudron Store` and for the installation of the image in the Cloudron.
|
||||
|
||||
Information required for container installation includes:
|
||||
* List of `addons` like databases, caches, authentication mechanisms and file systems
|
||||
* The http port on which the container is listening for incoming requests
|
||||
* Additional TCP ports on which the application is listening to (for e.g., git, ssh,
|
||||
irc protocols)
|
||||
|
||||
Information required for the Cloudron Store includes:
|
||||
* Unique App Id
|
||||
* Title
|
||||
* Version
|
||||
* Logo
|
||||
|
||||
See the [manifest reference](/references/manifest.html) for more information.
|
||||
|
||||
# Addons
|
||||
|
||||
Addons are services like database, authentication, email, caching that are part of the
|
||||
Cloudron. Setup, provisioning, scaling and maintenance of addons is taken care of by the
|
||||
Cloudron.
|
||||
|
||||
The fundamental idea behind addons is to allow resource sharing across applications.
|
||||
For example, a single MySQL server instance can be used across multiple apps. The Cloudron
|
||||
sets up addons in such a way that apps are isolated from each other.
|
||||
|
||||
Addons are opt-in and must be specified in the Cloudron Manifest. When the app runs, environment
|
||||
variables contain the necessary information to access the addon. See the
|
||||
[addon reference](/references/addons.html) for more information.
|
||||
|
||||
# Authentication
|
||||
|
||||
The Cloudron provides a centralized dashboard to manage users, roles and permissions. Applications
|
||||
do not create or manage user credentials on their own and instead use one of the various
|
||||
authentication strategies provided by the Cloudron.
|
||||
|
||||
Authentication strategies include OAuth 2.0, LDAP or Simple Auth. See the
|
||||
[Authentication Reference](/references/authentication.html) for more information.
|
||||
|
||||
Authorizing users is application specific and it is only authentication that is delegated to the
|
||||
Cloudron.
|
||||
|
||||
# Cloudron Store
|
||||
|
||||
Cloudron Store provides a market place to publish and optionally monetize your app. Submitting to the
|
||||
Cloudron Store enables any Cloudron user to discover, purchase and install your application with
|
||||
a few clicks.
|
||||
|
||||
# What next?
|
||||
|
||||
* [Package an existing app for the Cloudron](/tutorials/packaging.html)
|
||||
@@ -1,105 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Cloudron provides a centralized dashboard to manage users, roles and permissions. Applications
|
||||
do not create or manage user credentials on their own and instead use one of the various
|
||||
authentication strategies provided by the Cloudron.
|
||||
|
||||
Note that authentication only identifies a user and does not indicate if the user is authorized
|
||||
to perform an action in the application. Authorizing users is application specific and must be
|
||||
implemented by the application.
|
||||
|
||||
# Users & Admins
|
||||
|
||||
Cloudron user management is intentionally very simple. The owner (first user) of the
|
||||
Cloudron is `admin` by default. The `admin` role allows one to install, uninstall and reconfigure
|
||||
applications on the Cloudron.
|
||||
|
||||
A Cloudron `admin` can create one or more users. Cloudron users can login and use any of the installed
|
||||
apps in the Cloudron. In general, adding a cloudron user is akin to adding a person from one's family
|
||||
or organization or team because such users gain access to all apps in the Cloudron. Removing a user
|
||||
immediately revokes access from all apps.
|
||||
|
||||
A Cloudron `admin` can give admin privileges to one or more Cloudron users.
|
||||
|
||||
Each Cloudron user has a unique `username` and an `email`.
|
||||
|
||||
# Strategies
|
||||
|
||||
Cloudron provides multiple authentication strategies.
|
||||
|
||||
* OAuth 2.0 provided by the [OAuth addon](/references/addons.html#oauth)
|
||||
* LDAP provided by the [LDAP addon](/references/addons.html#ldap)
|
||||
|
||||
# Choosing a strategy
|
||||
|
||||
Applications can be broadly categorized based on their user management as follows:
|
||||
|
||||
* Multi-user aware
|
||||
* Such apps have a full fledged user system and support multiple users and groups.
|
||||
* These apps should use OAuth or LDAP.
|
||||
* LDAP and OAuth APIs allow apps to detect if the user is a cloudron `admin`. Apps should use this flag
|
||||
to show the application's admin panel for such users.
|
||||
|
||||
|
||||
* No user
|
||||
* Such apps have no concept of logged-in user.
|
||||
|
||||
* Single user
|
||||
* Such apps only have a single user who is usually also the `admin`.
|
||||
* These apps can use Simple Auth or LDAP since they can authenticate users with a simple HTTP or LDAP request.
|
||||
* Such apps _must_ set the `singleUser` property in the manifest which will restrict login to a single user
|
||||
(configurable through the Cloudron's admin panel).
|
||||
|
||||
# Public and Private apps
|
||||
|
||||
`Private` apps display content only when they have a signed-in user. These apps can choose one of the
|
||||
authentication strategies listed above.
|
||||
|
||||
`Public` apps display content to any visiting user (e.g a blog). These apps have a `login` url to allow
|
||||
the editors & admins to login. This path can be optionally set as the `configurePath` in the manifest for
|
||||
discoverability (for example, some blogs hide the login link).
|
||||
|
||||
Some apps allow the user to choose `private` or `public` mode or some other combination. Such configuration
|
||||
is done at app install time and cannot be changed using a settings interface. It is tempting to show the user
|
||||
a configuration dialog on first installation to switch the modes. This, however, leads the user to believe that
|
||||
this configuration can be changed at any time later. In the case where this setting can be changed dynamically
|
||||
from a settings ui in the app, it's better to simply put some sensible defaults and let the user discover
|
||||
the settings. In the case where such settings cannot be changed dynamically, it is best to simply publish two
|
||||
separate apps in the Cloudron store each with a different configuration.
|
||||
|
||||
# External User Registration
|
||||
|
||||
Some apps allow external users to register and create accounts. For example, a public company chat that
|
||||
can invite anyone to join or a blog allowing registered commenters.
|
||||
|
||||
Such applications must track Cloudron users and external registered users independently (for example, using a flag).
|
||||
As a rule of thumb, apps must provide separate login buttons for each of the possible user sources. Such a design prevents
|
||||
external users from (inadvertently) spoofing Cloudron users.
|
||||
|
||||
Naively handling user registration enables attacks of the following kind:
|
||||
* An external user named `foo` registers in the app.
|
||||
* A LDAP user named `foo` is later created on the Cloudron.
|
||||
* When a user named `foo` logs in, the app cannot determine the correct `foo` anymore. Making separate login buttons for each
|
||||
login source clears the confusion for both the user and the app.
|
||||
|
||||
# Userid
|
||||
|
||||
The preferred approach to track users in an application is a uuid or the Cloudron `username`.
|
||||
The `username` in Cloudron is unique and cannot be changed.
|
||||
|
||||
Tracking users using `email` field is error prone since that may be changed by the user anytime.
|
||||
|
||||
# Single Sign-on
|
||||
|
||||
Single sign-on (SSO) is a property where a user logged in one application automatically logs into
|
||||
another application without having to re-enter his credentials. When applications implement the
|
||||
OAuth strategy, they automatically take part in Cloudron SSO. When a user signs in one application with
|
||||
OAuth, they will automatically log into any other app implementing OAuth.
|
||||
|
||||
Conversely, signing off from one app, logs them off from all the apps.
|
||||
|
||||
# Security
|
||||
|
||||
The LDAP and Simple Auth strategies require the user to provide their plain text passwords to the
|
||||
application. This might be a cause of concern and app developers are thus highly encouraged to integrate
|
||||
with OAuth. OAuth also has the advantage of supporting Single Sign On.
|
||||
@@ -1,94 +0,0 @@
|
||||
# Overview
|
||||
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.10.0`.
|
||||
|
||||
The base image already contains most popular software packages including node, nginx, apache,
|
||||
ruby, PHP. Using the base image greatly reduces the size of app images.
|
||||
|
||||
The goal of the base image is simply to provide pre-downloaded software packages. The packages
|
||||
are not configured in any way and it's up to the application to configure them as they choose.
|
||||
For example, while `apache` is installed, there are no meaningful site configurations that the
|
||||
application can use.
|
||||
|
||||
# Packages
|
||||
|
||||
The following packages are part of the base image. If you need another version, you will have to
|
||||
install it yourself.
|
||||
|
||||
* Apache 2.4.18
|
||||
* Composer 1.2.0
|
||||
* Go 1.6.4, 1.7.5 (install under `/usr/local/go-<version>`)
|
||||
* Gunicorn 19.4.5
|
||||
* Java 1.8
|
||||
* Maven 3.3.9
|
||||
* Mongo 2.6.10
|
||||
* MySQL Client 5.7.17
|
||||
* nginx 1.10.0
|
||||
* Node 0.10.48, 0.12.18, 4.7.3, 6.9.5 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Perl 5.22.1
|
||||
* PHP 7.0.13
|
||||
* Postgresql client 9.5.4
|
||||
* Python 2.7.12
|
||||
* Redis 3.0.6
|
||||
* Ruby 2.3.1
|
||||
* sqlite3 3.11.0
|
||||
* Supervisor 3.2.0
|
||||
* uwsgi 2.0.12
|
||||
|
||||
# Inspecting the base image
|
||||
|
||||
The base image can be inspected by installing [Docker](https://docs.docker.com/installation/).
|
||||
|
||||
Once installed, pull down the base image locally using the following command:
|
||||
```
|
||||
docker pull cloudron/base:0.10.0
|
||||
```
|
||||
|
||||
To inspect the base image:
|
||||
```
|
||||
docker run -ti cloudron/base:0.10.0 /bin/bash
|
||||
```
|
||||
|
||||
*Note:* Please use `docker 1.9.0` or above to pull the base image. Doing otherwise results in a base
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.10.0` is `5ec8ca8525be`.
|
||||
|
||||
# The `cloudron` user
|
||||
|
||||
The base image contains a user named `cloudron` that apps can use to run their app.
|
||||
|
||||
It is good security practice to run apps as a non-privileged user.
|
||||
|
||||
# Env vars
|
||||
|
||||
The following environment variables are set as part of the application runtime.
|
||||
|
||||
## API_ORIGIN
|
||||
|
||||
API_ORIGIN is set to the HTTP(S) origin of this Cloudron's API. For example,
|
||||
`https://my-girish.cloudron.us`.
|
||||
|
||||
## APP_DOMAIN
|
||||
|
||||
APP_DOMAIN is set to the domain name of the application. For example, `app-girish.cloudron.us`.
|
||||
|
||||
## APP_ORIGIN
|
||||
|
||||
APP_ORIGIN is set to the HTTP(S) origin of the application. This is the origin which the
|
||||
user can use to reach the application. For example, `https://app-girish.cloudron.us`.
|
||||
|
||||
## CLOUDRON
|
||||
|
||||
CLOUDRON is always set to '1'. This is useful to write Cloudron specific code.
|
||||
|
||||
## WEBADMIN_ORIGIN
|
||||
|
||||
WEBADMIN_ORIGIN is set to the HTTP(S) origin of the Cloudron's web admin. For example,
|
||||
`https://my-girish.cloudron.us`.
|
||||
|
||||
# Node.js
|
||||
|
||||
The base image comes pre-installed with various node.js versions.
|
||||
|
||||
They can be used by adding `ENV PATH /usr/local/node-<version>/bin:$PATH`.
|
||||
|
||||
See [Packages](/references/baseimage.html#packages) for available versions.
|
||||
@@ -1,93 +0,0 @@
|
||||
# Best practices
|
||||
|
||||
## Overview
|
||||
|
||||
This document explains the spirit of what makes a Cloudron app.
|
||||
|
||||
## No Setup
|
||||
|
||||
Cloudron apps do not show a setup screen after installation and should choose reasonable
|
||||
defaults.
|
||||
|
||||
Databases, email configuration should be automatically picked up using [addons](/references/addons.html).
|
||||
|
||||
Admin role for the application can be detected dynamically using one of the [authentication](/references/authentication.html)
|
||||
strategies.
|
||||
|
||||
## Image
|
||||
|
||||
The Dockerfile contains a specification for building an application image.
|
||||
|
||||
* Install any required software packages in the Dockerfile.
|
||||
|
||||
* Create static configuration files in the Dockerfile.
|
||||
|
||||
* Create symlinks to dynamic configuration files under `/run` in the Dockerfile.
|
||||
|
||||
* Docker supports restarting processes natively. Should your application crash, it will
|
||||
be restarted automatically. If your application is a single process, you do not require
|
||||
any process manager.
|
||||
|
||||
* The main process must handle `SIGTERM` and forward it as required to child processes. `bash`
|
||||
does not automatically forward signals to child processes. For this reason, when using a startup
|
||||
shell script, remember to use `exec <app>` as the last line. Doing so will replace bash with your
|
||||
program and allows your program to handle signals as required.
|
||||
|
||||
* Use `supervisor`, `pm2` or any of the other process managers if your application has more
|
||||
than one component. This excludes web servers like apache, nginx which can already manage their
|
||||
children by themselves. Be sure to pick a process manager that forwards signals to child processes.
|
||||
|
||||
* Disable auto updates for apps. Updates must be triggered through the Cloudron Store. This allows the admin
|
||||
to manage updates and downtime in a central location (the Cloudron Webadmin).
|
||||
|
||||
## File system
|
||||
|
||||
The Cloudron runs the application image as read-only. The app can only write to the following directories:
|
||||
|
||||
* `/tmp` - use this for temporary files.
|
||||
|
||||
* `/run` - use this for runtime configuration and any dynamic data.
|
||||
|
||||
* `/app/data` - When the `localstorage` addon is enabled, any data under this directory is automatically backed up.
|
||||
|
||||
## Logging
|
||||
|
||||
Cloudron applications stream their logs to stdout and stderr. In contrast to logging
|
||||
to files, this approach has many advantages:
|
||||
|
||||
* App does not need to rotate logs and the Cloudron takes care of managing logs
|
||||
* App does not need special mechanism to release log file handles (on a log rotate)
|
||||
* Integrates better with tooling like `cloudron cli`
|
||||
|
||||
This document gives you some recipes for configuring popular libraries to log to stdout. See
|
||||
[base image](/references/baseimage.html#configuring) on how to configure various libraries to log to stdout/stderr.
|
||||
|
||||
|
||||
## Memory
|
||||
|
||||
By default, applications get 256MB RAM (including swap). This can be changed using the `memoryLimit` field in the manifest.
|
||||
|
||||
Design your application runtime for concurrent use by 10s of users. The Cloudron is not designed for concurrent access by
|
||||
100s or 1000s of users.
|
||||
|
||||
## Startup
|
||||
|
||||
* Apps must not present a post-installation screen on first run. It should be already pre-configured for
|
||||
a specific purpose.
|
||||
|
||||
* Do not run as `root`. Apps can use the `cloudron` user which is part of the [base image](/references/baseimage.html)
|
||||
for this purpose or create their own.
|
||||
|
||||
* When using the `localstorage` addon, the application must change the ownership of files in `/app/data` as desired using `chown`. This
|
||||
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
|
||||
updates.
|
||||
|
||||
* Addon information (mail, database) is exposed as environment variables. An application must use these values directly
|
||||
and not cache them across restarts. If the variables are stored in a configuration file, then the configuration file
|
||||
must be regenerated on every application start. This is usually done using a configuration template that is patched
|
||||
on every startup.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
This saves the user from having to manage separate set of users for different apps.
|
||||
@@ -1,47 +0,0 @@
|
||||
# Cloudron Button
|
||||
|
||||
The `Cloudron Button` allows anyone to install an application with
|
||||
the click of a button on their Cloudron.
|
||||
|
||||
The button can be added to just about any website including the application's website
|
||||
and README.md files in GitHub repositories.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
The `Cloudron Button` is intended to work only for applications that have been
|
||||
published on the Cloudron Store. The [basic tutorial](/tutorials/basic.html#publishing)
|
||||
gives an overview of how to package and publish your application for the
|
||||
Cloudron Store.
|
||||
|
||||
## HTML Snippet
|
||||
|
||||
```
|
||||
<img src="https://cloudron.io/img/button32.png" href="https://cloudron.io/button.html?app=<appid>">
|
||||
```
|
||||
|
||||
_Note_: Replace `<appid>` with your application's id.
|
||||
|
||||
## Markdown Snippet
|
||||
|
||||
```
|
||||
[](https://cloudron.io/button.html?app=<appid>)
|
||||
```
|
||||
|
||||
_Note_: Replace `<appid>` with your application's id.
|
||||
|
||||
|
||||
## Button Height
|
||||
|
||||
The button may be used in different heights - 32, 48 and 64 pixels.
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
or as SVG
|
||||
|
||||
[](https://cloudron.io/button.html?app=io.gogs.cloudronapp)
|
||||
|
||||
_Note_: Clicking the buttons above will install [Gogs](http://gogs.io/) on your Cloudron.
|
||||
@@ -1,441 +0,0 @@
|
||||
# Overview
|
||||
|
||||
Every Cloudron Application contains a `CloudronManifest.json`.
|
||||
|
||||
The manifest contains two categories of information:
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example,
|
||||
the title, author information, description etc
|
||||
|
||||
* Information for installing the app on the Cloudron. This includes fields
|
||||
like httpPort, tcpPorts.
|
||||
|
||||
A CloudronManifest.json can **only** contain fields that are listed as part of this
|
||||
specification. The Cloudron Store and the Cloudron *may* reject applications that have
|
||||
extra fields.
|
||||
|
||||
Here is an example manifest:
|
||||
|
||||
```
|
||||
{
|
||||
"id": "com.example.test",
|
||||
"title": "Example Application",
|
||||
"author": "Girish Ramakrishnan <girish@cloudron.io>",
|
||||
"description": "This is an example app",
|
||||
"tagline": "A great beginning",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"manifestVersion": 1,
|
||||
"website": "https://www.example.com",
|
||||
"contactEmail": "support@clourdon.io",
|
||||
"icon": "file://icon.png",
|
||||
"tags": [ "test", "collaboration" ],
|
||||
"mediaLinks": [ "https://images.rapgenius.com/fd0175ef780e2feefb30055be9f2e022.520x343x1.jpg" ]
|
||||
}
|
||||
```
|
||||
|
||||
# Fields
|
||||
|
||||
## addons
|
||||
|
||||
Type: object
|
||||
|
||||
Required: no
|
||||
|
||||
Allowed keys
|
||||
* [email](addons.html#email)
|
||||
* [ldap](addons.html#ldap)
|
||||
* [localstorage](addons.html#localstorage)
|
||||
* [mongodb](addons.html#mongodb)
|
||||
* [mysql](addons.html#mysql)
|
||||
* [oauth](addons.html#oauth)
|
||||
* [postgresql](addons.html#postgresql)
|
||||
* [recvmail](addons.html#recvmail)
|
||||
* [redis](addons.html#redis)
|
||||
* [sendmail](addons.html#sendmail)
|
||||
|
||||
The `addons` object lists all the [addons](addons.html) and the addon configuration used by the application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"mongodb": {}
|
||||
}
|
||||
```
|
||||
|
||||
## author
|
||||
|
||||
Type: string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `author` field contains the name and email of the app developer (or company).
|
||||
|
||||
Example:
|
||||
```
|
||||
"author": "Cloudron UG <girish@cloudron.io>"
|
||||
```
|
||||
|
||||
## changelog
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `changelog` field contains the changes in this version of the application. This string
|
||||
can be a markdown style bulleted list.
|
||||
|
||||
Example:
|
||||
```
|
||||
"changelog": "* Add support for IE8 \n* New logo"
|
||||
```
|
||||
|
||||
## contactEmail
|
||||
|
||||
Type: email
|
||||
|
||||
Required: yes
|
||||
|
||||
The `contactEmail` field contains the email address that Cloudron users can contact for any
|
||||
bug reports and suggestions.
|
||||
|
||||
Example:
|
||||
```
|
||||
"contactEmail": "support@testapp.com"
|
||||
```
|
||||
|
||||
## description
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `description` field contains a detailed description of the app. This information is shown
|
||||
to the user when they install the app from the Cloudron Store.
|
||||
|
||||
Example:
|
||||
```
|
||||
"description": "This is a detailed description of this app."
|
||||
```
|
||||
|
||||
A large `description` can be unwieldy to manage and edit inside the CloudronManifest.json. For
|
||||
this reason, the `description` can also contain a file reference. The Cloudron CLI tool fills up
|
||||
the description from this file when publishing your application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"description:": "file://DESCRIPTION.md"
|
||||
```
|
||||
|
||||
## healthCheckPath
|
||||
|
||||
Type: url path
|
||||
|
||||
Required: yes
|
||||
|
||||
The `healthCheckPath` field is used by the Cloudron Runtime to determine if your app is running and
|
||||
responsive. The app must return a 2xx HTTP status code as a response when this path is queried. In
|
||||
most cases, the default "/" will suffice but there might be cases where periodically querying "/"
|
||||
is an expensive operation. In addition, the app might want to use a specialized route should it
|
||||
want to perform some specialized internal checks.
|
||||
|
||||
Example:
|
||||
```
|
||||
"healthCheckPath": "/"
|
||||
```
|
||||
## httpPort
|
||||
|
||||
Type: positive integer
|
||||
|
||||
Required: yes
|
||||
|
||||
The `httpPort` field contains the TCP port on which your app is listening for HTTP requests. This
|
||||
is the HTTP port the Cloudron will use to access your app internally.
|
||||
|
||||
While not required, it is good practice to mark this port as `EXPOSE` in the Dockerfile.
|
||||
|
||||
Cloudron Apps are containerized and thus two applications can listen on the same port. In reality,
|
||||
they are in different network namespaces and do not conflict with each other.
|
||||
|
||||
Note that this port has to be HTTP and not HTTPS or any other non-HTTP protocol. HTTPS proxying is
|
||||
handled by the Cloudron platform (since it owns the certificates).
|
||||
|
||||
Example:
|
||||
```
|
||||
"httpPort": 8080
|
||||
```
|
||||
|
||||
## icon
|
||||
|
||||
Type: local image filename
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `icon` field is used to display the application icon/logo in the Cloudron Store. Icons are expected
|
||||
to be square of size 256x256.
|
||||
|
||||
```
|
||||
"icon": "file://icon.png"
|
||||
```
|
||||
|
||||
## id
|
||||
|
||||
Type: reverse domain string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `id` is a unique human friendly Cloudron Store id. This is similar to reverse domain string names used
|
||||
as java package names. The convention is to base the `id` based on a domain that you own.
|
||||
|
||||
The Cloudron tooling allows you to build applications with any `id`. However, you will be unable to publish
|
||||
the application if the id is already in use by another application.
|
||||
|
||||
```
|
||||
"id": "io.cloudron.testapp"
|
||||
```
|
||||
|
||||
## manifestVersion
|
||||
|
||||
Type: integer
|
||||
|
||||
Required: yes
|
||||
|
||||
`manifestVersion` specifies the version of the manifest and is always set to 1.
|
||||
|
||||
```
|
||||
"manifestVersion": 1
|
||||
```
|
||||
|
||||
## mediaLinks
|
||||
|
||||
Type: array of urls
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `mediaLinks` field contains an array of links that the Cloudron Store uses to display a slide show of pictures of the application.
|
||||
|
||||
They have to be publicly reachable via `https` and should have an aspect ratio of 3 to 1.
|
||||
For example `600px by 200px` (width/height).
|
||||
|
||||
```
|
||||
"mediaLinks": [
|
||||
"https://s3.amazonaws.com/cloudron-app-screenshots/org.owncloud.cloudronapp/556f6a1d82d5e27a7c4fca427ebe6386d373304f/2.jpg",
|
||||
"https://images.rapgenius.com/fd0175ef780e2feefb30055be9f2e022.520x343x1.jpg"
|
||||
]
|
||||
```
|
||||
|
||||
## memoryLimit
|
||||
|
||||
Type: bytes (integer)
|
||||
|
||||
Required: no
|
||||
|
||||
The `memoryLimit` field is the maximum amount of memory (including swap) in bytes an app is allowed to consume before it
|
||||
gets killed and restarted.
|
||||
|
||||
By default, all apps have a memoryLimit of 256MB. For example, to have a limit of 500MB,
|
||||
|
||||
```
|
||||
"memoryLimit": 524288000
|
||||
```
|
||||
|
||||
## maxBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `maxBoxVersion` field is the maximum box version that the app can possibly run on. Attempting to install the app on
|
||||
a box greater than `maxBoxVersion` will fail.
|
||||
|
||||
This is useful when a new box release introduces features which are incompatible with the app. This situation is quite
|
||||
unlikely and it is recommended to leave this unset.
|
||||
|
||||
## minBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `minBoxVersion` field is the minimum box version that the app can possibly run on. Attempting to install the app on
|
||||
a box lesser than `minBoxVersion` will fail.
|
||||
|
||||
This is useful when the app relies on features that are only available from a certain version of the box. If unset, the
|
||||
default value is `0.0.1`.
|
||||
|
||||
## postInstallMessage
|
||||
|
||||
Type: markdown string
|
||||
|
||||
Required: no
|
||||
|
||||
The `postInstallMessage` field is a message that is displayed to the user after an app is installed.
|
||||
|
||||
The intended use of this field is to display some post installation steps that the user has to carry out to
|
||||
complete the installation. For example, displaying the default admin credentials and informing the user to
|
||||
to change it.
|
||||
|
||||
The message can have the following special tags:
|
||||
* `<sso> ... </sso>` - Content in `sso` blocks are shown if SSO enabled.
|
||||
* `<nosso> ... </nosso>` - Content in `nosso` blocks is shown when SSO is disabled.
|
||||
|
||||
## optionalSso
|
||||
|
||||
Type: boolean
|
||||
|
||||
Required: no
|
||||
|
||||
The `optionalSso` field can be set to true for apps that can be installed optionally without using the Cloudron user management.
|
||||
|
||||
This only applies if any Cloudron auth related addons are used. When set, the Cloudron will not inject the auth related addon environment variables.
|
||||
Any app startup scripts have to be able to deal with missing env variables in this case.
|
||||
|
||||
## tagline
|
||||
|
||||
Type: one-line string
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `tagline` is used by the Cloudron Store to display a single line short description of the application.
|
||||
|
||||
```
|
||||
"tagline": "The very best note keeper"
|
||||
```
|
||||
|
||||
## tags
|
||||
|
||||
Type: Array of strings
|
||||
|
||||
Required: no (required for submitting to the Cloudron Store)
|
||||
|
||||
The `tags` are used by the Cloudron Store for filtering searches by keyword.
|
||||
|
||||
```
|
||||
"tags": [ "git", "version control", "scm" ]
|
||||
```
|
||||
|
||||
## targetBoxVersion
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: no
|
||||
|
||||
The `targetBoxVersion` field is the box version that the app was tested on. By definition, this version has to be greater
|
||||
than the `minBoxVersion`.
|
||||
|
||||
The box uses this value to enable compatibility behavior of APIs. For example, an app sets the targetBoxVersion to 0.0.5
|
||||
and is published on the store. Later, box version 0.0.10 introduces a new feature that conflicts with how apps used
|
||||
to run in 0.0.5 (say SELinux was enabled for apps). When the box runs such an app, it ensures compatible behavior
|
||||
and will disable the SELinux feature for the app.
|
||||
|
||||
If unspecified, this value defaults to `minBoxVersion`.
|
||||
|
||||
## tcpPorts
|
||||
|
||||
Type: object
|
||||
|
||||
Required: no
|
||||
|
||||
Syntax: Each key is the environment variable. Each value is an object containing `title`, `description` and `defaultValue`.
|
||||
An optional `containerPort` may be specified.
|
||||
|
||||
The `tcpPorts` field provides information on the non-http TCP ports/services that your application is listening on. During
|
||||
installation, the user can decide how these ports are exposed from their Cloudron.
|
||||
|
||||
For example, if the application runs an SSH server at port 29418, this information is listed here. At installation time,
|
||||
the user can decide any of the following:
|
||||
* Expose the port with the suggested `defaultValue` to the outside world. This will only work if no other app is being exposed at same port.
|
||||
* Provide an alternate value on which the port is to be exposed to outside world.
|
||||
* Disable the port/service.
|
||||
|
||||
To illustrate, the application lists the ports as below:
|
||||
```
|
||||
"tcpPorts": {
|
||||
"SSH_PORT": {
|
||||
"title": "SSH Port",
|
||||
"description": "SSH Port over which repos can be pushed & pulled",
|
||||
"defaultValue": 29418,
|
||||
"containerPort": 22
|
||||
}
|
||||
},
|
||||
```
|
||||
|
||||
In the above example:
|
||||
* `SSH_PORT` is an app specific environment variable. Only strings, numbers and _ (underscore) are allowed. The author has to ensure that they don't clash with platform provided variable names.
|
||||
|
||||
* `title` is a short one line information about this port/service.
|
||||
|
||||
* `description` is a multi line description about this port/service.
|
||||
|
||||
* `defaultValue` is the recommended port value to be shown in the app installation UI.
|
||||
|
||||
* `containerPort` is the port that the app is listening on (recall that each app has its own networking namespace).
|
||||
|
||||
In more detail:
|
||||
|
||||
* If the user decides to disable the SSH service, this environment variable `SSH_PORT` is absent. Applications _must_ detect this on
|
||||
start up and disable these services.
|
||||
|
||||
* `SSH_PORT` is set to the value of the exposed port. Should the user choose to expose the SSH server on port 6000, then the
|
||||
value of SSH_PORT is 6000.
|
||||
|
||||
* `defaultValue` is **only** used for display purposes in the app installation UI. This value is independent of the value
|
||||
that the app is listening on. For example, the app can run an SSH server at port 22 but still recommend a value of 29418 to the user.
|
||||
|
||||
* `containerPort` is the port that the app is listening on. The Cloudron runtime will _bridge_ the user chosen external port
|
||||
with the app specific `containerPort`. Cloudron Apps are containerized and each app has its own networking namespace.
|
||||
As a result, different apps can have the same `containerPort` value because these values are namespaced.
|
||||
|
||||
* The environment variable `SSH_PORT` may be used by the app to display external URLs. For example, the app might want to display
|
||||
the SSH URL. In such a case, it would be incorrect to use the `containerPort` 22 or the `defaultValue` 29418 since this is not
|
||||
the value chosen by the user.
|
||||
|
||||
* `containerPort` is optional and can be omitted, in which case the bridged port numbers are the same internally and externally.
|
||||
Some apps use the same variable (in their code) for listen port and user visible display strings. When packaging these apps,
|
||||
it might be simpler to listen on `SSH_PORT` internally. In such cases, the app can omit the `containerPort` value and should
|
||||
instead reconfigure itself to listen internally on `SSH_PORT` on each start up.
|
||||
|
||||
## title
|
||||
|
||||
Type: string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `title` is the primary application title displayed on the Cloudron Store.
|
||||
|
||||
Example:
|
||||
```
|
||||
"title": "Gitlab"
|
||||
```
|
||||
|
||||
## version
|
||||
|
||||
Type: semver string
|
||||
|
||||
Required: yes
|
||||
|
||||
The `version` field specifies a [semver](http://semver.org/) string. The version is used by the Cloudron to compare versions and to
|
||||
determine if an update is available.
|
||||
|
||||
Example:
|
||||
```
|
||||
"version": "1.1.0"
|
||||
```
|
||||
|
||||
## website
|
||||
|
||||
Type: url
|
||||
|
||||
Required: yes
|
||||
|
||||
The `website` field is a URL where the user can read more about the application.
|
||||
|
||||
Example:
|
||||
```
|
||||
"website": "https://example.com/myapp"
|
||||
```
|
||||
@@ -1,61 +0,0 @@
|
||||
# Configuration Recipes
|
||||
|
||||
## nginx
|
||||
|
||||
`nginx` is often used as a reverse proxy in front of the application, to dispatch to different backend programs based on the request route or other characteristics. In such a case it is recommended to run nginx and the application through a process manager like `supervisor`.
|
||||
|
||||
Example nginx supervisor configuration file:
|
||||
```
|
||||
[program:nginx]
|
||||
directory=/tmp
|
||||
command=/usr/sbin/nginx -g "daemon off;"
|
||||
user=root
|
||||
autostart=true
|
||||
autorestart=true
|
||||
stdout_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile=/var/log/supervisor/%(program_name)s.log
|
||||
```
|
||||
|
||||
The nginx configuration, provided with the base image, can be used by adding an application specific config file under `/etc/nginx/sites-enabled/` when building the docker image.
|
||||
|
||||
```
|
||||
ADD <app config file> /etc/nginx/sites-enabled/<app config file>
|
||||
```
|
||||
|
||||
Since the base image nginx configuration is unpatched from the ubuntu package, the application configuration has to ensure nginx is using `/run/` instead of `/var/lib/nginx/` to support the read-only filesystem nature of a Cloudron application.
|
||||
|
||||
Example nginx app config file:
|
||||
```
|
||||
client_body_temp_path /run/client_body;
|
||||
proxy_temp_path /run/proxy_temp;
|
||||
fastcgi_temp_path /run/fastcgi_temp;
|
||||
scgi_temp_path /run/scgi_temp;
|
||||
uwsgi_temp_path /run/uwsgi_temp;
|
||||
|
||||
server {
|
||||
listen 8000;
|
||||
|
||||
root /app/code/dist;
|
||||
|
||||
location /api/v1/ {
|
||||
proxy_pass http://127.0.0.1:8001;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_read_timeout 86400;
|
||||
}
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## supervisor
|
||||
|
||||
Use this in the program's config:
|
||||
|
||||
```
|
||||
[program:app]
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
```
|
||||
@@ -1,395 +0,0 @@
|
||||
# Overview
|
||||
|
||||
The Cloudron platform can be installed on public cloud servers from EC2, Digital Ocean, Hetzner,
|
||||
Linode, OVH, Scaleway, Vultr etc. Cloudron also runs well on a home server or company intranet.
|
||||
|
||||
If you run into any trouble following this guide, ask us at our [chat](https://chat.cloudron.io).
|
||||
|
||||
# Understand
|
||||
|
||||
Before installing the Cloudron, it is helpful to understand Cloudron's design. The Cloudron
|
||||
intends to make self-hosting effortless. It takes care of updates, backups, firewall, dns setup,
|
||||
certificate management etc. All app and user configuration is carried out using the web interface.
|
||||
|
||||
This approach to self-hosting means that the Cloudron takes complete ownership of the server and
|
||||
only tracks changes that were made via the web interface. Any external changes made to the server
|
||||
(i.e other than via the Cloudron web interface or API) may be lost across updates.
|
||||
|
||||
The Cloudron requires a domain name when it is installed. Apps are installed into subdomains.
|
||||
The `my` subdomain is special and is the location of the Cloudron web interface. For this to
|
||||
work, the Cloudron requires a way to programmatically configure the DNS entries of the domain.
|
||||
Note that the Cloudron will never overwrite _existing_ DNS entries and refuse to install
|
||||
apps on existing subdomains (so, it is safe to reuse an existing domain that runs other services).
|
||||
|
||||
# Cloud Server
|
||||
|
||||
DigitalOcean and EC2 (Amazon Web Services) are frequently tested by us.
|
||||
|
||||
Please use the below links to support us with referrals:
|
||||
* [Amazon EC2](https://aws.amazon.com/ec2/)
|
||||
* [DigitalOcean](https://m.do.co/c/933831d60a1e)
|
||||
|
||||
In addition to those, the Cloudron community has successfully installed the platform on those providers:
|
||||
* [Amazon Lightsail](https://amazonlightsail.com/)
|
||||
* [hosttech](https://www.hosttech.ch/?promocode=53619290)
|
||||
* [Linode](https://www.linode.com/?r=f68d816692c49141e91dd4cef3305da457ac0f75)
|
||||
* [OVH](https://www.ovh.com/)
|
||||
* [Rosehosting](https://secure.rosehosting.com/clientarea/?affid=661)
|
||||
* [Scaleway](https://www.scaleway.com/)
|
||||
* [So you Start](https://www.soyoustart.com/)
|
||||
* [Vultr](http://www.vultr.com/?ref=7110116-3B)
|
||||
|
||||
Please let us know if any of them requires tweaks or adjustments.
|
||||
|
||||
# Installing
|
||||
|
||||
## Create server
|
||||
|
||||
Create an `Ubuntu 16.04 (Xenial)` server with at least `1gb` RAM and 20GB disk space.
|
||||
Do not make any changes to vanilla ubuntu. Be sure to allocate a static IPv4 address
|
||||
for your server.
|
||||
|
||||
Cloudron has a built-in firewall and ports are opened and closed dynamically, as and when
|
||||
apps are installed, re-configured or removed. For this reason, be sure to open all TCP and
|
||||
UDP traffic to the server and leave the traffic management to the Cloudron.
|
||||
|
||||
### Linode
|
||||
|
||||
Since Linode does not manage SSH keys, be sure to add the public key to
|
||||
`/root/.ssh/authorized_keys`.
|
||||
|
||||
### Scaleway
|
||||
|
||||
Use the [boot script](https://github.com/scaleway-community/scaleway-docker/issues/2) to
|
||||
enable memory accounting.
|
||||
|
||||
## Run setup
|
||||
|
||||
SSH into your server and run the following commands:
|
||||
|
||||
```
|
||||
wget https://cloudron.io/cloudron-setup
|
||||
chmod +x cloudron-setup
|
||||
./cloudron-setup --provider <azure|digitalocean|ec2|lightsail|linode|ovh|rosehosting|scaleway|vultr|generic>
|
||||
```
|
||||
|
||||
The setup will take around 10-15 minutes.
|
||||
|
||||
**cloudron-setup** takes the following arguments:
|
||||
|
||||
* `--provider` is the name of your VPS provider. If the name is not on the list, simply
|
||||
choose `generic`. In most cases, the `generic` provider will work fine.
|
||||
If the Cloudron does not complete initialization, it may mean that
|
||||
we have to add some vendor specific quirks. Please open a
|
||||
[bug report](https://git.cloudron.io/cloudron/box/issues) in that case.
|
||||
|
||||
Optional arguments for installation:
|
||||
|
||||
* `--tls-provider` is the name of the SSL/TLS certificate backend. Defaults to Let's encrypt.
|
||||
Specifying `fallback` will setup the Cloudron to use the fallback wildcard certificate.
|
||||
Initially a self-signed one is provided, which can be overwritten later in the admin interface.
|
||||
This may be useful for non-public installations.
|
||||
|
||||
Optional arguments used for update and restore:
|
||||
|
||||
* `--version` is the version of Cloudron to install. By default, the setup script installs
|
||||
the latest version. You can set this to an older version when restoring a Cloudron from a backup.
|
||||
|
||||
* `--restore-url` is a backup URL to restore from.
|
||||
|
||||
## Domain setup
|
||||
|
||||
Once the setup script completes, the server will reboot, then visit your server by its
|
||||
IP address (`https://ip`) to complete the installation.
|
||||
|
||||
The setup website will show a certificate warning. Accept the self-signed certificate
|
||||
and proceed to the domain setup.
|
||||
|
||||
Currently, only subdomains of the [Public Suffix List](https://publicsuffix.org/) are supported.
|
||||
For example, `example.com`, `example.co.uk` will work fine. Choosing other non-registrable
|
||||
domain names like `cloudron.example.com` will not work.
|
||||
|
||||
### Route 53
|
||||
|
||||
Create root or IAM credentials and choose `Route 53` as the DNS provider.
|
||||
|
||||
* For root credentials:
|
||||
* In AWS Console, under your name in the menu bar, click `Security Credentials`
|
||||
* Click on `Access Keys` and create a key pair.
|
||||
* For IAM credentials:
|
||||
* You can use the following policy to create IAM credentials:
|
||||
|
||||
```
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "route53:*",
|
||||
"Resource": [
|
||||
"arn:aws:route53:::hostedzone/<hosted zone id>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"route53:ListHostedZones",
|
||||
"route53:GetChange"
|
||||
],
|
||||
"Resource": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Digital Ocean
|
||||
|
||||
Create an API token with read+write access and choose `Digital Ocean` as the DNS provider.
|
||||
|
||||
### Other
|
||||
|
||||
If your domain *does not* use Route 53 or Digital Ocean, setup a wildcard (`*`) DNS `A` record that points to the
|
||||
IP of the server created above. If your DNS provider has an API, please open an
|
||||
[issue](https://git.cloudron.io/cloudron/box/issues) and we may be able to support it.
|
||||
|
||||
## Finish Setup
|
||||
|
||||
Once the domain setup is done, the Cloudron will configure the DNS and get a SSL certificate. It will automatically redirect to `https://my.<domain>`.
|
||||
|
||||
# Backups
|
||||
|
||||
The Cloudron creates encrypted backups once a day. Each app is backed up independently and these
|
||||
backups have the prefix `app_`. The platform state is backed up independently with the
|
||||
prefix `box_`.
|
||||
|
||||
By default, backups reside in `/var/backups`. Please note that having backups reside in the same
|
||||
physical machine as the Cloudron server instance is dangerous and it must be changed to
|
||||
an external storage location like `S3` as soon as possible.
|
||||
|
||||
## Amazon S3
|
||||
|
||||
Provide S3 backup credentials in the `Settings` page and leave the endpoint field empty.
|
||||
|
||||
Create a bucket in S3 (You have to have an account at [AWS](https://aws.amazon.com/)). The bucket can be setup to periodically delete old backups by
|
||||
adding a lifecycle rule using the AWS console. S3 supports both permanent deletion
|
||||
or moving objects to the cheaper Glacier storage class based on an age attribute.
|
||||
With the current daily backup schedule a setting of two days should be sufficient
|
||||
for most use-cases.
|
||||
|
||||
* For root credentials:
|
||||
* In AWS Console, under your name in the menu bar, click `Security Credentials`
|
||||
* Click on `Access Keys` and create a key pair.
|
||||
* For IAM credentials:
|
||||
* You can use the following policy to create IAM credentials:
|
||||
|
||||
```
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:*",
|
||||
"Resource": [
|
||||
"arn:aws:s3:::<your bucket name>",
|
||||
"arn:aws:s3:::<your bucket name>/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
## Minio S3
|
||||
|
||||
[Minio](https://minio.io/) is a distributed object storage server, providing the same API as Amazon S3.
|
||||
Since Cloudron supports S3, any API compatible solution should be supported as well, if this is not the case, let us know.
|
||||
|
||||
Minio can be setup, by following the [installation instructions](https://docs.minio.io/) on any server, which is reachable by the Cloudron.
|
||||
Do not setup Minio on the same server as the Cloudron, this will inevitably result in data loss, if backups are stored on the same instance.
|
||||
|
||||
Once setup, minio will print the necessary information, like login credentials, region and endpoints in its logs.
|
||||
|
||||
```
|
||||
$ ./minio server ./storage
|
||||
|
||||
Endpoint: http://192.168.10.113:9000 http://127.0.0.1:9000
|
||||
AccessKey: GFAWYNJEY7PUSLTHYHT6
|
||||
SecretKey: /fEWk66E7GsPnzE1gohqKDovaytLcxhr0tNWnv3U
|
||||
Region: us-east-1
|
||||
```
|
||||
|
||||
First create a new bucket for the backups, using the minio commandline tools or the webinterface. The bucket has to have **read and write** permissions.
|
||||
|
||||
The information to be copied to the Cloudron's backup settings form may look similar to:
|
||||
|
||||
<img src="/docs/img/minio_backup_config.png" class="shadow"><br/>
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
# Email
|
||||
|
||||
Cloudron has a built-in email server. By default, it only sends out email on behalf of apps
|
||||
(for example, password reset or notification). You can enable the email server for sending
|
||||
and receiving mail on the `settings` page. This feature is only available if you have setup
|
||||
a DNS provider like Digital Ocean or Route53.
|
||||
|
||||
Your server's IP plays a big role in how emails from our Cloudron get handled. Spammers
|
||||
frequently abuse public IP addresses and as a result your Cloudron might possibly start
|
||||
out with a bad reputation. The good news is that most IP based blacklisting services cool
|
||||
down over time. The Cloudron sets up DNS entries for SPF, DKIM, DMARC automatically and
|
||||
reputation should be easy to get back.
|
||||
|
||||
## Checklist
|
||||
|
||||
* If you are unable to receive mail, first thing to check is if your VPS provider lets you
|
||||
receive mail on port 25.
|
||||
|
||||
* Digital Ocean - New accounts frequently have port 25 blocked. Write to their support to
|
||||
unblock your server.
|
||||
|
||||
* EC2, Lightsail & Scaleway - Edit your security group to allow email.
|
||||
|
||||
* Set up a Reverse DNS PTR record for the `my` subdomain.
|
||||
**Note:** PTR records are a feature of your VPS provider and not your domain provider.
|
||||
|
||||
* You can verify the PTR record [here](https://mxtoolbox.com/ReverseLookup.aspx).
|
||||
|
||||
* AWS EC2 & Lightsail - Fill the [PTR request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request).
|
||||
|
||||
* Digital Ocean - Digital Ocean sets up a PTR record based on the droplet's name. So, simply rename
|
||||
your droplet to `my.<domain>`. Note that some new Digital Ocean accounts have [port 25 blocked](https://www.digitalocean.com/community/questions/port-25-smtp-external-access).
|
||||
|
||||
* Linode - Follow this [guide](https://www.linode.com/docs/networking/dns/setting-reverse-dns).
|
||||
|
||||
* Scaleway - Edit your security group to allow email and [reboot the server](https://community.online.net/t/security-group-not-working/2096) for the change to take effect. You can also set a PTR record on the interface with your `my.<domain>`.
|
||||
|
||||
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/). In most cases,
|
||||
you can apply for removal of your IP by filling out a form at the DNSBL manager site.
|
||||
|
||||
* When using wildcard or manual DNS backends, you have to setup the DMARC, MX records manually.
|
||||
|
||||
* Finally, check your spam score at [mail-tester.com](https://www.mail-tester.com/). The Cloudron
|
||||
should get 100%, if not please let us know.
|
||||
|
||||
# CLI Tool
|
||||
|
||||
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
|
||||
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be installed & run on a Laptop or PC</b>
|
||||
|
||||
Once installed, you can install, configure, list, backup and restore apps from the command line.
|
||||
|
||||
## Linux & OS X
|
||||
|
||||
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
|
||||
|
||||
```
|
||||
npm install -g cloudron
|
||||
```
|
||||
|
||||
Depending on your setup, you may need to run this as root.
|
||||
|
||||
On OS X, it is known to work with the `openssl` package from homebrew.
|
||||
|
||||
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
|
||||
|
||||
## Windows
|
||||
|
||||
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
|
||||
|
||||
# Updates
|
||||
|
||||
Apps installed from the Cloudron Store are automatically updated every night.
|
||||
|
||||
The Cloudron platform itself updates in two ways: update or upgrade.
|
||||
|
||||
### Update
|
||||
|
||||
An **update** is applied onto the running server instance. Such updates are performed
|
||||
every night. You can also use the Cloudron UI to initiate an update immediately.
|
||||
|
||||
The Cloudron will always make a complete backup before attempting an update. In the unlikely
|
||||
case an update fails, it can be [restored](/references/selfhosting.html#restore).
|
||||
|
||||
### Upgrade
|
||||
|
||||
An **upgrade** requires a new OS image. This process involves creating a new server from scratch
|
||||
with the latest code and restoring it from the last backup.
|
||||
|
||||
To upgrade follow these steps closely:
|
||||
|
||||
* Create a new backup - `cloudron machine backup create`
|
||||
|
||||
* List the latest backup - `cloudron machine backup list`
|
||||
|
||||
* Make the backup available for the new cloudron instance:
|
||||
|
||||
* `S3` - When storing backups in S3, make the latest box backup public - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console as seen here:
|
||||
|
||||
<img src="/docs/img/aws_backup_public.png" class="shadow haze"><br/>
|
||||
|
||||
Copy the new public URL of the latest backup for use as the `--restore-url` below.
|
||||
|
||||
<img src="/docs/img/aws_backup_link.png" class="shadow haze"><br/>
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the machine before installation, mounting the external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `--encryption-key` and `--restore-url` flags.
|
||||
The `--encryption-key` is the backup encryption key. It can be displayed with `cloudron machine info`
|
||||
|
||||
Similar to the initial installation, a Cloudron upgrade looks like:
|
||||
```
|
||||
$ ssh root@newserverip
|
||||
> wget https://cloudron.io/cloudron-setup
|
||||
> chmod +x cloudron-setup
|
||||
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --domain <example.com> --encryption-key <key> --restore-url <publicS3Url>
|
||||
```
|
||||
|
||||
Note: When upgrading an old version of Cloudron (<= 0.94.0), pass the `--version 0.94.1` flag and then continue updating
|
||||
from that.
|
||||
|
||||
* Finally, once you see the newest version being displayed in your Cloudron webinterface, you can safely delete the old server instance.
|
||||
|
||||
# Restore
|
||||
|
||||
To restore a Cloudron from a specific backup:
|
||||
|
||||
* Select the backup - `cloudron machine backup list`
|
||||
|
||||
* Make the backup public
|
||||
|
||||
* `S3` - Make the box backup publicly readable - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console. Once the box has restored, you can make it private again.
|
||||
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the new machine before Cloudron installation OR mounting an external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `version`, `encryption-key`, `domain` and `restore-url` flags.
|
||||
The `version` field is the version of the Cloudron that the backup corresponds to (it is embedded
|
||||
in the backup file name).
|
||||
|
||||
* Make the box backup private, once the upgrade is complete.
|
||||
|
||||
# Debug
|
||||
|
||||
You can SSH into your Cloudron and collect logs:
|
||||
|
||||
* `journalctl -a -u box` to get debug output of box related code.
|
||||
* `docker ps` will give you the list of containers. The addon containers are named as `mail`, `postgresql`,
|
||||
`mysql` etc. If you want to get a specific container's log output, `journalctl -a CONTAINER_ID=<container_id>`.
|
||||
|
||||
# Alerts
|
||||
|
||||
The Cloudron will notify the Cloudron administrator via email if apps go down, run out of memory, have updates
|
||||
available etc.
|
||||
|
||||
You will have to setup a 3rd party service like [Cloud Watch](https://aws.amazon.com/cloudwatch/) or [UptimeRobot](http://uptimerobot.com/) to monitor the Cloudron itself. You can use `https://my.<domain>/api/v1/cloudron/status`
|
||||
as the health check URL.
|
||||
|
||||
# Help
|
||||
|
||||
If you run into any problems, join us at our [chat](https://chat.cloudron.io) or [email us](mailto:support@cloudron.io).
|
||||
@@ -1,354 +0,0 @@
|
||||
# Introduction
|
||||
|
||||
The Cloudron is the best platform for self-hosting web applications on your server. You
|
||||
can easily install apps on it, add users, manage access restriction and keep your
|
||||
server and apps updated with no effort.
|
||||
|
||||
You might wonder that there are so many 1-click app solutions out there and what is so special
|
||||
about Cloudron? As the name implies, 1-click installers simply install code into a server
|
||||
and leave it at that. There's so much more to do:
|
||||
|
||||
1. Configure a domain to point to your server
|
||||
2. Setup SSL certificates and renew them periodically
|
||||
3. Ensure apps are backed up correctly
|
||||
4. Ensure apps are up-to-date and secure
|
||||
5. Have a mechanism to quickly restore apps from a backup
|
||||
6. Manage users across all your apps
|
||||
7. Get alerts and notifications about the status of apps
|
||||
|
||||
... and so on ...
|
||||
|
||||
We made the Cloudron to dramatically lower the bar for people to run apps on servers. Just provide
|
||||
a domain name, install apps and add users. All the server management tasks listed above are
|
||||
completely automated.
|
||||
|
||||
If you want to learn more about the secret sauce that makes the Cloudron, please read our
|
||||
[architecture overview](/references/architecture.html).
|
||||
|
||||
# Use cases
|
||||
|
||||
Here are some of the apps you can run on a Cloudron:
|
||||
|
||||
* RSS Reader
|
||||
* Chat, IRC, Jabber servers
|
||||
* Public forum
|
||||
* Blog
|
||||
* File syncing and sharing
|
||||
* Code hosting
|
||||
* Email
|
||||
|
||||
Our list of apps is growing everyday, so be sure to [follow us on twitter](https://twitter.com/cloudron_io).
|
||||
|
||||
# Activation
|
||||
|
||||
When you first create the Cloudron, the setup wizard will ask you to setup an administrator
|
||||
account. Don't worry, a Cloudron administrator doesn't need to know anything about maintaining
|
||||
a server! It's the whole reason why we made the Cloudron. Being a Cloudron administrator is
|
||||
more analogous to being the owner of a smartphone. You can always add more administrators to
|
||||
the Cloudron from the `Users` menu item.
|
||||
|
||||
<img src="/docs/img/webadmin_domain.png" class="shadow">
|
||||
|
||||
The Cloudron administration page is located at the `my` subdomain. You might want to bookmark
|
||||
this link!
|
||||
|
||||
# Apps
|
||||
|
||||
## Installation
|
||||
|
||||
You can install apps on the Cloudron by choosing the `App Store` menu item. Use the 'Search' bar
|
||||
to search for apps.
|
||||
|
||||
Clicking on an app gives you information about the app.
|
||||
|
||||
<img src="/docs/img/app_info.png" class="shadow">
|
||||
|
||||
Clicking the `Install` button will show an install dialog like below:
|
||||
|
||||
<img src="/docs/img/app_install.png" class="shadow">
|
||||
|
||||
The `Location` field is the subdomain in which your app will be installed. For example, if you use the
|
||||
`mail` location for your web mail client, then it will be accessible at `mail.<domain>`.
|
||||
|
||||
Tip: You can access the apps directly on your browser using `mail.<domain>`. You don't have to
|
||||
visit the Cloudron administration panel.
|
||||
|
||||
`Access control` specifies who can access this app.
|
||||
|
||||
* `Every Cloudron user` - Any user in your Cloudron can access the app. Initially, you are the only
|
||||
user in your Cloudron. Unless you explicitly invite others, nobody else can access these apps.
|
||||
Note that the term 'access' depends on the app. For a blog, this means that nobody can post new
|
||||
blog posts (but anybody can view them). For a chat server, this might mean that nobody can access
|
||||
your chat server.
|
||||
|
||||
* `Restrict to groups` - Only users in the groups can access the app.
|
||||
|
||||
## Updates
|
||||
|
||||
All your apps automatically update as and when the application author releases an update. The Cloudron
|
||||
will attempt to update around midnight of your timezone.
|
||||
|
||||
Some app updates are not automatic. This can happen if a new version of the app has removed some features
|
||||
that you were relying on. In such a case, the update has to be manually approved. This is simply a matter
|
||||
of clicking the `Update` button (the green star) after you read about the changes.
|
||||
|
||||
<img src="/docs/img/app_update.png" class="shadow">
|
||||
|
||||
## Backups
|
||||
|
||||
<i>If you self-host, please refer to the [self-hosting documentation](/references/selfhosting.html#backups) for backups.</i>
|
||||
|
||||
All apps are automatically backed up every day. Backups are stored encrypted in Amazon S3. You don't have
|
||||
to do anything about it. The [Cloudron CLI](https://git.cloudron.io/cloudron/cloudron-cli) tool can be used
|
||||
to download application backups.
|
||||
|
||||
## Configuration
|
||||
|
||||
Apps can be reconfigured using the `Configure` button.
|
||||
|
||||
<img src="/docs/img/app_configure_button.png" class="shadow">
|
||||
|
||||
Clicking on the wrench button will bring up the configure dialog.
|
||||
|
||||
<img src="/docs/img/app_configure.png" class="shadow">
|
||||
|
||||
You can do the following:
|
||||
* Change the location to move the app to another subdomain. Say, you want to move your blog from `blog` to `about`.
|
||||
* Change who can access the app.
|
||||
|
||||
Changing an app's configuration has a small downtime (usually around a minute).
|
||||
|
||||
## Restore
|
||||
|
||||
Apps can be restored to a previous backup by clicking on the `Restore` button.
|
||||
|
||||
<img src="/docs/img/app_restore_button.png" class="shadow">
|
||||
|
||||
Note that restoring previous data might also restore the previous version of the software. For example, you might
|
||||
be currently using Version 5 of the app. If you restore to a backup that was made with Version 3 of the app, then the restore
|
||||
operation will install Version 3 of the app. This is because the latest version may not be able to handle old data.
|
||||
|
||||
## Uninstall
|
||||
|
||||
You can uninstall an app by clicking the `Uninstall` button.
|
||||
|
||||
<img src="/docs/img/app_uninstall_button.png" class="shadow">
|
||||
|
||||
Note that all data associated with the app will be immediately removed from the Cloudron. App data might still
|
||||
persist in your old backups and the [CLI tool](https://git.cloudron.io/cloudron/cloudron-cli) provides a way to
|
||||
restore from those old backups should it be required.
|
||||
|
||||
## Embedding Apps
|
||||
|
||||
It is possible to embed Cloudron apps into other websites. By default, this is disabled to prevent
|
||||
[Clickjacking](https://cloudron.io/blog/2016-07-15-site-embedding.html).
|
||||
|
||||
You can set a website that is allowed to embed your Cloudron app using the app's [Configure dialog](#configuration).
|
||||
Click on 'Show Advanced Settings...' and enter the embedder website name.
|
||||
|
||||
# Custom domain
|
||||
|
||||
When you create a Cloudron from cloudron.io, we provide a subdomain under `cloudron.me` like `girish.cloudron.me`.
|
||||
Apps are available under that subdomain using a hyphenated name like `blog-girish.cloudron.me`.
|
||||
|
||||
Domain names are a thing of pride and the Cloudron makes it easy to make your apps accessible from memorable locations like `blog.girish.in`.
|
||||
|
||||
## Single app on a custom domain
|
||||
|
||||
This approach is applicable if you desire that only a single app be accessible from a custom
|
||||
domain. For this, open the app's configure dialog and choose `External Domain` in the location dropdown.
|
||||
|
||||
<img src="/docs/img/app_external_domain.png" class="shadow">
|
||||
|
||||
This dialog will suggest that you add a `CNAME` record. Once you setup a CNAME record with your DNS provider,
|
||||
the app will be accessible from that external domain.
|
||||
|
||||
## Entire Cloudron on a custom domain
|
||||
|
||||
This approach is applicable if you want all your apps to be accessible from subdomains of your custom domain.
|
||||
For example, `blog.girish.in`, `notes.girish.in`, `owncloud.girish.in`, `mail.girish.in` and so on. This
|
||||
approach is also the only way that the Cloudron supports for sending and receiving emails from your domain.
|
||||
|
||||
For this, go to the 'Domains & Certs' menu item.
|
||||
|
||||
<img src="/docs/img/custom_domain_menu.png" class="shadow">
|
||||
|
||||
Change the domain name to your custom domain. Currently, we require that your domain be hosted on AWS Route53.
|
||||
|
||||
<img src="/docs/img/custom_domain_change.png" class="shadow">
|
||||
|
||||
Moving to a custom domain will retain all your apps and data and will take around 15 minutes. If you require assistance with another provider,
|
||||
<a href="mailto:support@cloudron.io">just let us know</a>.
|
||||
|
||||
# User management
|
||||
|
||||
## Users
|
||||
|
||||
You can invite new users (friends, family, colleagues) with their email address from the `Users` menu. They will
|
||||
receive an invite to sign up with your Cloudron. They can now access the apps that you have given them access
|
||||
to.
|
||||
|
||||
<img src="/docs/img/users.png" class="shadow">
|
||||
|
||||
To remove a user, simply remove them from the list. Note that the removed user cannot access any app anymore.
|
||||
|
||||
## Administrators
|
||||
|
||||
A Cloudron administrator is a special right given to an existing Cloudron user allowing them to manage
|
||||
apps and users. To make an existing user an administator, click the edit (pencil) button corresponding to
|
||||
the user and check the `Allow this user to manage apps, groups and other users` checkbox.
|
||||
|
||||
<img src="/docs/img/administrator.png" class="shadow">
|
||||
|
||||
## Groups
|
||||
|
||||
Groups provide a convenient way to group users. Its purpose is two-fold:
|
||||
|
||||
* You can assign one or more groups to apps to restrict who can access an app.
|
||||
* Each group is a mailing list (forwarding address) consisting of its members.
|
||||
|
||||
You can create a group by using the `Groups` menu item.
|
||||
|
||||
<img src="/docs/img/groups.png" class="shadow">
|
||||
|
||||
To set the access restriction use the app's configure dialog.
|
||||
|
||||
<img src="/docs/img/app_access_control.png" class="shadow">
|
||||
|
||||
You can now send mails to `groupname@<domain>` to address all the group members.
|
||||
|
||||
# Login
|
||||
|
||||
## Cloudron admin
|
||||
|
||||
The Cloudron admin page is always located at the `my` subdomain of your Cloudron domain. For custom domains,
|
||||
this will be like `my.girish.in`. For domains from cloudron.io, this will be like `my-girish.cloudron.me`.
|
||||
|
||||
## Apps (single sign-on)
|
||||
|
||||
An important feature of the Cloudron is Single Sign-On. You use the same username & password for logging in
|
||||
to all your apps. No more having to manage separate set of credentials for each service!
|
||||
|
||||
## Single user apps
|
||||
|
||||
Some apps only work with a single user. For example, a notes app might allow only a single user to login and add
|
||||
notes. For such apps, you will be prompted during installation to select the single user who can access the app.
|
||||
|
||||
<img src="/docs/img/app_single_user.png" class="shadow">
|
||||
|
||||
If you want multiple users to use the app independently, simply install the app multiple times to different locations.
|
||||
|
||||
# Email
|
||||
|
||||
The Cloudron has a built-in email server. The primary email address is the same as the username. Emails can be sent
|
||||
and received from `<username>@<domain>`. The Cloudron does not allow masquerading - one user cannot send email
|
||||
pretending to be another user.
|
||||
|
||||
## Enabling Email
|
||||
|
||||
By default, Cloudron's email server only allows apps to send email. To enable users to send and receive email,
|
||||
turn on the option under `Settings`. Turning on this option also allows apps to _receive_ email.
|
||||
|
||||
Once email is enabled, the Cloudron will keep the `MX` DNS record updated.
|
||||
|
||||
<img src="/docs/img/enable_email.png" class="shadow">
|
||||
|
||||
## Receiving email using IMAP
|
||||
|
||||
Use the following settings to receive email.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 993
|
||||
* Connection Security - TLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
## Sending email using SMTP
|
||||
|
||||
Use the following settings to send email.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 587
|
||||
* Connection Security - STARTTLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
## Email filters using Sieve
|
||||
|
||||
Use the following settings to set up email filters via ManageSieve.
|
||||
|
||||
* Server Name - Use the `my` subdomain of your Cloudron
|
||||
* Port - 4190
|
||||
* Connection Security - TLS
|
||||
* Username/password - Same as your Cloudron credentials
|
||||
|
||||
The [Rainloop](https://cloudron.io/appstore.html?app=net.rainloop.cloudronapp) and [Roundcube](https://cloudron.io/appstore.html?app=net.roundcube.cloudronapp)
|
||||
apps are already pre-configured to use the above settings.
|
||||
|
||||
## Aliases
|
||||
|
||||
You can configure one or more aliases alongside the primary email address of each user. You can set aliases by editing the
|
||||
user's settings, available behind the edit button in the user listing. Note that aliases cannot conflict with existing user names.
|
||||
|
||||
<img src="/docs/img/email_alias.png" class="shadow">
|
||||
|
||||
Currently, it is not possible to login using the alias for SMTP/IMAP/Sieve services. Instead, add the alias as an identity in
|
||||
your mail client but login using the Cloudron credentials.
|
||||
|
||||
## Subaddresses
|
||||
|
||||
Emails addressed to `<username>+tag@<domain>` will be delivered to the `username` mailbox. You can use this feature to give out emails of the form
|
||||
`username+kayak@<domain>`, `username+aws@<domain>` and so on and have them all delivered to your mailbox.
|
||||
|
||||
## Forwarding addresses
|
||||
|
||||
Each group on the Cloudron is also a forwarding address. Mails can be addressed to `group@<domain>` and the mail will
|
||||
be sent to each user who is part of the group.
|
||||
|
||||
## Marking Spam
|
||||
|
||||
The spam detection agent on the Cloudron requires training to identify spam. To do this, simply move your junk mails
|
||||
to a pre-created folder named `Spam`. Most mail clients have a Junk or Spam button which does this automatically.
|
||||
|
||||
# Graphs
|
||||
|
||||
The Graphs view shows an overview of the disk and memory usage on your Cloudron.
|
||||
|
||||
<img src="/docs/img/graphs.png" class="shadow">
|
||||
|
||||
The `Disk Usage` graph shows you how much disk space you have left. Note that the Cloudron will
|
||||
send the Cloudron admins an email notification when the disk is ~90% full.
|
||||
|
||||
The `Apps` Memory graph shows the memory consumed by each installed app. You can click on each segment
|
||||
on the graph to see the memory consumption over time in the chart below it.
|
||||
|
||||
The `System` Memory graph shows the overall memory consumption on the entire Cloudron. If you see
|
||||
the Free memory < 50MB frequently, you should consider upgrading to a Cloudron with more memory.
|
||||
|
||||
# Activity log
|
||||
|
||||
The `Activity` view shows the activity on your Cloudron. It includes information about who is using
|
||||
the apps on your Cloudron and also tracks configuration changes.
|
||||
|
||||
<img src="/docs/img/activity.png" class="shadow">
|
||||
|
||||
# Domains and SSL Certificates
|
||||
|
||||
All apps on the Cloudron can only be reached by `https`. The Cloudron automatically installs and
|
||||
renews certificates for your apps as needed. Should installation of certificate fail for reasons
|
||||
beyond its control, Cloudron admins will get a notification about it.
|
||||
|
||||
# API Access
|
||||
|
||||
All the operations listed in this manual like installing app, configuring users and groups, are
|
||||
completely programmable with a [REST API](/references/api.html).
|
||||
|
||||
# Moving to a larger Cloudron
|
||||
|
||||
When using a Cloudron from cloudron.io, it is easy to migrate your apps and data to a bigger server.
|
||||
In the `Settings` page, you can change the plan.
|
||||
|
||||
<insert picture>
|
||||
|
||||
# Command line tool
|
||||
|
||||
If you are a software developer or a sysadmin, the Cloudron comes with a CLI tool that can be
|
||||
used to develop custom apps for the Cloudron. Read more about it [here](https://git.cloudron.io/cloudron/cloudron-cli).
|
||||
@@ -1,621 +0,0 @@
|
||||
# Overview
|
||||
|
||||
This tutorial provides an introduction to developing applications
|
||||
for the Cloudron using node.js.
|
||||
|
||||
# Installation
|
||||
|
||||
## Install CLI tool
|
||||
|
||||
The Cloudron CLI tool allows you to install, configure and test apps on your Cloudron.
|
||||
|
||||
Installing the CLI tool requires [node.js](https://nodejs.org/) and
|
||||
[npm](https://www.npmjs.com/). You can then install the CLI tool using the following
|
||||
command:
|
||||
|
||||
```
|
||||
sudo npm install -g cloudron
|
||||
```
|
||||
|
||||
Note: Depending on your setup, you can run the above command without `sudo`.
|
||||
|
||||
## Testing your installation
|
||||
|
||||
The `cloudron` command should now be available in your path.
|
||||
|
||||
Let's login to the Cloudron as follows:
|
||||
|
||||
```
|
||||
$ cloudron login
|
||||
Cloudron Hostname: craft.selfhost.io
|
||||
|
||||
Enter credentials for craft.selfhost.io:
|
||||
Username: girish
|
||||
Password:
|
||||
Login successful.
|
||||
```
|
||||
|
||||
## Your First Application
|
||||
|
||||
Creating an application for Cloudron can be summarized as follows:
|
||||
|
||||
1. Create a web application using any language/framework. This web application must run an HTTP server
|
||||
and can optionally provide other services using custom protocols (like git, ssh, TCP etc).
|
||||
|
||||
2. Create a [Dockerfile](http://docs.docker.com/engine/reference/builder/) that specifies how to create
|
||||
an application ```image```. An ```image``` is essentially a bundle of the application source code
|
||||
and its dependencies.
|
||||
|
||||
3. Create a [CloudronManifest.json](/references/manifest.html) file that provides essential information
|
||||
about the app. This includes information required for the Cloudron Store like title, version, icon and
|
||||
runtime requirements like `addons`.
|
||||
|
||||
## Simple Web application
|
||||
|
||||
To keep things simple, we will start by deploying a trivial node.js server running on port 8000.
|
||||
|
||||
Create a new project folder `tutorial/` and add a file named `tutorial/server.js` with the following content:
|
||||
```javascript
|
||||
var http = require("http");
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end("Hello World\n");
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
## Dockerfile
|
||||
|
||||
A Dockerfile contains commands to assemble an image.
|
||||
|
||||
Create a file named `tutorial/Dockerfile` with the following content:
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
All Cloudron apps **must** start from this base image.
|
||||
|
||||
The `ADD` command copies the source code of the app into the directory `/app/code`.
|
||||
While this example only copies a single file, the ADD command can be used to copy directory trees as well.
|
||||
See the [Dockerfile](https://docs.docker.com/reference/builder/#add) documentation for more details.
|
||||
|
||||
The `CMD` command specifies how to run the server. There are multiple versions of node available under `/usr/local`. We
|
||||
choose node v0.12.7 for our app.
|
||||
|
||||
## CloudronManifest.json
|
||||
|
||||
The `CloudronManifest.json` specifies
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example,
|
||||
the title, author information, description etc
|
||||
|
||||
* Information for installing the app on the Cloudron. This includes fields
|
||||
like httpPort, tcpPorts.
|
||||
|
||||
Create the CloudronManifest.json using the following command:
|
||||
|
||||
```
|
||||
$ cloudron init
|
||||
id: io.cloudron.tutorial # unique id for this app. use reverse domain name convention
|
||||
author: John Doe # developer or company name of the form `Name <email>`
|
||||
title: Tutorial App # Cloudron Store title of this app
|
||||
description: App that uses node.js # A string or local file reference like file://DESCRIPTION.md
|
||||
tagline: Changing the world one app at a time # A tag line for this app for the Cloudron Store
|
||||
website: https://cloudron.io # A link to this app's website
|
||||
contactEmail: support@cloudron.io # Contact email of developer or company
|
||||
httpPort: 8000 # The http port on which this application listens to
|
||||
```
|
||||
|
||||
The above command creates a CloudronManifest.json:
|
||||
|
||||
File ```tutorial/CloudronManifest.json```
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
You can read in more detail about each field in the [Manifest reference](/references/manifest.html).
|
||||
|
||||
# Installing
|
||||
|
||||
## Building
|
||||
|
||||
We now have all the necessary files in place to build and deploy the app to the Cloudron.
|
||||
Building creates an image of the app using the Dockerfile which can then be used to deploy
|
||||
to the Cloudron.
|
||||
|
||||
Building, pushing and pulling docker images is very bandwidth and CPU intensive. To alleviate this
|
||||
problem, apps are built using the `build service` which uses `cloudron.io` account credentials.
|
||||
|
||||
**Warning**: As of this writing, the build service uses the public Docker registry and the images that are built
|
||||
can be downloaded by anyone. This means that your source code will be viewable by others.
|
||||
|
||||
Initiate a build using ```cloudron build```:
|
||||
```
|
||||
$ cloudron build
|
||||
Building io.cloudron.tutorial@0.0.1
|
||||
|
||||
Appstore login:
|
||||
Email: ramakrishnan.girish@gmail.com # cloudron.io account
|
||||
Password: # Enter password
|
||||
Login successful.
|
||||
|
||||
Build scheduled with id 76cebfdd-7822-4f3d-af17-b3eb393ae604
|
||||
Downloading source
|
||||
Building
|
||||
Step 0 : FROM cloudron/base:0.10.0
|
||||
---> 97583855cc0c
|
||||
Step 1 : ADD server.js /app/code
|
||||
---> b09b97ecdfbc
|
||||
Removing intermediate container 03c1e1f77acb
|
||||
Step 2 : CMD /usr/local/node-0.12.7/bin/node /app/code/server.js
|
||||
---> Running in 370f59d87ab2
|
||||
---> 53b51eabcb89
|
||||
Removing intermediate container 370f59d87ab2
|
||||
Successfully built 53b51eabcb89
|
||||
The push refers to a repository [cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4] (len: 1)
|
||||
Sending image list
|
||||
Pushing repository cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4 (1 tags)
|
||||
Image already pushed, skipping 57f52d167bbb
|
||||
Image successfully pushed b09b97ecdfbc
|
||||
Image successfully pushed 53b51eabcb89
|
||||
Pushing tag for rev [53b51eabcb89] on {https://cdn-registry-1.docker.io/v1/repositories/cloudron/img-2074d69134a7e0da3d6cdf3c53e241c4/tags/76cebfdd-7822-4f3d-af17-b3eb393ae604}
|
||||
Build succeeded
|
||||
```
|
||||
|
||||
## Installing
|
||||
|
||||
Now that we have built the image, we can install our latest build on the Cloudron
|
||||
using the following command:
|
||||
|
||||
```
|
||||
$ cloudron install
|
||||
Using cloudron craft.selfhost.io
|
||||
Using build 76cebfdd-7822-4f3d-af17-b3eb393ae604 from 1 hour ago
|
||||
Location: tutorial # This is the location into which the application installs
|
||||
App is being installed with id: 4dedd3bb-4bae-41ef-9f32-7f938995f85e
|
||||
|
||||
=> Waiting to start installation
|
||||
=> Registering subdomain .
|
||||
=> Verifying manifest .
|
||||
=> Downloading image ..............
|
||||
=> Creating volume .
|
||||
=> Creating container
|
||||
=> Setting up collectd profile ................
|
||||
=> Waiting for DNS propagation ...
|
||||
|
||||
App is installed.
|
||||
```
|
||||
|
||||
This makes the app available at https://tutorial-craft.selfhost.io.
|
||||
|
||||
Open the app in your default browser:
|
||||
```
|
||||
cloudron open
|
||||
```
|
||||
|
||||
You should see `Hello World`.
|
||||
|
||||
# Testing
|
||||
|
||||
The application testing cycle involves `cloudron build` and `cloudron install`.
|
||||
Note that `cloudron install` updates an existing app in place.
|
||||
|
||||
You can view the logs using `cloudron logs`. When the app is running you can follow the logs
|
||||
using `cloudron logs -f`.
|
||||
|
||||
For example, you can see the console.log output in our server.js with the command below:
|
||||
|
||||
```
|
||||
$ cloudron logs
|
||||
Using cloudron craft.selfhost.io
|
||||
2015-05-08T03:28:40.233940616Z Server running at port 8000
|
||||
```
|
||||
|
||||
It is also possible to run a *shell* and *execute* arbitrary commands in the context of the application
|
||||
process by using `cloudron exec`. By default, exec simply drops you into an interactive bash shell with
|
||||
which you can inspect the file system and the environment.
|
||||
|
||||
```
|
||||
$ cloudron exec
|
||||
```
|
||||
|
||||
You can also execute arbitrary commands:
|
||||
```
|
||||
$ cloudron exec env # display the env variables that your app is running with
|
||||
```
|
||||
|
||||
# Storing data
|
||||
|
||||
For file system storage, an app can use the `localstorage` addon to store data under `/app/data`.
|
||||
When the `localstorage` addon is active, any data under /app/data is automatically backed up. When an
|
||||
app is updated, /app/data already contains the data generated by the previous version.
|
||||
|
||||
*Note*: For convenience, the initial CloudronManifest.json generated by `cloudron init` already contains this
|
||||
addon.
|
||||
|
||||
Let us put this theory into action by saving a *visit counter* as a file.
|
||||
*server.js* has been modified to count the number of visitors on the site by storing a counter
|
||||
in a file named ```counter.dat```.
|
||||
|
||||
File ```tutorial/server.js```
|
||||
|
||||
```javascript
|
||||
var http = require('http'),
|
||||
fs = require('fs'),
|
||||
util = require('util');
|
||||
|
||||
var COUNTER_FILE = '/app/data/counter.dat';
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
var counter = 0;
|
||||
if (fs.existsSync(COUNTER_FILE)) {
|
||||
// read existing counter if it exists
|
||||
counter = parseInt(fs.readFileSync(COUNTER_FILE, 'utf8'), 10);
|
||||
}
|
||||
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end(util.format("Hello World. %s visitors have visited this page\n", counter));
|
||||
++counter; // bump the counter
|
||||
fs.writeFileSync(COUNTER_FILE, counter + '', 'utf8'); // save back counter
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Now every time you refresh the page you will notice that the counter bumps up. You will
|
||||
also notice that if you make changes to the app and do a `cloudron install`, the `counter.dat`
|
||||
is *retained* across updates.
|
||||
|
||||
# Database
|
||||
|
||||
Most web applications require a database of some form. In theory, it is possible to run any
|
||||
database you want as part of the application image. This is, however, a waste of server resources
|
||||
if every app were to run its own database server.
|
||||
|
||||
To solve this, the Cloudron provides shareable resources like databases in form of ```addons```.
|
||||
The database server is managed by the Cloudron and the application simply needs to request access to
|
||||
the database in the CloudronManifest.json. While the database server itself is a shared resource, the
|
||||
databases are exclusive to the application. Each database is password protected and accessible only
|
||||
to the application. Databases and tables can be configured without restriction as the application
|
||||
requires.
|
||||
|
||||
Cloudron currently provides `mysql`, `postgresql`, `mongodb`, `redis` database addons.
|
||||
|
||||
For this tutorial, let us try to save the counter in `redis` addon. For this, we make use of the
|
||||
[redis](https://www.npmjs.com/package/redis) module.
|
||||
|
||||
Since this is a node.js app, let's add a very basic `package.json` containing the `redis` module dependency.
|
||||
|
||||
File `tutorial/package.json`
|
||||
```json
|
||||
{
|
||||
"name": "tutorial",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"redis": "^0.12.1"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
and modify our Dockerfile to look like this:
|
||||
|
||||
File `tutorial/Dockerfile`
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
ADD package.json /app/code/package.json
|
||||
|
||||
WORKDIR /app/code
|
||||
RUN npm install --production
|
||||
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
Notice the new `RUN` command which installs the node module dependencies in package.json using `npm install`.
|
||||
|
||||
Since we want to use redis, we have to modify the CloudronManifest.json to make redis available for this app.
|
||||
|
||||
File `tutorial/CloudronManifest.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"redis": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
When the application runs, environment variables `REDIS_HOST`, `REDIS_PORT` and
|
||||
`REDIS_PASSWORD` are injected. You can read about the environment variables in the
|
||||
[Redis reference](/references/addons.html#redis).
|
||||
|
||||
Let's change `server.js` to use redis instead of file backed counting:
|
||||
|
||||
File ```tutorial/server.js```
|
||||
|
||||
```javascript
|
||||
var http = require('http'),
|
||||
fs = require('fs'),
|
||||
util = require('util'),
|
||||
redis = require('redis');
|
||||
|
||||
var redisClient = redis.createClient(process.env.REDIS_PORT, process.env.REDIS_HOST);
|
||||
redisClient.auth(process.env.REDIS_PASSWORD);
|
||||
redisClient.on("error", function (err) {
|
||||
console.log("Redis Client Error " + err);
|
||||
});
|
||||
|
||||
var COUNTER_KEY = 'counter';
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
redisClient.get(COUNTER_KEY, function (err, reply) {
|
||||
var counter = (!err && reply) ? parseInt(reply, 10) : 0;
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end(util.format("Hello World. %s visitors have visited this page\n", counter));
|
||||
redisClient.incr(COUNTER_KEY);
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Simply `cloudron build` and `cloudron install` to test your app!
|
||||
|
||||
# Authentication
|
||||
|
||||
The Cloudron has a centralized panel for managing users and groups. Apps can integrate Single Sign-On
|
||||
authentication using LDAP or OAuth.
|
||||
|
||||
Note that apps that are single user can skip Single Sign-On support. The Cloudron implements an `OAuth
|
||||
proxy` (accessed through the app configuration dialog) that optionally lets the Cloudron admin make the
|
||||
app visible only for logged in users.
|
||||
|
||||
## LDAP
|
||||
|
||||
Let's start out by adding the [ldap](/references/addons.html#ldap) addon to the manifest.
|
||||
|
||||
File `tutorial/CloudronManifest.json`
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"author": "John Doe",
|
||||
"title": "Tutorial App",
|
||||
"description": "App that uses node.js",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {},
|
||||
"ldap": {}
|
||||
},
|
||||
"minBoxVersion": "0.0.1",
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"mediaLinks": []
|
||||
}
|
||||
```
|
||||
|
||||
Building and installing the app shows that the app gets new LDAP specific environment variables.
|
||||
|
||||
```
|
||||
$ cloudron build
|
||||
$ cloudron install
|
||||
$ cloudron exec env | grep LDAP
|
||||
LDAP_SERVER=172.17.42.1
|
||||
LDAP_PORT=3002
|
||||
LDAP_URL=ldap://172.17.42.1:3002
|
||||
LDAP_USERS_BASE_DN=ou=users,dc=cloudron
|
||||
LDAP_GROUPS_BASE_DN=ou=groups,dc=cloudron
|
||||
```
|
||||
|
||||
Let's test the environment variables to use by using the [ldapjs](http://www.ldapjs.org) npm module.
|
||||
We start by adding ldapjs to package.json.
|
||||
|
||||
File `tutorial/package.json`
|
||||
```json
|
||||
{
|
||||
"name": "tutorial",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"ldapjs": "^0.7.1"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The server code has been modified to authenticate using the `X-Username` and `X-Password` headers for
|
||||
any path other than '/'.
|
||||
|
||||
File `tutorial/server.js`
|
||||
```javascript
|
||||
var http = require("http"),
|
||||
ldap = require('ldapjs');
|
||||
|
||||
var ldapClient = ldap.createClient({ url: process.env.LDAP_URL });
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
if (request.url === '/') {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
return response.end();
|
||||
}
|
||||
|
||||
var username = request.headers['x-username'] || '';
|
||||
var password = request.headers['x-password'] || '';
|
||||
var ldapDn = 'cn=' + username + ',' + process.env.LDAP_USERS_BASE_DN;
|
||||
|
||||
ldapClient.bind(ldapDn, password, function (error) {
|
||||
if (error) {
|
||||
response.writeHead(401, {"Content-Type": "text/plain"});
|
||||
response.end('Failed to authenticate: ' + error);
|
||||
} else {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end('Successfully authenticated');
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
Once we have used `cloudron build` and `cloudron install`, you can use `curl` to test
|
||||
credentials as follows:
|
||||
|
||||
```bash
|
||||
# Test with various credentials here. Your cloudron admin username and password should succeed.
|
||||
curl -H 'X-Username: admin' -H 'X-Password: pass' https://tutorial-craft.selfhost.io/login
|
||||
```
|
||||
|
||||
## OAuth
|
||||
|
||||
An app can integrate with OAuth 2.0 Authorization code grant flow by adding
|
||||
[oauth](/references/addons.html#oauth) to CloudronManifest.json `addons` section.
|
||||
|
||||
Doing so will get the following environment variables:
|
||||
```
|
||||
$ cloudron exec env
|
||||
OAUTH_CLIENT_ID=cid-addon-4089f65a-2adb-49d2-a6d1-e519b7d85e8d
|
||||
OAUTH_CLIENT_SECRET=5af99a9633283aa15f5e6df4a108ff57f82064e4845de8bce8ad3af54dfa9dda
|
||||
OAUTH_ORIGIN=https://my-craft.selfhost.io
|
||||
API_ORIGIN=https://my-craft.selfhost.io
|
||||
HOSTNAME=tutorial-craft.selfhost.io
|
||||
```
|
||||
|
||||
OAuth Authorization code grant flow works as follows:
|
||||
* App starts the flow by redirecting the user to Cloudron authorization endpoint of the following format:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/oauth/dialog/authorize?response_type=code&client_id=OAUTH_CLIENT_ID&redirect_uri=CALLBACK_URL&scope=profile
|
||||
```
|
||||
|
||||
In the above URL, API_ORIGIN and OAUTH_CLIENT_ID are environment variables. CALLBACK_URL is a url of the app
|
||||
to which the user will be redirected back to after successful authentication. CALLBACK_URL has to have the
|
||||
same origin as the app.
|
||||
|
||||
* The Cloudron OAuth server authenticates the user (using a password form) at the above URL. It also establishes
|
||||
that the user grants the client's access request.
|
||||
|
||||
* If the user authenticated successfully, it will redirect the browser to CALLBACK_URL with a `code` query parameter.
|
||||
|
||||
* The app can exchange the `code` above for an `access token` by using the `OAUTH_CLIENT_SECRET`. It does so by making
|
||||
a _POST_ request to the following url:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/oauth/token?response_type=token&client_id=OAUTH_CLIENT_ID
|
||||
```
|
||||
with the following request body (json):
|
||||
```json
|
||||
{
|
||||
"grant_type": "authorization_code",
|
||||
"code": "<the code received in CALLBACK_URL query parameter>",
|
||||
"redirect_uri": "https://<HOSTNAME>",
|
||||
"client_id": "<OAUTH_CLIENT_ID>",
|
||||
"client_secret": "<OAUTH_CLIENT_SECRET>"
|
||||
}
|
||||
```
|
||||
|
||||
In the above URL, API_ORIGIN, OAUTH_CLIENT_ID and HOSTNAME are environment variables. The response contains
|
||||
the `access_token` in the body.
|
||||
|
||||
* The `access_token` can be used to get the [user's profile](/references/api.html#profile) using the following url:
|
||||
```
|
||||
https://API_ORIGIN/api/v1/profile?access_token=ACCESS_TOKEN
|
||||
```
|
||||
|
||||
The `access_token` may also be provided in the `Authorization` header as `Bearer <token>`.
|
||||
|
||||
An implementation of the above OAuth logic is at [ircd-app](https://github.com/cloudron-io/ircd-app/blob/master/settings/app.js).
|
||||
|
||||
The following libraries implement Cloudron OAuth for Ruby and Javascript.
|
||||
|
||||
* [omniauth-cloudron](https://github.com/cloudron-io/omniauth-cloudron)
|
||||
* [passport-cloudron](https://github.com/cloudron-io/passport-cloudron)
|
||||
|
||||
# Beta Testing
|
||||
|
||||
Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
The app should now be visible in the Store view of your cloudron under
|
||||
the 'Testing' section. You can check if the icon, description and other details
|
||||
appear correctly.
|
||||
|
||||
Other Cloudron users can install your app on their Cloudrons using
|
||||
`cloudron install --appstore-id <appid@version>`. Note that this currently
|
||||
requires your beta testers to install the CLI tool and put their Cloudron in
|
||||
developer mode.
|
||||
|
||||
# Publishing
|
||||
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
# Next steps
|
||||
|
||||
Congratulations! You are now well equipped to build web applications for the Cloudron.
|
||||
|
||||
# Samples
|
||||
|
||||
* [Lets Chat](https://github.com/cloudron-io/letschat-app)
|
||||
* [Haste bin](https://github.com/cloudron-io/haste-app)
|
||||
* [Pasteboard](https://github.com/cloudron-io/pasteboard-app)
|
||||
@@ -1,497 +0,0 @@
|
||||
# Overview
|
||||
|
||||
This tutorial outlines how to package an existing web application for the Cloudron.
|
||||
|
||||
If you are aware of Docker and Heroku, you should feel at home packaging for the
|
||||
Cloudron. Roughly, the steps involved are:
|
||||
|
||||
* Create a Dockerfile for your application. If your application already has a Dockerfile, it
|
||||
is a good starting point for packaging for the Cloudron. By virtue of Docker, the Cloudron
|
||||
is able to run apps written in any language/framework.
|
||||
|
||||
* Create a CloudronManifest.json that provides information like title, author, description
|
||||
etc. You can also specify the addons (like database) required
|
||||
to run your app. When the app runs on the Cloudron, it will have environment
|
||||
variables set for connecting to the addon.
|
||||
|
||||
* Test the app on your Cloudron with the CLI tool.
|
||||
|
||||
* Optionally, submit the app to [Cloudron Store](/appstore.html).
|
||||
|
||||
# Prerequisites
|
||||
|
||||
## Install CLI tool
|
||||
|
||||
The Cloudron CLI tool allows you to install, configure and test apps on your Cloudron.
|
||||
|
||||
Installing the CLI tool requires [node.js](https://nodejs.org/) and
|
||||
[npm](https://www.npmjs.com/). You can then install the CLI tool using the following
|
||||
command:
|
||||
|
||||
```
|
||||
sudo npm install -g cloudron
|
||||
```
|
||||
|
||||
Note: Depending on your setup, you can run the above command without `sudo`.
|
||||
|
||||
## Login to Cloudron
|
||||
|
||||
The `cloudron` command should now be available in your path.
|
||||
|
||||
You can login to your Cloudron now:
|
||||
|
||||
```
|
||||
$ cloudron login
|
||||
Cloudron Hostname: craft.selfhost.io
|
||||
|
||||
Enter credentials for craft.selfhost.io:
|
||||
Username: girish
|
||||
Password:
|
||||
Login successful.
|
||||
```
|
||||
|
||||
# Basic app
|
||||
|
||||
We will first package a very simple app to understand how the packaging works.
|
||||
You can clone this app from https://git.cloudron.io/cloudron/tutorial-basic.
|
||||
|
||||
## The server
|
||||
|
||||
The basic app server is a very simple HTTP server that runs on port 8000.
|
||||
While the server in this tutorial uses node.js, you can write your server
|
||||
in any language you want.
|
||||
|
||||
```server.js
|
||||
var http = require("http");
|
||||
|
||||
var server = http.createServer(function (request, response) {
|
||||
response.writeHead(200, {"Content-Type": "text/plain"});
|
||||
response.end("Hello World\n");
|
||||
});
|
||||
|
||||
server.listen(8000);
|
||||
|
||||
console.log("Server running at port 8000");
|
||||
```
|
||||
|
||||
## Dockerfile
|
||||
|
||||
The Dockerfile contains instructions on how to create an image for your application.
|
||||
|
||||
```Dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-4.4.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
All Cloudron apps **must** start from this base image. This approach conserves space on the Cloudron since
|
||||
Docker images tend to be quite large and also helps us to do a security audit on apps more easily.
|
||||
|
||||
The `ADD` command copies the source code of the app into the directory `/app/code`. There is nothing special
|
||||
about the `/app/code` directory and it is merely a convention we use to store the application code.
|
||||
|
||||
The `CMD` command specifies how to run the server. The base image already contains many different versions of
|
||||
node.js. We use Node 4.4.7 here.
|
||||
|
||||
This Dockerfile can be built and run locally as:
|
||||
```
|
||||
docker build -t tutorial .
|
||||
docker run -p 8000:8000 -t tutorial
|
||||
```
|
||||
|
||||
## Manifest
|
||||
|
||||
The `CloudronManifest.json` specifies
|
||||
|
||||
* Information for installing and running the app on the Cloudron. This includes fields like addons, httpPort, tcpPorts.
|
||||
|
||||
* Information about displaying the app on the Cloudron Store. For example, fields like title, author, description.
|
||||
|
||||
Create the CloudronManifest.json using `cloudron init` as follows:
|
||||
|
||||
```
|
||||
$ cloudron init
|
||||
id: io.cloudron.tutorial # unique id for this app. use reverse domain name convention
|
||||
author: John Doe # developer or company name of the form "user <email>"
|
||||
title: Tutorial App # Cloudron Store title of this app
|
||||
description: App that uses node.js # A string or local file reference like file://DESCRIPTION.md
|
||||
tagline: Changing the world one app at a time # A tag line for this app for the Cloudron Store
|
||||
website: https://cloudron.io # A link to this app's website
|
||||
contactEmail: support@cloudron.io # Contact email of developer or company
|
||||
httpPort: 8000 # The http port on which this application listens to
|
||||
```
|
||||
|
||||
The above command creates a CloudronManifest.json:
|
||||
|
||||
File ```tutorial/CloudronManifest.json```
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "io.cloudron.tutorial",
|
||||
"title": "Tutorial App",
|
||||
"author": "John Doe",
|
||||
"description": "file://DESCRIPTION.md",
|
||||
"changelog": "file://CHANGELOG",
|
||||
"tagline": "Changing the world one app at a time",
|
||||
"version": "0.0.1",
|
||||
"healthCheckPath": "/",
|
||||
"httpPort": 8000,
|
||||
"addons": {
|
||||
"localstorage": {}
|
||||
},
|
||||
"manifestVersion": 1,
|
||||
"website": "https://cloudron.io",
|
||||
"contactEmail": "support@cloudron.io",
|
||||
"icon": "",
|
||||
"tags": [
|
||||
    "changeme"
|
||||
],
|
||||
"mediaLinks": [ ]
|
||||
}
|
||||
```
|
||||
|
||||
You can read in more detail about each field in the [Manifest reference](/references/manifest.html). The
|
||||
`localstorage` addon allows the app to store files in `/app/data`. We will explore addons further
|
||||
down in this tutorial.
|
||||
|
||||
Additional files created by `init` are:
|
||||
* `DESCRIPTION.md` - A markdown file providing description of the app for the Cloudron Store.
|
||||
* `CHANGELOG` - A file containing change information for each version released to the Cloudron Store. This
|
||||
information is shown when the user updates the app.
|
||||
|
||||
# Installing
|
||||
|
||||
We now have all the necessary files in place to build and deploy the app to the Cloudron.
|
||||
|
||||
## Building
|
||||
|
||||
Building, pushing and pulling docker images can be very bandwidth and CPU intensive. To alleviate this
|
||||
problem, apps are built using the `build service` which uses `cloudron.io` account credentials.
|
||||
|
||||
**Warning**: As of this writing, the build service uses the public Docker registry and the images that are built
|
||||
can be downloaded by anyone. This means that your source code will be viewable by others.
|
||||
|
||||
Initiate a build using ```cloudron build```:
|
||||
```
|
||||
$ cloudron build
|
||||
Building io.cloudron.tutorial@0.0.1
|
||||
|
||||
Appstore login:
|
||||
Email: ramakrishnan.girish@gmail.com # cloudron.io account
|
||||
Password: # Enter password
|
||||
Login successful.
|
||||
|
||||
Build scheduled with id e7706847-f2e3-4ba2-9638-3f334a9453a5
|
||||
Waiting for build to begin, this may take a bit...
|
||||
Downloading source
|
||||
Building
|
||||
Step 1 : FROM cloudron/base:0.10.0
|
||||
---> be9fc6312b2d
|
||||
Step 2 : ADD server.js /app/code/server.js
|
||||
---> 10513e428d7a
|
||||
Removing intermediate container 574573f6ed1c
|
||||
Step 3 : CMD /usr/local/node-4.2.1/bin/node /app/code/server.js
|
||||
---> Running in b541d149b6b9
|
||||
---> 51aa796ea6e5
|
||||
Removing intermediate container b541d149b6b9
|
||||
Successfully built 51aa796ea6e5
|
||||
Pushing
|
||||
The push refers to a repository [docker.io/cloudron/img-062037096d69bbf3ffb5b9316ad89cb9] (len: 1)
|
||||
Pushed 51aa796ea6e5
|
||||
Pushed 10513e428d7a
|
||||
Image already exists be9fc6312b2d
|
||||
Image already exists a0261a2a7c75
|
||||
Image already exists f9d4f0f1eeed
|
||||
Image already exists 2b650158d5d8
|
||||
e7706847-f2e3-4ba2-9638-3f334a9453a5: digest: sha256:8241d68b65874496191106ecf2ee8f3df2e05a953cd90ff074a6f8815a49389c size: 26098
|
||||
Build succeeded
|
||||
Success
|
||||
```
|
||||
|
||||
## Installing
|
||||
|
||||
Now that we have built the image, we can install our latest build on the Cloudron
|
||||
using the following command:
|
||||
|
||||
```
|
||||
$ cloudron install
|
||||
Using cloudron craft.selfhost.io
|
||||
Using build 76cebfdd-7822-4f3d-af17-b3eb393ae604 from 1 hour ago
|
||||
Location: tutorial # This is the location into which the application installs
|
||||
App is being installed with id: 4dedd3bb-4bae-41ef-9f32-7f938995f85e
|
||||
|
||||
=> Waiting to start installation
|
||||
=> Registering subdomain .
|
||||
=> Verifying manifest .
|
||||
=> Downloading image ..............
|
||||
=> Creating volume .
|
||||
=> Creating container
|
||||
=> Setting up collectd profile ................
|
||||
=> Waiting for DNS propagation ...
|
||||
|
||||
App is installed.
|
||||
```
|
||||
|
||||
Open the app in your default browser:
|
||||
```
|
||||
cloudron open
|
||||
```
|
||||
|
||||
You should see `Hello World`.
|
||||
|
||||
# Testing
|
||||
|
||||
The application testing cycle involves `cloudron build` and `cloudron install`.
|
||||
Note that `cloudron install` updates an existing app in place.
|
||||
|
||||
You can view the logs using `cloudron logs`. When the app is running you can follow the logs
|
||||
using `cloudron logs -f`.
|
||||
|
||||
For example, you can see the console.log output in our server.js with the command below:
|
||||
|
||||
```
|
||||
$ cloudron logs
|
||||
Using cloudron craft.selfhost.io
|
||||
16:44:11 [main] Server running at port 8000
|
||||
```
|
||||
|
||||
It is also possible to run a *shell* and *execute* arbitrary commands in the context of the application
|
||||
process by using `cloudron exec`. By default, exec simply drops you into an interactive bash shell with
|
||||
which you can inspect the file system and the environment.
|
||||
|
||||
```
|
||||
$ cloudron exec
|
||||
```
|
||||
|
||||
You can also execute arbitrary commands:
|
||||
```
|
||||
$ cloudron exec env # display the env variables that your app is running with
|
||||
```
|
||||
|
||||
### Debugging
|
||||
|
||||
An app can be placed in `debug` mode by passing `--debug` to `cloudron install` or `cloudron configure`.
|
||||
Doing so, runs the app in a non-readonly rootfs and unlimited memory. By default, this will also ignore
|
||||
the `RUN` command specified in the Dockerfile. The developer can then interactively test the app and
|
||||
startup scripts using `cloudron exec`.
|
||||
|
||||
This mode can be used to identify the files being modified by your application - often required to
|
||||
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
|
||||
and use `find / -mmin -30` to find files that have been changed or created in the last 30 minutes.
|
||||
|
||||
You can turn off debugging mode using `cloudron configure --no-debug`.
|
||||
|
||||
# Addons
|
||||
|
||||
## Filesystem
|
||||
|
||||
The application container created on the Cloudron has a `readonly` file system. Writing to any location
|
||||
other than the below will result in an error:
|
||||
|
||||
* `/tmp` - Use this location for temporary files. The Cloudron will cleanup any files in this directory
|
||||
periodically.
|
||||
|
||||
* `/run` - Use this location for runtime configuration and dynamic data. These files should not be expected
|
||||
to persist across application restarts (for example, after an update or a crash).
|
||||
|
||||
* `/app/data` - Use this location to store application data that is to be backed up. To use this location,
|
||||
you must use the [localstorage](/references/addons.html#localstorage) addon. For convenience, the initial CloudronManifest.json generated by
|
||||
`cloudron init` already contains this addon.
|
||||
|
||||
## Database
|
||||
|
||||
Most web applications require a database of some form. In theory, it is possible to run any
|
||||
database you want as part of the application image. This is, however, a waste of server resources
|
||||
should every app run its own database server.
|
||||
|
||||
Cloudron currently provides [mysql](/references/addons.html#mysql), [postgresql](/references/addons.html#postgresql),
|
||||
[mongodb](/references/addons.html#mongodb), [redis](/references/addons.html#redis) database addons. When choosing
|
||||
these addons, the Cloudron will inject environment variables that contain information on how to connect
|
||||
to the addon.
|
||||
|
||||
See https://git.cloudron.io/cloudron/tutorial-redis for a simple example of how redis can be used by
|
||||
an application. The server simply uses the environment variables to connect to redis.
|
||||
|
||||
## Email
|
||||
|
||||
Cloudron applications can send email using the `sendmail` addon. Using the `sendmail` addon provides
|
||||
the SMTP server and authentication credentials in environment variables.
|
||||
|
||||
Cloudron applications can also receive mail via IMAP using the `recvmail` addon.
|
||||
|
||||
## Authentication
|
||||
|
||||
The Cloudron has a centralized panel for managing users and groups. Apps can integrate Single Sign-On
|
||||
authentication using LDAP or OAuth.
|
||||
|
||||
Apps can integrate with the Cloudron authentication system using LDAP, OAuth or Simple Auth. See the
|
||||
[authentication](/references/authentication.html) reference page for more details.
|
||||
|
||||
See https://git.cloudron.io/cloudron/tutorial-ldap for a simple example of how to authenticate via LDAP.
|
||||
|
||||
Apps that are single user can skip Single Sign-On support by setting `"singleUser": true`
|
||||
in the manifest. By doing so, the Cloudron installer will show a dialog to choose a user.
|
||||
|
||||
For apps that have no user management at all, the Cloudron implements an `OAuth proxy` that
|
||||
optionally lets the Cloudron admin make the app visible only for logged in users.
|
||||
|
||||
# Best practices
|
||||
|
||||
## No Setup
|
||||
|
||||
A Cloudron app is meant to be instantly usable after installation. For this reason, Cloudron apps must not
|
||||
show any setup screen after installation and should simply choose reasonable defaults.
|
||||
|
||||
Databases, email configuration should be automatically picked up from the environment variables using
|
||||
addons.
|
||||
|
||||
## Dockerfile
|
||||
|
||||
The app is run as a read-only docker container. Because of this:
|
||||
* Install any required packages in the Dockerfile.
|
||||
* Create static configuration files in the Dockerfile.
|
||||
* Create symlinks to dynamic configuration files under /run in the Dockerfile.
|
||||
|
||||
## Process manager
|
||||
|
||||
Docker supports restarting processes natively. Should your application crash, it will be restarted
|
||||
automatically. If your application is a single process, you do not require any process manager.
|
||||
|
||||
Use supervisor, pm2 or any of the other process managers if your application has more than one component.
|
||||
This **excludes** web servers like apache, nginx which can already manage their children by themselves.
|
||||
Be sure to pick a process manager that forwards signals to child processes.
|
||||
|
||||
## Automatic updates
|
||||
|
||||
Some apps support automatic updates by overwriting themselves. A Cloudron app cannot overwrite itself
|
||||
because of the read-only file system. For this reason, disable auto updates for the app and let updates be
|
||||
triggered through the Cloudron Store. This ties in better to the Cloudron's update and restore approach
|
||||
should something go wrong with the update.
|
||||
|
||||
## Logging
|
||||
|
||||
Cloudron applications stream their logs to stdout and stderr. In practice, this ideal is hard to achieve.
|
||||
Some programs like apache simply don't log to stdout. In those cases, simply log to `/tmp` or `/run`.
|
||||
|
||||
Logging to stdout has many advantages:
|
||||
* App does not need to rotate logs and the Cloudron takes care of managing logs.
|
||||
* App does not need special mechanism to release log file handles (on a log rotate).
|
||||
* Integrates better with tooling like cloudron cli.
|
||||
|
||||
## Memory
|
||||
|
||||
By default, applications get 256MB RAM (including swap). This can be changed using the `memoryLimit`
|
||||
field in the manifest.
|
||||
|
||||
Design your application runtime for concurrent use by 50 users. The Cloudron is not designed for
|
||||
concurrent access by 100s or 1000s of users.
|
||||
|
||||
An app can determine its memory limit by reading `/sys/fs/cgroup/memory/memory.limit_in_bytes`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
This saves the user from having to manage separate set of credentials for each app.
|
||||
|
||||
## Startup Script
|
||||
|
||||
Many apps do not launch the server directly, as we did in our basic example. Instead, they execute
|
||||
a `start.sh` script (named so by convention) which launches the server. Before starting the server,
|
||||
the `start.sh` script does the following:
|
||||
|
||||
* When using the `localstorage` addon, it changes the ownership of files in `/app/data` as desired using `chown`. This
|
||||
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
|
||||
updates.
|
||||
|
||||
* Addon information (mail, database) exposed as environment are subject to change across restarts and an application
|
||||
must use these values directly (i.e., not cache them across restarts). For this reason, it usually regenerates
|
||||
any config files with the current database settings on each invocation.
|
||||
|
||||
* Finally, it starts the server as a non-root user.
|
||||
|
||||
The app's main process must handle SIGTERM and forward it as required to child processes. bash does not
|
||||
automatically forward signals to child processes. For this reason, when using a startup shell script,
|
||||
remember to use `exec <app>` as the last line. Doing so will replace bash with your program and allows
|
||||
your program to handle signals as required.
|
||||
|
||||
# Beta Testing
|
||||
|
||||
## Metadata
|
||||
|
||||
Publishing to the Cloudron Store requires apps to have meta data specified in the `CloudronManifest.json`.
|
||||
|
||||
The `cloudron` tool will notify if any such information is missing, prior to uploading.
|
||||
See more information for each field [here](/references/manifest.html).
|
||||
|
||||
## Upload for Testing
|
||||
|
||||
Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
You should now be able to visit `/#/appstore/<appid>?version=<appversion>` on your
|
||||
Cloudron to check if the icon, description and other details appear correctly.
|
||||
|
||||
Other Cloudron users can install your app on their Cloudrons using
|
||||
`cloudron install --appstore-id <appid@version>`.
|
||||
|
||||
# Publishing
|
||||
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
# Updating the app
|
||||
|
||||
## Versioning
|
||||
|
||||
To create an update for an app, simply bump up the [semver version](/references/manifest.html#version) field in
|
||||
the manifest and publish a new version to the store.
|
||||
|
||||
The Cloudron chooses the next app version to update to based on the following algorithm:
|
||||
* Choose the maximum `patch` version matching the app's current `major` and `minor` version.
|
||||
* Failing the above, choose the maximum patch version of the next minor version matching the app's current `major` version.
|
||||
* Failing the above, choose the maximum patch and minor version of the next major version
|
||||
|
||||
For example, let's assume the versions 1.1.3, 1.1.4, 1.1.5, 1.2.4, 1.2.6, 1.3.0, 2.0.0 are published.
|
||||
|
||||
* If the app is running 1.1.3, then app will directly update to 1.1.5 (skipping 1.1.4)
|
||||
* Once in 1.1.5, the app will update to 1.2.6 (skipping 1.2.4)
|
||||
* Once in 1.2.6, the app will update to 1.3.0
|
||||
* Once in 1.3.0, the app will update to 2.0.0
|
||||
|
||||
The Cloudron admins get notified by email for any major or minor app releases.
|
||||
|
||||
## Failed updates
|
||||
|
||||
The Cloudron always makes a backup of the app before making an update. Should the
|
||||
update fail, the user can restore to the backup (which will also restore the app's
|
||||
code to the previous version).
|
||||
|
||||
# Cloudron Button
|
||||
|
||||
The [Cloudron Button](/references/button.html) allows anyone to install your application with the click of a button
|
||||
on their Cloudron.
|
||||
|
||||
The button can be added to just about any website including the application's website
|
||||
and README.md files in GitHub repositories.
|
||||
|
||||
# Next steps
|
||||
|
||||
Congratulations! You are now well equipped to build web applications for the Cloudron.
|
||||
|
||||
You can see some examples of how real apps are packaged here:
|
||||
|
||||
* [Lets Chat](https://git.cloudron.io/cloudron/letschat-app)
|
||||
* [Haste bin](https://git.cloudron.io/cloudron/haste-app)
|
||||
* [Pasteboard](https://git.cloudron.io/cloudron/pasteboard-app)
|
||||
@@ -2,17 +2,18 @@
|
||||
|
||||
'use strict';
|
||||
|
||||
var ejs = require('gulp-ejs'),
|
||||
gulp = require('gulp'),
|
||||
del = require('del'),
|
||||
concat = require('gulp-concat'),
|
||||
uglify = require('gulp-uglify'),
|
||||
serve = require('gulp-serve'),
|
||||
sass = require('gulp-sass'),
|
||||
sourcemaps = require('gulp-sourcemaps'),
|
||||
cssnano = require('gulp-cssnano'),
|
||||
var argv = require('yargs').argv,
|
||||
autoprefixer = require('gulp-autoprefixer'),
|
||||
argv = require('yargs').argv;
|
||||
concat = require('gulp-concat'),
|
||||
cssnano = require('gulp-cssnano'),
|
||||
ejs = require('gulp-ejs'),
|
||||
gulp = require('gulp'),
|
||||
rimraf = require('rimraf'),
|
||||
sass = require('gulp-sass'),
|
||||
serve = require('gulp-serve'),
|
||||
sourcemaps = require('gulp-sourcemaps'),
|
||||
uglify = require('gulp-uglify'),
|
||||
url = require('url');
|
||||
|
||||
gulp.task('3rdparty', function () {
|
||||
gulp.src([
|
||||
@@ -49,19 +50,21 @@ if (argv.help || argv.h) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
gulp.task('js', ['js-index', 'js-setup', 'js-setupdns', 'js-update'], function () {});
|
||||
gulp.task('js', ['js-index', 'js-logs', 'js-terminal', 'js-setup', 'js-setupdns', 'js-restore', 'js-update'], function () {});
|
||||
|
||||
var oauth = {
|
||||
clientId: argv.clientId || 'cid-webadmin',
|
||||
clientSecret: argv.clientSecret || 'unused',
|
||||
apiOrigin: argv.apiOrigin || ''
|
||||
apiOrigin: argv.apiOrigin || '',
|
||||
apiOriginHostname: argv.apiOrigin ? url.parse(argv.apiOrigin).hostname : ''
|
||||
};
|
||||
|
||||
console.log();
|
||||
console.log('Using OAuth credentials:');
|
||||
console.log(' ClientId: %s', oauth.clientId);
|
||||
console.log(' ClientSecret: %s', oauth.clientSecret);
|
||||
console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
|
||||
console.log(' ClientId: %s', oauth.clientId);
|
||||
console.log(' ClientSecret: %s', oauth.clientSecret);
|
||||
console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
|
||||
console.log(' Cloudron Host: %s', oauth.apiOriginHostname);
|
||||
console.log();
|
||||
|
||||
|
||||
@@ -79,7 +82,7 @@ gulp.task('js-index', function () {
|
||||
'webadmin/src/js/main.js',
|
||||
'webadmin/src/views/*.js'
|
||||
])
|
||||
.pipe(ejs({ oauth: oauth }, { ext: '.js' }))
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('index.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
@@ -87,6 +90,38 @@ gulp.task('js-index', function () {
|
||||
.pipe(gulp.dest('webadmin/dist/js'));
|
||||
});
|
||||
|
||||
gulp.task('js-logs', function () {
|
||||
// needs special treatment for error handling
|
||||
var uglifyer = uglify();
|
||||
uglifyer.on('error', function (error) {
|
||||
console.error(error);
|
||||
});
|
||||
|
||||
gulp.src(['webadmin/src/js/logs.js', 'webadmin/src/js/client.js'])
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('logs.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
.pipe(sourcemaps.write())
|
||||
.pipe(gulp.dest('webadmin/dist/js'));
|
||||
});
|
||||
|
||||
gulp.task('js-terminal', function () {
|
||||
// needs special treatment for error handling
|
||||
var uglifyer = uglify();
|
||||
uglifyer.on('error', function (error) {
|
||||
console.error(error);
|
||||
});
|
||||
|
||||
gulp.src(['webadmin/src/js/terminal.js', 'webadmin/src/js/client.js'])
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('terminal.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
.pipe(sourcemaps.write())
|
||||
.pipe(gulp.dest('webadmin/dist/js'));
|
||||
});
|
||||
|
||||
gulp.task('js-setup', function () {
|
||||
// needs special treatment for error handling
|
||||
var uglifyer = uglify();
|
||||
@@ -95,7 +130,7 @@ gulp.task('js-setup', function () {
|
||||
});
|
||||
|
||||
gulp.src(['webadmin/src/js/setup.js', 'webadmin/src/js/client.js'])
|
||||
.pipe(ejs({ oauth: oauth }, { ext: '.js' }))
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('setup.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
@@ -111,7 +146,7 @@ gulp.task('js-setupdns', function () {
|
||||
});
|
||||
|
||||
gulp.src(['webadmin/src/js/setupdns.js', 'webadmin/src/js/client.js'])
|
||||
.pipe(ejs({ oauth: oauth }, { ext: '.js' }))
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('setupdns.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
@@ -119,6 +154,23 @@ gulp.task('js-setupdns', function () {
|
||||
.pipe(gulp.dest('webadmin/dist/js'));
|
||||
});
|
||||
|
||||
gulp.task('js-restore', function () {
|
||||
// needs special treatment for error handling
|
||||
var uglifyer = uglify();
|
||||
uglifyer.on('error', function (error) {
|
||||
console.error(error);
|
||||
});
|
||||
|
||||
gulp.src(['webadmin/src/js/restore.js', 'webadmin/src/js/client.js'])
|
||||
.pipe(ejs({ oauth: oauth }, {}, { ext: '.js' }))
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(concat('restore.js', { newLine: ';' }))
|
||||
.pipe(uglifyer)
|
||||
.pipe(sourcemaps.write())
|
||||
.pipe(gulp.dest('webadmin/dist/js'));
|
||||
});
|
||||
|
||||
|
||||
gulp.task('js-update', function () {
|
||||
// needs special treatment for error handling
|
||||
var uglifyer = uglify();
|
||||
@@ -140,7 +192,7 @@ gulp.task('js-update', function () {
|
||||
// --------------
|
||||
|
||||
gulp.task('html', ['html-views', 'html-update', 'html-templates'], function () {
|
||||
return gulp.src('webadmin/src/*.html').pipe(gulp.dest('webadmin/dist'));
|
||||
return gulp.src('webadmin/src/*.html').pipe(ejs({ apiOriginHostname: oauth.apiOriginHostname }, {}, { ext: '.html' })).pipe(gulp.dest('webadmin/dist'));
|
||||
});
|
||||
|
||||
gulp.task('html-update', function () {
|
||||
@@ -188,12 +240,16 @@ gulp.task('watch', ['default'], function () {
|
||||
gulp.watch(['webadmin/src/js/update.js'], ['js-update']);
|
||||
gulp.watch(['webadmin/src/js/setup.js', 'webadmin/src/js/client.js'], ['js-setup']);
|
||||
gulp.watch(['webadmin/src/js/setupdns.js', 'webadmin/src/js/client.js'], ['js-setupdns']);
|
||||
gulp.watch(['webadmin/src/js/restore.js', 'webadmin/src/js/client.js'], ['js-restore']);
|
||||
gulp.watch(['webadmin/src/js/logs.js', 'webadmin/src/js/client.js'], ['js-logs']);
|
||||
gulp.watch(['webadmin/src/js/terminal.js', 'webadmin/src/js/client.js'], ['js-terminal']);
|
||||
gulp.watch(['webadmin/src/js/index.js', 'webadmin/src/js/client.js', 'webadmin/src/js/appstore.js', 'webadmin/src/js/main.js', 'webadmin/src/views/*.js'], ['js-index']);
|
||||
gulp.watch(['webadmin/src/3rdparty/**/*'], ['3rdparty']);
|
||||
});
|
||||
|
||||
gulp.task('clean', function () {
|
||||
del.sync(['webadmin/dist', 'setup/splash/website']);
|
||||
rimraf.sync('webadmin/dist');
|
||||
rimraf.sync('setup/splash/website');
|
||||
});
|
||||
|
||||
gulp.task('default', ['clean', 'html', 'js', '3rdparty', 'images', 'css'], function () {});
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
var async = require('async');
|
||||
|
||||
var ADMIN_GROUP_ID = 'admin'; // see groups.js
|
||||
var ADMIN_GROUP_ID = 'admin'; // see constants.js
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
|
||||
@@ -3,10 +3,10 @@
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE eventlog(" +
|
||||
"id VARCHAR(128) NOT NULL," +
|
||||
"source JSON," +
|
||||
"source TEXT," +
|
||||
"creationTime TIMESTAMP," +
|
||||
"action VARCHAR(128) NOT NULL," +
|
||||
"data JSON," +
|
||||
"data TEXT," +
|
||||
"PRIMARY KEY (id))";
|
||||
|
||||
db.runSql(cmd, function (error) {
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs ADD COLUMN name VARCHAR(128)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appAddonConfigs DROP COLUMN name', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
'use strict';
|
||||
|
||||
var url = require('url');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var dbName = url.parse(process.env.DATABASE_URL).path.substr(1); // remove slash
|
||||
|
||||
// by default, mysql collates case insensitively. 'utf8_general_cs' is not available
|
||||
db.runSql('ALTER DATABASE ' + dbName + ' DEFAULT CHARACTER SET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci', callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,95 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
|
||||
function postProcess(result) {
|
||||
try {
|
||||
result.manifest = JSON.parse(result.manifestJson);
|
||||
delete result.manifestJson;
|
||||
|
||||
result.oldConfig = JSON.parse(result.oldConfigJson);
|
||||
delete result.oldConfigJson;
|
||||
|
||||
result.portBindings = { };
|
||||
var hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
|
||||
var environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');
|
||||
|
||||
delete result.hostPorts;
|
||||
delete result.environmentVariables;
|
||||
|
||||
for (var i = 0; i < environmentVariables.length; i++) {
|
||||
result.portBindings[environmentVariables[i]] = parseInt(hostPorts[i], 10);
|
||||
}
|
||||
|
||||
result.accessRestriction = JSON.parse(result.accessRestrictionJson);
|
||||
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
|
||||
delete result.accessRestrictionJson;
|
||||
|
||||
// TODO remove later once all apps have this attribute
|
||||
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
|
||||
|
||||
result.sso = !!result.sso; // make it bool
|
||||
|
||||
result.debugMode = JSON.parse(result.debugModeJson);
|
||||
delete result.debugModeJson;
|
||||
} catch (e) {
|
||||
console.error('Failed to get restoreConfig for app.', e);
|
||||
console.error('Falling back to empty values to make the update succeed.');
|
||||
result.manifest = null;
|
||||
}
|
||||
}
|
||||
|
||||
// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
|
||||
// fill all the backups with restoreConfigs from current apps
|
||||
function addRestoreConfigs(callback) {
|
||||
console.log('Importing restoreConfigs');
|
||||
|
||||
var appQuery = 'SELECT ' + APPS_FIELDS_PREFIXED + ',' +
|
||||
'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables' +
|
||||
' FROM apps LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId' +
|
||||
' GROUP BY apps.id ORDER BY apps.id';
|
||||
|
||||
db.all(appQuery, function (error, apps) {
|
||||
if (error) return callback(error);
|
||||
|
||||
apps.forEach(postProcess);
|
||||
|
||||
async.eachSeries(apps, function (app, next) {
|
||||
if (app.manifest === null) return next();
|
||||
|
||||
db.all('SELECT * FROM backups WHERE type="app" AND id LIKE "%app%\\_' + app.id + '\\_%"', function (error, backups) {
|
||||
if (error) return next(error);
|
||||
|
||||
// from apps.js:getAppConfig()
|
||||
var restoreConfig = {
|
||||
manifest: app.manifest,
|
||||
location: app.location,
|
||||
accessRestriction: app.accessRestriction,
|
||||
portBindings: app.portBindings,
|
||||
memoryLimit: app.memoryLimit,
|
||||
xFrameOptions: app.xFrameOptions || 'SAMEORIGIN',
|
||||
altDomain: app.altDomain
|
||||
};
|
||||
|
||||
async.eachSeries(backups, function (backup, next) {
|
||||
db.runSql('UPDATE backups SET restoreConfigJson=?,creationTime=creationTime WHERE id=?', [ JSON.stringify(restoreConfig), backup.id ], next);
|
||||
}, next);
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN restoreConfigJson', callback);
|
||||
};
|
||||
@@ -0,0 +1,22 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
|
||||
if (error || results.length === 0) return callback(error);
|
||||
|
||||
var backupConfig = JSON.parse(results[0].value);
|
||||
if (backupConfig.provider === 'filesystem') {
|
||||
backupConfig.retentionSecs = 2 * 24 * 60 * 60; // 2 days
|
||||
} else if (backupConfig.provider === 's3') { // S3
|
||||
backupConfig.retentionSecs = -1;
|
||||
} else if (backupConfig.provider === 'caas') {
|
||||
backupConfig.retentionSecs = 10 * 24 * 60 * 60; // 10 days
|
||||
}
|
||||
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);
|
||||
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,9 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('INSERT settings (name, value) VALUES("mail_relay", ?)', [ JSON.stringify({ provider: 'cloudron-smtp' }) ], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="mail_relay"', [ ], callback);
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN robotsTxt TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN robotsTxt', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
// we used to have JSON as the db type for those two, however mariadb does not support it
|
||||
// and we never used any JSON related features, but have the TEXT pattern everywhere
|
||||
// This ensures all old cloudrons will have the columns altered
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE eventlog MODIFY data TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
db.runSql('ALTER TABLE eventlog MODIFY source TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,16 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN enableBackup BOOLEAN DEFAULT 1', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN enableBackup', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings MODIFY value TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE settings MODIFY value VARCHAR(512)', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,25 @@
|
||||
'use strict';
|
||||
|
||||
// ensure backupFolder and format are not empty
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM settings WHERE name=?', [ 'backup_config' ], function (error, result) {
|
||||
if (error || result.length === 0) return callback(error);
|
||||
|
||||
var value = JSON.parse(result[0].value);
|
||||
value.format = 'tgz'; // set the format
|
||||
|
||||
if (value.provider === 'filesystem' && !value.backupFolder) {
|
||||
value.backupFolder = '/var/backups'; // set the backupFolder
|
||||
}
|
||||
|
||||
db.runSql('UPDATE settings SET value = ? WHERE name = ?', [ JSON.stringify(value), 'backup_config' ], function (error) {
|
||||
if (error) console.error('Error setting ownerid ' + JSON.stringify(u) + error);
|
||||
callback();
|
||||
});
|
||||
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN format VARCHAR(16) DEFAULT "tgz"', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN format', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN newConfigJson TEXT', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN newConfigJson', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,40 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN manifestJson TEXT'),
|
||||
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
|
||||
// fill all the backups with restoreConfigs from current apps
|
||||
function addManifests(callback) {
|
||||
console.log('Importing manifests');
|
||||
|
||||
db.all('SELECT * FROM backups WHERE type="app"', function (error, backups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.eachSeries(backups, function (backup, next) {
|
||||
var m = backup.restoreConfigJson ? JSON.parse(backup.restoreConfigJson) : null;
|
||||
if (m) m = JSON.stringify(m.manifest);
|
||||
|
||||
db.runSql('UPDATE backups SET manifestJson=? WHERE id=?', [ m, backup.id ], next);
|
||||
}, callback);
|
||||
});
|
||||
},
|
||||
|
||||
db.runSql.bind(db, 'COMMIT'),
|
||||
|
||||
// remove the restoreConfig
|
||||
db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN restoreConfigJson')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN manifestJson'),
|
||||
db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
|
||||
], callback);
|
||||
};
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE newConfigJson updateConfigJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE updateConfigJson newConfigJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE lastBackupId restoreConfigJson TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE restoreConfigJson lastBackupId TEXT', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,31 @@
|
||||
'use strict';
|
||||
|
||||
// WARNING!!
|
||||
// At this point the default db collation is utf8mb4_unicode_ci however we already have foreign key constraits
|
||||
// already with tables on utf8_bin charset, so we cannot convert all tables here to utf8mb4 collation without
|
||||
// a reimport from a sql dump, as foreign keys across different collations are not supported
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'ALTER TABLE appPortBindings CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE apps CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE authcodes CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE backups CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE clients CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE eventlog CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE groupMembers CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE groups CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE mailboxes CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE migrations CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE settings CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE tokens CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin'),
|
||||
db.runSql.bind(db, 'ALTER TABLE users CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
// nothing to be done here
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,70 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async'),
|
||||
safe = require('safetydance');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
// first check precondtion of domain entry in settings
|
||||
db.all('SELECT * FROM settings WHERE name = ?', [ 'domain' ], function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var domain = {};
|
||||
if (result[0]) domain = safe.JSON.parse(result[0].value) || {};
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
function addAppsDomainColumn(done) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN domain VARCHAR(128)', [], done);
|
||||
},
|
||||
function setAppDomain(done) {
|
||||
if (!domain.fqdn) return done(); // skip for new cloudrons without a domain
|
||||
db.runSql('UPDATE apps SET domain = ?', [ domain.fqdn ], done);
|
||||
},
|
||||
function addAppsLocationDomainUniqueConstraint(done) {
|
||||
db.runSql('ALTER TABLE apps ADD UNIQUE location_domain_unique_index (location, domain)', [], done);
|
||||
},
|
||||
function removePresetupAdminGroupIfNew(done) {
|
||||
// do not delete on update, will update the record in setMailboxesDomain()
|
||||
if (domain.fqdn) return done();
|
||||
|
||||
// this will be finally created once we have a domain when we create the owner in user.js
|
||||
const ADMIN_GROUP_ID = 'admin'; // see constants.js
|
||||
db.runSql('DELETE FROM groups WHERE id = ?', [ ADMIN_GROUP_ID ], function (error) {
|
||||
if (error) return done(error);
|
||||
|
||||
db.runSql('DELETE FROM mailboxes WHERE ownerId = ?', [ ADMIN_GROUP_ID ], done);
|
||||
});
|
||||
},
|
||||
function addMailboxesDomainColumn(done) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD COLUMN domain VARCHAR(128)', [], done);
|
||||
},
|
||||
function setMailboxesDomain(done) {
|
||||
if (!domain.fqdn) return done(); // skip for new cloudrons without a domain
|
||||
db.runSql('UPDATE mailboxes SET domain = ?', [ domain.fqdn ], done);
|
||||
},
|
||||
function dropAppsLocationUniqueConstraint(done) {
|
||||
db.runSql('ALTER TABLE apps DROP INDEX location', [], done);
|
||||
},
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
function dropMailboxesDomainColumn(done) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP COLUMN domain', [], done);
|
||||
},
|
||||
function dropLocationDomainUniqueConstraint(done) {
|
||||
db.runSql('ALTER TABLE apps DROP INDEX location_domain_unique_index', [], done);
|
||||
},
|
||||
function dropAppsDomainColumn(done) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN domain', [], done);
|
||||
},
|
||||
function addAppsLocationUniqueConstraint(done) {
|
||||
db.runSql('ALTER TABLE apps ADD UNIQUE location (location)', [], done);
|
||||
},
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
};
|
||||
@@ -0,0 +1,61 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async'),
|
||||
safe = require('safetydance'),
|
||||
tld = require('tldjs');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var fqdn, zoneName, configJson;
|
||||
|
||||
async.series([
|
||||
function gatherDomain(done) {
|
||||
db.all('SELECT * FROM settings WHERE name = ?', [ 'domain' ], function (error, result) {
|
||||
if (error) return done(error);
|
||||
|
||||
var domain = {};
|
||||
if (result[0]) domain = safe.JSON.parse(result[0].value) || {};
|
||||
|
||||
fqdn = domain.fqdn || ''; // will be null pre-setup
|
||||
zoneName = domain.zoneName || tld.getDomain(fqdn) || fqdn;
|
||||
|
||||
done();
|
||||
});
|
||||
},
|
||||
function gatherDNSConfig(done) {
|
||||
db.all('SELECT * FROM settings WHERE name = ?', [ 'dns_config' ], function (error, result) {
|
||||
if (error) return done(error);
|
||||
|
||||
configJson = (result[0] && result[0].value) ? result[0].value : JSON.stringify({ provider: 'manual'});
|
||||
|
||||
// caas dns config needs an fqdn
|
||||
var config = JSON.parse(configJson);
|
||||
if (config.provider === 'caas') config.fqdn = fqdn;
|
||||
configJson = JSON.stringify(config);
|
||||
|
||||
done();
|
||||
});
|
||||
},
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
function createDomainsTable(done) {
|
||||
var cmd = `
|
||||
CREATE TABLE domains(
|
||||
domain VARCHAR(128) NOT NULL UNIQUE,
|
||||
zoneName VARCHAR(128) NOT NULL,
|
||||
configJson TEXT,
|
||||
PRIMARY KEY (domain)) CHARACTER SET utf8 COLLATE utf8_bin
|
||||
`;
|
||||
|
||||
db.runSql(cmd, [], done);
|
||||
},
|
||||
function addInitialDomain(done) {
|
||||
if (!fqdn) return done();
|
||||
|
||||
db.runSql('INSERT INTO domains (domain, zoneName, configJson) VALUES (?, ?, ?)', [ fqdn, zoneName, configJson ], done);
|
||||
},
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DROP TABLE domains', callback);
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD CONSTRAINT apps_domain_constraint FOREIGN KEY(domain) REFERENCES domains(domain)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP FOREIGN KEY apps_domain_constraint', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD CONSTRAINT mailboxes_domain_constraint FOREIGN KEY(domain) REFERENCES domains(domain)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP FOREIGN KEY mailboxes_domain_constraint', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP PRIMARY KEY', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD PRIMARY KEY(name)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes ADD UNIQUE mailboxes_name_domain_unique_index (name, domain)', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE mailboxes DROP INDEX mailboxes_name_domain_unique_index', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN updateTime', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE createdAt creationTime TIMESTAMP(2) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE creationTime createdAt TIMESTAMP(2) NOT NULL', [], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,28 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// NOTE: This migration is incorrect because 'caas' domain is not guaranteed to be present in all Caas cloudrons
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT * FROM domains', function (error, domains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var caasDomains = domains.filter(function (d) { return JSON.parse(d.configJson).provider === 'caas'; });
|
||||
if (caasDomains.length === 0) return callback();
|
||||
var caasDomain = caasDomains[0].domain;
|
||||
|
||||
db.all('SELECT * FROM settings WHERE name=?', [ 'backup_config' ], function (error, settings) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var setting = settings[0];
|
||||
var config = JSON.parse(setting.value);
|
||||
config.fqdn = caasDomain;
|
||||
|
||||
db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(config), setting.name ], callback);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
callback();
|
||||
};
|
||||
@@ -0,0 +1,23 @@
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var backupConfig = {
|
||||
"provider": "filesystem",
|
||||
"backupFolder": "/var/backups",
|
||||
"format": "tgz",
|
||||
"retentionSecs": 172800
|
||||
};
|
||||
|
||||
db.runSql('INSERT settings (name, value) VALUES(?, ?)', [ 'backup_config', JSON.stringify(backupConfig) ], function (error) {
|
||||
if (!error || error.code === 'ER_DUP_ENTRY') return callback(); // dup entry is OK for existing cloudrons
|
||||
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE FROM settings WHERE name=?', ['backup_config'], function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -0,0 +1,33 @@
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
// first check precondtion of domain entry in settings
|
||||
db.all('SELECT * FROM domains', [ ], function (error, domains) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.series([
|
||||
db.runSql.bind(db, 'START TRANSACTION;'),
|
||||
db.runSql.bind(db, 'ALTER TABLE domains ADD COLUMN provider VARCHAR(16) DEFAULT ""'),
|
||||
function setProvider(done) {
|
||||
async.eachSeries(domains, function (domain, iteratorCallback) {
|
||||
var config = JSON.parse(domain.configJson);
|
||||
var provider = config.provider;
|
||||
delete config.provider;
|
||||
|
||||
db.runSql('UPDATE domains SET provider = ?, configJson = ? WHERE domain = ?', [ provider, JSON.stringify(config), domain.domain ], iteratorCallback);
|
||||
}, done);
|
||||
},
|
||||
db.runSql.bind(db, 'ALTER TABLE domains MODIFY provider VARCHAR(16) NOT NULL'),
|
||||
db.runSql.bind(db, 'COMMIT')
|
||||
], callback);
|
||||
});
|
||||
};
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('ALTER TABLE domains DROP COLUMN provider', function (error) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
@@ -9,6 +9,10 @@
|
||||
#### BLOB - stored offline from table row (use for binary data)
|
||||
#### https://dev.mysql.com/doc/refman/5.0/en/storage-requirements.html
|
||||
|
||||
# The code uses zero dates. Make sure sql_mode does NOT have NO_ZERO_DATE
|
||||
# http://johnemb.blogspot.com/2014/09/adding-or-removing-individual-sql-modes.html
|
||||
# SET GLOBAL sql_mode=(SELECT REPLACE(@@sql_mode,'NO_ZERO_DATE',''));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS users(
|
||||
id VARCHAR(128) NOT NULL UNIQUE,
|
||||
username VARCHAR(254) UNIQUE,
|
||||
@@ -59,20 +63,26 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
containerId VARCHAR(128),
|
||||
manifestJson TEXT,
|
||||
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
|
||||
location VARCHAR(128) NOT NULL UNIQUE,
|
||||
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates (unused)
|
||||
location VARCHAR(128) NOT NULL,
|
||||
domain VARCHAR(128) NOT NULL,
|
||||
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates
|
||||
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
|
||||
createdAt TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
updatedAt TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
memoryLimit BIGINT DEFAULT 0,
|
||||
altDomain VARCHAR(256),
|
||||
xFrameOptions VARCHAR(512),
|
||||
sso BOOLEAN DEFAULT 1, // whether user chose to enable SSO
|
||||
debugModeJson TEXT, // options for development mode
|
||||
robotsTxt TEXT,
|
||||
enableBackup BOOLEAN DEFAULT 1, // misnomer: controls automatic daily backups
|
||||
|
||||
lastBackupId VARCHAR(128), // tracks last valid backup, can be removed
|
||||
|
||||
oldConfigJson TEXT, // used to pass old config for apptask, can be removed when we use a queue
|
||||
// the following fields do not belong here, they can be removed when we use a queue for apptask
|
||||
restoreConfigJson VARCHAR(256), // used to pass backupId to restore from to apptask
|
||||
oldConfigJson TEXT, // used to pass old config for apptask (configure, restore)
|
||||
updateConfigJson TEXT, // used to pass new config for apptask (update)
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
PRIMARY KEY(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appPortBindings(
|
||||
@@ -91,30 +101,33 @@ CREATE TABLE IF NOT EXISTS authcodes(
|
||||
|
||||
CREATE TABLE IF NOT EXISTS settings(
|
||||
name VARCHAR(128) NOT NULL UNIQUE,
|
||||
value VARCHAR(512),
|
||||
value TEXT,
|
||||
PRIMARY KEY(name));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS appAddonConfigs(
|
||||
appId VARCHAR(128) NOT NULL,
|
||||
addonId VARCHAR(32) NOT NULL,
|
||||
name VARCHAR(128) NOT NULL,
|
||||
value VARCHAR(512) NOT NULL,
|
||||
FOREIGN KEY(appId) REFERENCES apps(id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS backups(
|
||||
filename VARCHAR(128) NOT NULL,
|
||||
id VARCHAR(128) NOT NULL,
|
||||
creationTime TIMESTAMP,
|
||||
version VARCHAR(128) NOT NULL, /* app version or box version */
|
||||
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
|
||||
dependsOn TEXT, /* comma separate list of objects this backup depends on */
|
||||
state VARCHAR(16) NOT NULL,
|
||||
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
|
||||
format VARCHAR(16) DEFAULT "tgz",
|
||||
|
||||
PRIMARY KEY (filename));
|
||||
PRIMARY KEY (id));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS eventlog(
|
||||
id VARCHAR(128) NOT NULL,
|
||||
action VARCHAR(128) NOT NULL,
|
||||
source JSON, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data JSON, /* free flowing json based on action */
|
||||
source TEXT, /* { userId, username, ip }. userId can be null for cron,sysadmin */
|
||||
data TEXT, /* free flowing json based on action */
|
||||
creationTime TIMESTAMP, /* FIXME: precision must be TIMESTAMP(2) */
|
||||
|
||||
PRIMARY KEY (id));
|
||||
@@ -129,5 +142,18 @@ CREATE TABLE IF NOT EXISTS mailboxes(
|
||||
ownerType VARCHAR(16) NOT NULL, /* 'app' or 'user' or 'group' */
|
||||
aliasTarget VARCHAR(128), /* the target name type is an alias */
|
||||
creationTime TIMESTAMP,
|
||||
domain VARCHAR(128),
|
||||
|
||||
FOREIGN KEY(domain) REFERENCES domains(domain),
|
||||
PRIMARY KEY (name));
|
||||
|
||||
CREATE TABLE IF NOT EXISTS domains(
|
||||
domain VARCHAR(128) NOT NULL UNIQUE, /* if this needs to be larger, InnoDB has a limit of 767 bytes for PRIMARY KEY values! */
|
||||
zoneName VARCHAR(128) NOT NULL, /* this mostly contains the domain itself again */
|
||||
provider VARCHAR(16) NOT NULL,
|
||||
configJson TEXT, /* JSON containing the dns backend provider config */
|
||||
|
||||
PRIMARY KEY (domain))
|
||||
|
||||
/* the default db collation is utf8mb4_unicode_ci but for the app table domain constraint we have to use the old one */
|
||||
CHARACTER SET utf8 COLLATE utf8_bin;
|
||||
|
||||
@@ -1,101 +1,108 @@
|
||||
{
|
||||
"name": "Cloudron",
|
||||
"name": "cloudron",
|
||||
"description": "Main code for a cloudron",
|
||||
"version": "0.0.1",
|
||||
"private": "true",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"author": {
|
||||
"name": "Cloudron authors"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git"
|
||||
"type": "git",
|
||||
"url": "https://git.cloudron.io/cloudron/box.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=4.0.0 <=4.1.1"
|
||||
},
|
||||
"engines": [
|
||||
"node >=4.0.0 <=4.1.1"
|
||||
],
|
||||
"dependencies": {
|
||||
"async": "^2.1.4",
|
||||
"aws-sdk": "^2.1.46",
|
||||
"body-parser": "^1.13.1",
|
||||
"checksum": "^0.1.1",
|
||||
"cloudron-manifestformat": "^2.8.0",
|
||||
"@google-cloud/dns": "^0.7.0",
|
||||
"@google-cloud/storage": "^1.2.1",
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"async": "^2.6.0",
|
||||
"aws-sdk": "^2.151.0",
|
||||
"body-parser": "^1.18.2",
|
||||
"cloudron-manifestformat": "^2.11.0",
|
||||
"connect-ensure-login": "^0.1.1",
|
||||
"connect-lastmile": "^0.1.0",
|
||||
"connect-timeout": "^1.5.0",
|
||||
"connect-lastmile": "^1.0.2",
|
||||
"connect-timeout": "^1.9.0",
|
||||
"cookie-parser": "^1.3.5",
|
||||
"cookie-session": "^1.1.0",
|
||||
"cron": "^1.0.9",
|
||||
"cookie-session": "^1.3.2",
|
||||
"cron": "^1.3.0",
|
||||
"csurf": "^1.6.6",
|
||||
"db-migrate": "^0.10.0-beta.20",
|
||||
"db-migrate": "^0.10.0-beta.24",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^2.2.0",
|
||||
"dockerode": "^2.2.10",
|
||||
"ejs": "^2.2.4",
|
||||
"ejs-cli": "^1.2.0",
|
||||
"express": "^4.12.4",
|
||||
"express-rate-limit": "^2.6.0",
|
||||
"express-session": "^1.11.3",
|
||||
"gulp-sass": "^3.0.0",
|
||||
"debug": "^3.1.0",
|
||||
"dockerode": "^2.5.3",
|
||||
"ejs": "^2.5.7",
|
||||
"ejs-cli": "^2.0.0",
|
||||
"express": "^4.16.2",
|
||||
"express-session": "^1.15.6",
|
||||
"hat": "0.0.3",
|
||||
"json": "^9.0.3",
|
||||
"ldapjs": "^1.0.0",
|
||||
"mime": "^1.3.4",
|
||||
"moment-timezone": "^0.5.5",
|
||||
"morgan": "^1.7.0",
|
||||
"lodash.chunk": "^4.2.0",
|
||||
"mime": "^2.0.3",
|
||||
"moment-timezone": "^0.5.14",
|
||||
"morgan": "^1.9.0",
|
||||
"multiparty": "^4.1.2",
|
||||
"mysql": "^2.7.0",
|
||||
"native-dns": "^0.7.0",
|
||||
"node-df": "^0.1.1",
|
||||
"node-uuid": "^1.4.3",
|
||||
"nodemailer": "^1.3.0",
|
||||
"nodemailer-smtp-transport": "^1.0.3",
|
||||
"oauth2orize": "^1.0.1",
|
||||
"mysql": "^2.15.0",
|
||||
"nodemailer": "^4.4.0",
|
||||
"nodemailer-smtp-transport": "^2.7.4",
|
||||
"oauth2orize": "^1.11.0",
|
||||
"once": "^1.3.2",
|
||||
"parse-links": "^0.1.0",
|
||||
"passport": "^0.2.2",
|
||||
"passport-http": "^0.2.2",
|
||||
"passport": "^0.4.0",
|
||||
"passport-http": "^0.3.0",
|
||||
"passport-http-bearer": "^1.0.1",
|
||||
"passport-local": "^1.0.0",
|
||||
"passport-oauth2-client-password": "^0.1.2",
|
||||
"password-generator": "^2.0.2",
|
||||
"proxy-middleware": "^0.13.0",
|
||||
"safetydance": "^0.1.1",
|
||||
"semver": "^4.3.6",
|
||||
"showdown": "^1.6.0",
|
||||
"password-generator": "^2.2.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"proxy-middleware": "^0.15.0",
|
||||
"recursive-readdir": "^2.2.1",
|
||||
"request": "^2.83.0",
|
||||
"s3-block-read-stream": "^0.2.0",
|
||||
"safetydance": "^0.7.1",
|
||||
"semver": "^5.4.1",
|
||||
"showdown": "^1.8.2",
|
||||
"split": "^1.0.0",
|
||||
"superagent": "^1.8.3",
|
||||
"superagent": "^3.8.1",
|
||||
"supererror": "^0.7.1",
|
||||
"tldjs": "^1.6.2",
|
||||
"tar-fs": "^1.16.0",
|
||||
"tar-stream": "^1.5.5",
|
||||
"tldjs": "^2.2.0",
|
||||
"underscore": "^1.7.0",
|
||||
"uuid": "^3.1.0",
|
||||
"valid-url": "^1.0.9",
|
||||
"validator": "^4.9.0"
|
||||
"validator": "^9.1.1",
|
||||
"ws": "^3.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"bootstrap-sass": "^3.3.3",
|
||||
"deep-extend": "^0.4.1",
|
||||
"del": "^1.1.1",
|
||||
"expect.js": "*",
|
||||
"gulp": "^3.8.11",
|
||||
"gulp-autoprefixer": "^2.3.0",
|
||||
"gulp": "^3.9.1",
|
||||
"gulp-autoprefixer": "^4.0.0",
|
||||
"gulp-concat": "^2.4.3",
|
||||
"gulp-cssnano": "^2.1.0",
|
||||
"gulp-ejs": "^1.0.0",
|
||||
"gulp-sass": "^3.0.0",
|
||||
"gulp-ejs": "^3.1.0",
|
||||
"gulp-sass": "^3.1.0",
|
||||
"gulp-serve": "^1.0.0",
|
||||
"gulp-sourcemaps": "^1.5.2",
|
||||
"gulp-uglify": "^1.1.0",
|
||||
"hock": "~1.2.0",
|
||||
"gulp-sourcemaps": "^2.6.1",
|
||||
"gulp-uglify": "^3.0.0",
|
||||
"hock": "^1.3.2",
|
||||
"istanbul": "*",
|
||||
"js2xmlparser": "^1.0.0",
|
||||
"js2xmlparser": "^3.0.0",
|
||||
"mocha": "*",
|
||||
"nock": "^9.0.2",
|
||||
"node-sass": "^3.0.0-alpha.0",
|
||||
"request": "^2.65.0",
|
||||
"yargs": "^3.15.0"
|
||||
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
|
||||
"nock": "^9.0.14",
|
||||
"node-sass": "^4.6.1",
|
||||
"readdirp": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz",
|
||||
"yargs": "^10.0.3"
|
||||
},
|
||||
"scripts": {
|
||||
"migrate_local": "DATABASE_URL=mysql://root:@localhost/box node_modules/.bin/db-migrate up",
|
||||
"migrate_test": "BOX_ENV=test DATABASE_URL=mysql://root:@localhost/boxtest node_modules/.bin/db-migrate up",
|
||||
"test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test",
|
||||
"test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- --exit -R spec ./src/test ./src/routes/test/[^a]*",
|
||||
"test_all": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- --exit -R spec ./src/test ./src/routes/test",
|
||||
"postmerge": "/bin/true",
|
||||
"precommit": "/bin/true",
|
||||
"prepush": "npm test",
|
||||
|
||||
@@ -15,77 +15,85 @@ fi
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
# copied from cloudron-resize-fs.sh
|
||||
readonly rootfs_type=$(LC_ALL=C df --output=fstype / | tail -n1)
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }')
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024/1024))
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024))
|
||||
|
||||
# verify the system has minimum requirements met
|
||||
if [[ "${rootfs_type}" != "ext4" ]]; then
|
||||
echo "Error: Cloudron requires '/' to be ext4" # see #364
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
|
||||
echo "Error: Cloudron requires atleast 1GB physical memory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on ${disk_device} is ${disk_size_gb}GB)"
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on / is ${disk_size_gb}GB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
initBaseImage="true"
|
||||
# provisioning data
|
||||
domain=""
|
||||
adminLocation="my"
|
||||
zoneName=""
|
||||
provider=""
|
||||
encryptionKey=""
|
||||
restoreUrl=""
|
||||
dnsProvider="manual"
|
||||
tlsProvider="le-prod"
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
requestedVersion="latest"
|
||||
requestedVersion=""
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
dataJson=""
|
||||
prerelease="false"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
baseDataDir=""
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
# these are here for pre-1.9 compat
|
||||
encryptionKey=""
|
||||
restoreUrl=""
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,data-dir:,provider:,encryption-key:,restore-url:,tls-provider:,version:,dns-provider:,env:,admin-location:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--domain) domain="$2"; shift 2;;
|
||||
--help) echo "See https://cloudron.io/references/selfhosting.html on how to install Cloudron"; exit 0;;
|
||||
--admin-location) adminLocation="$2"; shift 2;;
|
||||
--help) echo "See https://cloudron.io/documentation/installation/ on how to install Cloudron"; exit 0;;
|
||||
--provider) provider="$2"; shift 2;;
|
||||
--encryption-key) encryptionKey="$2"; shift 2;;
|
||||
--restore-url) restoreUrl="$2"; shift 2;;
|
||||
--tls-provider) tlsProvider="$2"; shift 2;;
|
||||
--dns-provider) dnsProvider="$2"; shift 2;;
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
|
||||
webServerOrigin="https://dev.cloudron.io"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
fi
|
||||
shift 2;;
|
||||
--versions-url) versionsUrl="$2"; shift 2;;
|
||||
--api-server) apiServerOrigin="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--data) dataJson="$2"; shift 2;;
|
||||
--prerelease) prerelease="true"; shift;;
|
||||
--source-url) sourceTarballUrl="$2"; version="0.0.1+custom"; shift 2;;
|
||||
--data-dir) baseDataDir=$(realpath "$2"); shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -94,13 +102,16 @@ done
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, scaleway, vultr or generic)"
|
||||
echo "--provider is required (azure, cloudscale.ch, digitalocean, ec2, exoscale, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "ami" && \
|
||||
"${provider}" != "azure" && \
|
||||
"${provider}" != "cloudscale.ch" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "exoscale" && \
|
||||
"${provider}" != "gce" && \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "ovh" && \
|
||||
@@ -109,7 +120,7 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
echo "--provider must be one of: azure, cloudscale.ch, digitalocean, ec2, exoscale, gce, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -118,18 +129,15 @@ if [[ -z "${dataJson}" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${dnsProvider}" ]]; then
|
||||
echo "--dns-provider is required (noop, manual)"
|
||||
exit 1
|
||||
elif [[ "${dnsProvider}" != "noop" && "${dnsProvider}" != "manual" ]]; then
|
||||
echo "--dns-provider must be one of : manual, noop"
|
||||
if [[ -n "${baseDataDir}" && ! -d "${baseDataDir}" ]]; then
|
||||
echo "${baseDataDir} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "##############################################"
|
||||
echo " Cloudron Setup (${requestedVersion}) "
|
||||
echo " Cloudron Setup (${requestedVersion:-latest})"
|
||||
echo "##############################################"
|
||||
echo ""
|
||||
echo " Follow setup logs in a second terminal with:"
|
||||
@@ -153,53 +161,61 @@ fi
|
||||
|
||||
echo "=> Checking version"
|
||||
if [[ "${sourceTarballUrl}" == "" ]]; then
|
||||
releaseJson=$($curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
if ! releaseJson=$($curl -s "${apiServerOrigin}/api/v1/releases?prerelease=${prerelease}&boxVersion=${requestedVersion}"); then
|
||||
echo "Failed to get release information"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$requestedVersion" == "" ]]; then
|
||||
version=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["version"])')
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["info"]["sourceTarballUrl"])'); then
|
||||
echo "No source code for version '${requestedVersion:-latest}'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build data
|
||||
# tlsConfig, dnsConfig, backupConfig are here for backward compat with < 1.9
|
||||
# from 1.9, we use autoprovision.json
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${restoreUrl}" ]]; then
|
||||
data=$(cat <<EOF
|
||||
{
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"adminLocation": "${adminLocation}",
|
||||
"adminFqdn": "${adminLocation}.${domain}",
|
||||
"zoneName": "${zoneName}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"version": "${version}",
|
||||
"tlsConfig": {
|
||||
"provider": "${tlsProvider}"
|
||||
},
|
||||
"dnsConfig": {
|
||||
"provider": "${dnsProvider}"
|
||||
},
|
||||
"backupConfig" : {
|
||||
"provider": "filesystem",
|
||||
"backupFolder": "/var/backups",
|
||||
"key": "${encryptionKey}"
|
||||
},
|
||||
"updateConfig": {
|
||||
"prerelease": ${prerelease}
|
||||
},
|
||||
"version": "${version}"
|
||||
"key": "${encryptionKey}",
|
||||
"format": "tgz",
|
||||
"retentionSecs": 172800
|
||||
}
|
||||
}
|
||||
EOF
|
||||
)
|
||||
else
|
||||
data=$(cat <<EOF
|
||||
{
|
||||
"boxVersionsUrl": "${versionsUrl}",
|
||||
"fqdn": "${domain}",
|
||||
"adminLocation": "${adminLocation}",
|
||||
"adminFqdn": "${adminLocation}.${domain}",
|
||||
"zoneName": "${zoneName}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"restore": {
|
||||
"url": "${restoreUrl}",
|
||||
"key": "${encryptionKey}"
|
||||
@@ -232,7 +248,7 @@ fi
|
||||
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" --data-dir "${baseDataDir}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
@@ -248,6 +264,17 @@ while true; do
|
||||
sleep 10
|
||||
done
|
||||
|
||||
autoprovision_data=$(cat <<EOF
|
||||
{
|
||||
"tlsConfig": {
|
||||
"provider": "${tlsProvider}"
|
||||
}
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
echo "${autoprovision_data}" > /home/yellowtent/configs/autoprovision.json
|
||||
|
||||
if [[ -n "${domain}" ]]; then
|
||||
echo -e "\n\nVisit https://my.${domain} to finish setup once the server has rebooted.\n"
|
||||
else
|
||||
@@ -256,5 +283,6 @@ fi
|
||||
|
||||
if [[ "${rebootServer}" == "true" ]]; then
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
systemctl stop mysql # sometimes mysql ends up having corrupt privilege tables
|
||||
systemctl reboot
|
||||
fi
|
||||
|
||||
@@ -31,8 +31,8 @@ if ! $(cd "${SOURCE_DIR}" && git diff --exit-code >/dev/null); then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$(node --version)" != "v6.9.2" ]]; then
|
||||
echo "This script requires node 6.9.2"
|
||||
if [[ "$(node --version)" != "v8.9.3" ]]; then
|
||||
echo "This script requires node 8.9.3"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -44,7 +44,7 @@ chmod "o+rx,g+rx" "${bundle_dir}" # otherwise extracted tarball director won't b
|
||||
echo "Checking out code [${version}] into ${bundle_dir}"
|
||||
(cd "${SOURCE_DIR}" && git archive --format=tar ${version} | (cd "${bundle_dir}" && tar xf -))
|
||||
|
||||
if diff "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.all" "${bundle_dir}/npm-shrinkwrap.json" >/dev/null 2>&1; then
|
||||
if diff "${TMPDIR}/boxtarball.cache/package-lock.json.all" "${bundle_dir}/package-lock.json" >/dev/null 2>&1; then
|
||||
echo "Reusing dev modules from cache"
|
||||
cp -r "${TMPDIR}/boxtarball.cache/node_modules-all/." "${bundle_dir}/node_modules"
|
||||
else
|
||||
@@ -54,18 +54,18 @@ else
|
||||
echo "Caching dev dependencies"
|
||||
mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-all"
|
||||
rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-all/"
|
||||
cp "${bundle_dir}/npm-shrinkwrap.json" "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.all"
|
||||
cp "${bundle_dir}/package-lock.json" "${TMPDIR}/boxtarball.cache/package-lock.json.all"
|
||||
fi
|
||||
|
||||
echo "Building webadmin assets"
|
||||
(cd "${bundle_dir}" && gulp)
|
||||
(cd "${bundle_dir}" && ./node_modules/.bin/gulp)
|
||||
|
||||
echo "Remove intermediate files required at build-time only"
|
||||
rm -rf "${bundle_dir}/node_modules/"
|
||||
rm -rf "${bundle_dir}/webadmin/src"
|
||||
rm -rf "${bundle_dir}/gulpfile.js"
|
||||
|
||||
if diff "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.prod" "${bundle_dir}/npm-shrinkwrap.json" >/dev/null 2>&1; then
|
||||
if diff "${TMPDIR}/boxtarball.cache/package-lock.json.prod" "${bundle_dir}/package-lock.json" >/dev/null 2>&1; then
|
||||
echo "Reusing prod modules from cache"
|
||||
cp -r "${TMPDIR}/boxtarball.cache/node_modules-prod/." "${bundle_dir}/node_modules"
|
||||
else
|
||||
@@ -75,7 +75,7 @@ else
|
||||
echo "Caching prod dependencies"
|
||||
mkdir -p "${TMPDIR}/boxtarball.cache/node_modules-prod"
|
||||
rsync -a --delete "${bundle_dir}/node_modules/" "${TMPDIR}/boxtarball.cache/node_modules-prod/"
|
||||
cp "${bundle_dir}/npm-shrinkwrap.json" "${TMPDIR}/boxtarball.cache/npm-shrinkwrap.json.prod"
|
||||
cp "${bundle_dir}/package-lock.json" "${TMPDIR}/boxtarball.cache/package-lock.json.prod"
|
||||
fi
|
||||
|
||||
echo "Create final tarball"
|
||||
@@ -84,4 +84,3 @@ echo "Cleaning up ${bundle_dir}"
|
||||
rm -rf "${bundle_dir}"
|
||||
|
||||
echo "Tarball saved at ${bundle_file}"
|
||||
|
||||
|
||||
@@ -9,27 +9,68 @@ fi
|
||||
|
||||
readonly USER=yellowtent
|
||||
readonly BOX_SRC_DIR=/home/${USER}/box
|
||||
readonly BASE_DATA_DIR=/home/${USER}
|
||||
readonly CLOUDRON_CONF=/home/yellowtent/configs/cloudron.conf
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
|
||||
|
||||
readonly is_update=$([[ -f "${CLOUDRON_CONF}" ]] && echo "yes" || echo "no")
|
||||
|
||||
arg_data=""
|
||||
arg_data_dir=""
|
||||
|
||||
args=$(getopt -o "" -l "data:,data-file:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "data:,data-file:,data-dir:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--data) arg_data="$2"; shift 2;;
|
||||
--data-file) arg_data=$(cat $2); shift 2;;
|
||||
--data-dir) arg_data_dir="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "==> installer: updating docker"
|
||||
if [[ $(docker version --format {{.Client.Version}}) != "17.09.0-ce" ]]; then
|
||||
$curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
|
||||
|
||||
# https://download.docker.com/linux/ubuntu/dists/xenial/stable/binary-amd64/Packages
|
||||
if [[ $(sha256sum /tmp/docker.deb | cut -d' ' -f1) != "d33f6eb134f0ab0876148bd96de95ea47d583d7f2cddfdc6757979453f9bd9bf" ]]; then
|
||||
echo "docker binary download is corrupt"
|
||||
exit 5
|
||||
fi
|
||||
|
||||
echo "Waiting for all dpkg tasks to finish..."
|
||||
while fuser /var/lib/dpkg/lock; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while ! dpkg --force-confold --configure -a; do
|
||||
echo "Failed to fix packages. Retry"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
while ! apt install -y /tmp/docker.deb; do
|
||||
echo "Failed to install docker. Retry"
|
||||
sleep 1
|
||||
done
|
||||
|
||||
rm /tmp/docker.deb
|
||||
fi
|
||||
|
||||
echo "==> installer: updating node"
|
||||
if [[ "$(node --version)" != "v8.9.3" ]]; then
|
||||
mkdir -p /usr/local/node-8.9.3
|
||||
$curl -sL https://nodejs.org/dist/v8.9.3/node-v8.9.3-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-8.9.3
|
||||
ln -sf /usr/local/node-8.9.3/bin/node /usr/bin/node
|
||||
ln -sf /usr/local/node-8.9.3/bin/npm /usr/bin/npm
|
||||
rm -rf /usr/local/node-6.11.5
|
||||
fi
|
||||
|
||||
for try in `seq 1 10`; do
|
||||
# for reasons unknown, the dtrace package will fail. but rebuilding second time will work
|
||||
|
||||
@@ -56,6 +97,15 @@ if [[ "${is_update}" == "yes" ]]; then
|
||||
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
|
||||
fi
|
||||
|
||||
# setup links to data directory
|
||||
if [[ -n "${arg_data_dir}" ]]; then
|
||||
echo "==> installer: setting up links to data directory"
|
||||
mkdir "${arg_data_dir}/appsdata"
|
||||
ln -s "${arg_data_dir}/appsdata" "${BASE_DATA_DIR}/appsdata"
|
||||
mkdir "${arg_data_dir}/platformdata"
|
||||
ln -s "${arg_data_dir}/platformdata" "${BASE_DATA_DIR}/platformdata"
|
||||
fi
|
||||
|
||||
# ensure we are not inside the source directory, which we will remove now
|
||||
cd /root
|
||||
|
||||
|
||||
@@ -5,24 +5,17 @@ json="${source_dir}/../node_modules/.bin/json"
|
||||
|
||||
# IMPORTANT: Fix cloudron.js:doUpdate if you add/remove any arg. keep these sorted for readability
|
||||
arg_api_server_origin=""
|
||||
arg_box_versions_url=""
|
||||
arg_fqdn=""
|
||||
arg_is_custom_domain="false"
|
||||
arg_restore_key=""
|
||||
arg_restore_url=""
|
||||
arg_admin_location=""
|
||||
arg_admin_fqdn=""
|
||||
arg_zone_name=""
|
||||
arg_is_custom_domain="false" # can be removed after 1.9
|
||||
arg_retire_reason=""
|
||||
arg_retire_info=""
|
||||
arg_tls_config=""
|
||||
arg_tls_cert=""
|
||||
arg_tls_key=""
|
||||
arg_token=""
|
||||
arg_version=""
|
||||
arg_web_server_origin=""
|
||||
arg_backup_config=""
|
||||
arg_dns_config=""
|
||||
arg_update_config=""
|
||||
arg_provider=""
|
||||
arg_app_bundle=""
|
||||
arg_is_demo="false"
|
||||
|
||||
args=$(getopt -o "" -l "data:,retire-reason:,retire-info:" -n "$0" -- "$@")
|
||||
@@ -41,53 +34,35 @@ while true; do
|
||||
--data)
|
||||
# these params must be valid in all cases
|
||||
arg_fqdn=$(echo "$2" | $json fqdn)
|
||||
arg_admin_fqdn=$(echo "$2" | $json adminFqdn)
|
||||
arg_zone_name=$(echo "$2" | $json zoneName)
|
||||
[[ "${arg_zone_name}" == "" ]] && arg_zone_name="${arg_fqdn}"
|
||||
|
||||
# can be removed after 1.9
|
||||
arg_is_custom_domain=$(echo "$2" | $json isCustomDomain)
|
||||
[[ "${arg_is_custom_domain}" == "" ]] && arg_is_custom_domain="true"
|
||||
|
||||
arg_admin_location=$(echo "$2" | $json adminLocation)
|
||||
[[ "${arg_admin_location}" == "" ]] && arg_admin_location="my"
|
||||
|
||||
# only update/restore have this valid (but not migrate)
|
||||
arg_api_server_origin=$(echo "$2" | $json apiServerOrigin)
|
||||
[[ "${arg_api_server_origin}" == "" ]] && arg_api_server_origin="https://api.cloudron.io"
|
||||
arg_web_server_origin=$(echo "$2" | $json webServerOrigin)
|
||||
[[ "${arg_web_server_origin}" == "" ]] && arg_web_server_origin="https://cloudron.io"
|
||||
arg_box_versions_url=$(echo "$2" | $json boxVersionsUrl)
|
||||
[[ "${arg_box_versions_url}" == "" ]] && arg_box_versions_url="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
|
||||
# TODO check if an where this is used
|
||||
# TODO check if and where this is used
|
||||
arg_version=$(echo "$2" | $json version)
|
||||
|
||||
# read possibly empty parameters here
|
||||
arg_app_bundle=$(echo "$2" | $json appBundle)
|
||||
[[ "${arg_app_bundle}" == "" ]] && arg_app_bundle="[]"
|
||||
|
||||
arg_is_demo=$(echo "$2" | $json isDemo)
|
||||
[[ "${arg_is_demo}" == "" ]] && arg_is_demo="false"
|
||||
|
||||
arg_tls_cert=$(echo "$2" | $json tlsCert)
|
||||
arg_tls_key=$(echo "$2" | $json tlsKey)
|
||||
arg_token=$(echo "$2" | $json token)
|
||||
|
||||
arg_provider=$(echo "$2" | $json provider)
|
||||
[[ "${arg_provider}" == "" ]] && arg_provider="generic"
|
||||
|
||||
arg_tls_config=$(echo "$2" | $json tlsConfig)
|
||||
[[ "${arg_tls_config}" == "null" ]] && arg_tls_config=""
|
||||
|
||||
arg_restore_url=$(echo "$2" | $json restore.url)
|
||||
[[ "${arg_restore_url}" == "null" ]] && arg_restore_url=""
|
||||
|
||||
arg_restore_key=$(echo "$2" | $json restore.key)
|
||||
[[ "${arg_restore_key}" == "null" ]] && arg_restore_key=""
|
||||
|
||||
arg_backup_config=$(echo "$2" | $json backupConfig)
|
||||
[[ "${arg_backup_config}" == "null" ]] && arg_backup_config=""
|
||||
|
||||
arg_dns_config=$(echo "$2" | $json dnsConfig)
|
||||
[[ "${arg_dns_config}" == "null" ]] && arg_dns_config=""
|
||||
|
||||
arg_update_config=$(echo "$2" | $json updateConfig)
|
||||
[[ "${arg_update_config}" == "null" ]] && arg_update_config=""
|
||||
|
||||
shift 2
|
||||
;;
|
||||
--) break;;
|
||||
@@ -97,15 +72,10 @@ done
|
||||
|
||||
echo "Parsed arguments:"
|
||||
echo "api server: ${arg_api_server_origin}"
|
||||
echo "box versions url: ${arg_box_versions_url}"
|
||||
echo "fqdn: ${arg_fqdn}"
|
||||
echo "custom domain: ${arg_is_custom_domain}"
|
||||
echo "restore key: ${arg_restore_key}"
|
||||
echo "restore url: ${arg_restore_url}"
|
||||
echo "tls cert: ${arg_tls_cert}"
|
||||
echo "tls key: ${arg_tls_key}"
|
||||
echo "token: ${arg_token}"
|
||||
echo "tlsConfig: ${arg_tls_config}"
|
||||
# do not dump these as they might become available via logs API
|
||||
#echo "token: ${arg_token}"
|
||||
echo "version: ${arg_version}"
|
||||
echo "web server: ${arg_web_server_origin}"
|
||||
echo "provider: ${arg_provider}"
|
||||
|
||||
@@ -6,12 +6,11 @@ readonly SETUP_WEBSITE_DIR="/home/yellowtent/setup/website"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_dir="$(realpath ${script_dir}/..)"
|
||||
readonly DATA_DIR="/home/yellowtent/data"
|
||||
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
|
||||
readonly PLATFORM_DATA_DIR="/home/yellowtent/platformdata"
|
||||
|
||||
echo "Setting up nginx update page"
|
||||
|
||||
if [[ ! -f "${DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
if [[ ! -f "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
echo "No admin.conf found. This Cloudron has no domain yet. Skip splash setup"
|
||||
exit
|
||||
fi
|
||||
@@ -19,8 +18,7 @@ fi
|
||||
source "${script_dir}/argparser.sh" "$@" # this injects the arg_* variables used below
|
||||
|
||||
# keep this is sync with config.js appFqdn()
|
||||
admin_fqdn=$([[ "${arg_is_custom_domain}" == "true" ]] && echo "${ADMIN_LOCATION}.${arg_fqdn}" || echo "${ADMIN_LOCATION}-${arg_fqdn}")
|
||||
admin_origin="https://${admin_fqdn}"
|
||||
admin_origin="https://${arg_admin_fqdn}"
|
||||
|
||||
# copy the website
|
||||
rm -rf "${SETUP_WEBSITE_DIR}" && mkdir -p "${SETUP_WEBSITE_DIR}"
|
||||
@@ -29,16 +27,16 @@ cp -r "${script_dir}/splash/website/"* "${SETUP_WEBSITE_DIR}"
|
||||
# create nginx config
|
||||
readonly current_infra=$(node -e "console.log(require('${script_dir}/../src/infra_version.js').version);")
|
||||
existing_infra="none"
|
||||
[[ -f "${DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
[[ -f "${PLATFORM_DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${PLATFORM_DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
if [[ "${arg_retire_reason}" != "" || "${existing_infra}" != "${current_infra}" ]]; then
|
||||
echo "Showing progress bar on all subdomains in retired mode or infra update. retire: ${arg_retire_reason} existing: ${existing_infra} current: ${current_infra}"
|
||||
rm -f ${DATA_DIR}/nginx/applications/*
|
||||
rm -f ${PLATFORM_DATA_DIR}/nginx/applications/*
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null, \"hasIPv6\": false }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
else
|
||||
echo "Show progress bar only on admin domain for normal update"
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"${arg_admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null, \"hasIPv6\": false }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
fi
|
||||
|
||||
if [[ "${arg_retire_reason}" == "migrate" ]]; then
|
||||
|
||||
@@ -5,10 +5,10 @@ set -eu -o pipefail
|
||||
echo "==> Cloudron Start"
|
||||
|
||||
readonly USER="yellowtent"
|
||||
readonly DATA_FILE="/root/user_data.img"
|
||||
readonly HOME_DIR="/home/${USER}"
|
||||
readonly BOX_SRC_DIR="${HOME_DIR}/box"
|
||||
readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
|
||||
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data
|
||||
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data
|
||||
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
|
||||
readonly CONFIG_DIR="${HOME_DIR}/configs"
|
||||
readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
|
||||
@@ -33,45 +33,19 @@ timedatectl set-ntp 1
|
||||
timedatectl set-timezone UTC
|
||||
hostnamectl set-hostname "${arg_fqdn}"
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
if [[ "${arg_provider}" == "caas" ]]; then
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,202,443,587,993,4190 -j ACCEPT
|
||||
else
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,22,443,587,993,4190 -j ACCEPT
|
||||
fi
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# so it gets restored across reboot
|
||||
mkdir -p /etc/iptables && iptables-save > /etc/iptables/rules.v4
|
||||
|
||||
echo "==> Configuring docker"
|
||||
cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
|
||||
systemctl enable apparmor
|
||||
systemctl restart apparmor
|
||||
|
||||
usermod ${USER} -a -G docker
|
||||
# preserve the existing storage driver (user might be using overlay2)
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
[[ -n "${storage_driver}" ]] || storage_driver="overlay2" # if the above command fails
|
||||
|
||||
temp_file=$(mktemp)
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=${storage_driver}" > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
# restart docker if options changed
|
||||
@@ -96,45 +70,30 @@ if [[ "${arg_provider}" == "caas" ]]; then
|
||||
systemctl reload sshd
|
||||
fi
|
||||
|
||||
echo "==> Setup btrfs data"
|
||||
if [[ ! -d "${DATA_DIR}" ]]; then
|
||||
echo "==> Mounting loopback btrfs"
|
||||
truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service)
|
||||
mkfs.btrfs -L UserDataHome "${DATA_FILE}"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
mount -t btrfs -o loop,nosuid "${DATA_FILE}" ${DATA_DIR}
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
mkdir -p "${APPS_DATA_DIR}"
|
||||
|
||||
# keep these in sync with paths.js
|
||||
echo "==> Ensuring directories"
|
||||
if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory
|
||||
btrfs subvolume create "${DATA_DIR}/mail"
|
||||
[[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
|
||||
rm -rf "${DATA_DIR}/box/mail"
|
||||
fi
|
||||
mkdir -p "${DATA_DIR}/graphite"
|
||||
mkdir -p "${DATA_DIR}/mail/dkim"
|
||||
|
||||
mkdir -p "${DATA_DIR}/mysql"
|
||||
mkdir -p "${DATA_DIR}/postgresql"
|
||||
mkdir -p "${DATA_DIR}/mongodb"
|
||||
mkdir -p "${DATA_DIR}/snapshots"
|
||||
mkdir -p "${DATA_DIR}/addons/mail"
|
||||
mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${DATA_DIR}/acme"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/graphite"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mysql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/postgresql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mongodb"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/addons/mail"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/logrotate.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/acme"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/backup"
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
|
||||
# Migrate box data out of data volume
|
||||
mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}"
|
||||
btrfs subvolume delete "${DATA_DIR}/box"
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}/appicons"
|
||||
mkdir -p "${BOX_DATA_DIR}/certs"
|
||||
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
|
||||
mkdir -p "${BOX_DATA_DIR}/mail/dkim"
|
||||
|
||||
# ensure backups folder exists and is writeable
|
||||
mkdir -p /var/backups
|
||||
chmod 777 /var/backups
|
||||
|
||||
echo "==> Configuring journald"
|
||||
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
|
||||
@@ -161,14 +120,20 @@ echo "==> Setting up unbound"
|
||||
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
|
||||
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
|
||||
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
|
||||
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
# If IP6 is not enabled, dns queries seem to fail on some hosts
|
||||
echo -e "server:\n\tinterface: 0.0.0.0\n\tdo-ip6: yes\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30\n\tcache-max-ttl: 300\n\t#logfile: /var/log/unbound.log\n\t#verbosity: 10" > /etc/unbound/unbound.conf.d/cloudron-network.conf
|
||||
# update the root anchor after a out-of-disk-space situation (see #269)
|
||||
unbound-anchor -a /var/lib/unbound/root.key
|
||||
|
||||
echo "==> Adding systemd services"
|
||||
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
|
||||
systemctl daemon-reload
|
||||
systemctl enable unbound
|
||||
systemctl enable cloudron.target
|
||||
systemctl enable iptables-restore
|
||||
systemctl enable cloudron-firewall
|
||||
|
||||
# update firewall rules
|
||||
systemctl restart cloudron-firewall
|
||||
|
||||
# For logrotate
|
||||
systemctl enable --now cron
|
||||
@@ -182,18 +147,26 @@ cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
|
||||
|
||||
echo "==> Configuring collectd"
|
||||
rm -rf /etc/collectd
|
||||
ln -sfF "${DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd.conf" "${DATA_DIR}/collectd/collectd.conf"
|
||||
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
|
||||
systemctl restart collectd
|
||||
|
||||
echo "==> Configuring logrotate"
|
||||
if ! grep -q "^include ${PLATFORM_DATA_DIR}/logrotate.d" /etc/logrotate.conf; then
|
||||
echo -e "\ninclude ${PLATFORM_DATA_DIR}/logrotate.d\n" >> /etc/logrotate.conf
|
||||
fi
|
||||
|
||||
echo "==> Adding motd message for admins"
|
||||
cp "${script_dir}/start/cloudron-motd" /etc/update-motd.d/92-cloudron
|
||||
|
||||
echo "==> Configuring nginx"
|
||||
# link nginx config to system config
|
||||
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
|
||||
ln -s "${DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
|
||||
ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${PLATFORM_DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${PLATFORM_DATA_DIR}/nginx/mime.types"
|
||||
if ! grep -q "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then
|
||||
# default nginx service file does not restart on crash
|
||||
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
|
||||
@@ -202,12 +175,7 @@ fi
|
||||
systemctl start nginx
|
||||
|
||||
# bookkeep the version as part of data
|
||||
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
|
||||
# which currently fails because these are readonly fs
|
||||
echo "==> Cleaning up snapshots"
|
||||
find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete
|
||||
echo "{ \"version\": \"${arg_version}\", \"apiServerOrigin\": \"${arg_api_server_origin}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# restart mysql to make sure it has latest config
|
||||
if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then
|
||||
@@ -218,7 +186,11 @@ if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf"
|
||||
echo "Waiting for mysql jobs..."
|
||||
sleep 1
|
||||
done
|
||||
systemctl restart mysql
|
||||
while true; do
|
||||
if systemctl restart mysql; then break; fi
|
||||
echo "Restarting MySql again after sometime since this fails randomly"
|
||||
sleep 1
|
||||
done
|
||||
else
|
||||
systemctl start mysql
|
||||
fi
|
||||
@@ -227,31 +199,20 @@ readonly mysql_root_password="password"
|
||||
mysqladmin -u root -ppassword password password # reset default root password
|
||||
mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
|
||||
if [[ -n "${arg_restore_url}" ]]; then
|
||||
set_progress "30" "Downloading restore data"
|
||||
|
||||
echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
|
||||
while true; do
|
||||
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
echo "Failed to download data, trying again"
|
||||
done
|
||||
|
||||
set_progress "35" "Setting up MySQL"
|
||||
if [[ -f "${BOX_DATA_DIR}/box.mysqldump" ]]; then
|
||||
echo "==> Importing existing database into MySQL"
|
||||
mysql -u root -p${mysql_root_password} box < "${BOX_DATA_DIR}/box.mysqldump"
|
||||
fi
|
||||
fi
|
||||
|
||||
set_progress "40" "Migrating data"
|
||||
sudo -u "${USER}" -H bash <<EOF
|
||||
set -eu
|
||||
cd "${BOX_SRC_DIR}"
|
||||
BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@localhost/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up
|
||||
BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up
|
||||
EOF
|
||||
|
||||
if [[ -z "${arg_admin_fqdn:-}" ]]; then
|
||||
# can be removed after 1.9
|
||||
admin_fqdn=$([[ "${arg_is_custom_domain}" == "true" ]] && echo "${arg_admin_location}.${arg_fqdn}" || echo "${arg_admin_location}-${arg_fqdn}")
|
||||
else
|
||||
admin_fqdn="${arg_admin_fqdn}"
|
||||
fi
|
||||
|
||||
echo "==> Creating cloudron.conf"
|
||||
cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
|
||||
{
|
||||
@@ -260,25 +221,13 @@ cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
|
||||
"apiServerOrigin": "${arg_api_server_origin}",
|
||||
"webServerOrigin": "${arg_web_server_origin}",
|
||||
"fqdn": "${arg_fqdn}",
|
||||
"isCustomDomain": ${arg_is_custom_domain},
|
||||
"boxVersionsUrl": "${arg_box_versions_url}",
|
||||
"adminFqdn": "${admin_fqdn}",
|
||||
"adminLocation": "${arg_admin_location}",
|
||||
"zoneName": "${arg_zone_name}",
|
||||
"provider": "${arg_provider}",
|
||||
"isDemo": ${arg_is_demo},
|
||||
"database": {
|
||||
"hostname": "localhost",
|
||||
"username": "root",
|
||||
"password": "${mysql_root_password}",
|
||||
"port": 3306,
|
||||
"name": "box"
|
||||
},
|
||||
"appBundle": ${arg_app_bundle}
|
||||
"isDemo": ${arg_is_demo}
|
||||
}
|
||||
CONF_END
|
||||
# pass these out-of-band because they have new lines which interfere with json
|
||||
if [[ -n "${arg_tls_cert}" && -n "${arg_tls_key}" ]]; then
|
||||
echo "${arg_tls_cert}" > "${CONFIG_DIR}/host.cert"
|
||||
echo "${arg_tls_key}" > "${CONFIG_DIR}/host.key"
|
||||
fi
|
||||
|
||||
echo "==> Creating config.json for webadmin"
|
||||
cat > "${BOX_SRC_DIR}/webadmin/dist/config.json" <<CONF_END
|
||||
@@ -287,39 +236,25 @@ cat > "${BOX_SRC_DIR}/webadmin/dist/config.json" <<CONF_END
|
||||
}
|
||||
CONF_END
|
||||
|
||||
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
|
||||
echo "==> Generating dhparams (takes forever)"
|
||||
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
|
||||
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
|
||||
else
|
||||
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
|
||||
fi
|
||||
|
||||
echo "==> Changing ownership"
|
||||
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
|
||||
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/logrotate.d" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup"
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
|
||||
|
||||
echo "==> Adding automated configs"
|
||||
if [[ ! -z "${arg_backup_config}" ]]; then
|
||||
mysql -u root -p${mysql_root_password} \
|
||||
-e "REPLACE INTO settings (name, value) VALUES (\"backup_config\", '$arg_backup_config')" box
|
||||
fi
|
||||
|
||||
if [[ ! -z "${arg_dns_config}" ]]; then
|
||||
mysql -u root -p${mysql_root_password} \
|
||||
-e "REPLACE INTO settings (name, value) VALUES (\"dns_config\", '$arg_dns_config')" box
|
||||
fi
|
||||
|
||||
if [[ ! -z "${arg_update_config}" ]]; then
|
||||
mysql -u root -p${mysql_root_password} \
|
||||
-e "REPLACE INTO settings (name, value) VALUES (\"update_config\", '$arg_update_config')" box
|
||||
fi
|
||||
|
||||
if [[ ! -z "${arg_tls_config}" ]]; then
|
||||
mysql -u root -p${mysql_root_password} \
|
||||
-e "REPLACE INTO settings (name, value) VALUES (\"tls_config\", '$arg_tls_config')" box
|
||||
fi
|
||||
|
||||
echo "==> Generating dhparams (takes forever)"
|
||||
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
|
||||
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
|
||||
fi
|
||||
# do not chown the boxdata/mail directory; dovecot gets upset
|
||||
chown "${USER}:${USER}" "${BOX_DATA_DIR}"
|
||||
find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${BOX_DATA_DIR}/mail" -exec chown -R "${USER}:${USER}" {} \;
|
||||
chown "${USER}:${USER}" "${BOX_DATA_DIR}/mail"
|
||||
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
|
||||
set_progress "60" "Starting Cloudron"
|
||||
systemctl start cloudron.target
|
||||
|
||||
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443,587,993,4190 -j ACCEPT
|
||||
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# Setup rate limit chain (the recent info is at /proc/net/xt_recent)
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT # empty any existing rules
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT_LOG || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT_LOG # empty any existing rules
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -m limit --limit 2/min -j LOG --log-prefix "IPTables RateLimit: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -j DROP
|
||||
|
||||
# http https
|
||||
for port in 80 443; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# ssh smtp ssh msa imap sieve
|
||||
for port in 22 202; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# TODO: move docker platform rules to platform.js so it can be specialized to rate limit only when destination is the mail container
|
||||
|
||||
# docker translates (dnat) 25, 587, 993, 4190 in the PREROUTING step
|
||||
for port in 2525 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn ! -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 50 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# msa, ldap, imap, sieve
|
||||
for port in 2525 3002 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 500 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# cloudron docker network: mysql postgresql redis mongodb
|
||||
for port in 3306 5432 6379 27017; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# For ssh, http, https
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null; then
|
||||
iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
fi
|
||||
|
||||
# For smtp, imap etc routed via docker/nat
|
||||
# Workaroud issue where Docker insists on adding itself first in FORWARD table
|
||||
iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
|
||||
iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
|
||||
@@ -0,0 +1,15 @@
|
||||
#!/bin/sh
|
||||
# motd hook to remind admins about updates
|
||||
printf "\t\t\tNOTE TO CLOUDRON ADMINS\n"
|
||||
printf "\t\t\t-----------------------\n"
|
||||
printf "Please do not run apt upgrade manually as it will update packages that\n"
|
||||
printf "Cloudron relies on and may break your installation. Ubuntu security updates\n"
|
||||
printf "are automatically installed on this server every night.\n"
|
||||
printf "\n"
|
||||
printf "Read more at https://cloudron.io/documentation/security/#os-updates\n"
|
||||
|
||||
if grep -q "^PasswordAuthentication yes" /etc/ssh/sshd_config; then
|
||||
printf "\nPlease disable password based SSH access to secure your server. Read more at\n"
|
||||
printf "https://cloudron.io/documentation/security/#securing-ssh-access\n"
|
||||
fi
|
||||
|
||||
@@ -2,49 +2,42 @@
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
readonly USER_HOME="/home/yellowtent"
|
||||
readonly APPS_SWAP_FILE="/apps.swap"
|
||||
readonly USER_DATA_FILE="/root/user_data.img"
|
||||
readonly USER_DATA_DIR="/home/yellowtent/data"
|
||||
|
||||
# detect device of rootfs (http://forums.fedoraforum.org/showthread.php?t=270316)
|
||||
disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
|
||||
existing_swap=$(cat /proc/meminfo | grep SwapTotal | awk '{ printf "%.0f", $2/1024 }')
|
||||
|
||||
# all sizes are in mb
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly swap_size=$((${physical_memory} > 4096 ? 4096 : ${physical_memory})) # min(RAM, 4GB) if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly app_count=$((${physical_memory} / 200)) # estimated app count
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }') # can't rely on fdisk human readable units, using bytes instead
|
||||
readonly disk_size=$((${disk_size_bytes}/1024/1024))
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size=$((${disk_size_bytes}/1024))
|
||||
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
|
||||
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
|
||||
|
||||
echo "Disk device: ${disk_device}"
|
||||
echo "Physical memory: ${physical_memory}"
|
||||
echo "Estimated app count: ${app_count}"
|
||||
echo "Disk size: ${disk_size}M"
|
||||
|
||||
# Allocate swap for general app usage
|
||||
if [[ ! -f "${APPS_SWAP_FILE}" && ${swap_size} -gt 0 ]]; then
|
||||
echo "Creating Apps swap file of size ${swap_size}M"
|
||||
fallocate -l "${swap_size}m" "${APPS_SWAP_FILE}"
|
||||
readonly current_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
readonly needed_swap_size=$((swap_size - current_swap))
|
||||
if [[ ${needed_swap_size} -gt 0 ]]; then
|
||||
echo "Need more swap of ${needed_swap_size}M"
|
||||
# compute size of apps.swap ignoring what is already set
|
||||
without_apps_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{if ($1!="/apps.swap") s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
apps_swap_size=$((swap_size - without_apps_swap))
|
||||
echo "Creating Apps swap file of size ${apps_swap_size}M"
|
||||
if [[ -f "${APPS_SWAP_FILE}" ]]; then
|
||||
echo "Swapping off before resizing swap"
|
||||
swapoff "${APPS_SWAP_FILE}" || true
|
||||
fi
|
||||
fallocate -l "${apps_swap_size}m" "${APPS_SWAP_FILE}"
|
||||
chmod 600 "${APPS_SWAP_FILE}"
|
||||
mkswap "${APPS_SWAP_FILE}"
|
||||
swapon "${APPS_SWAP_FILE}"
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
if ! grep -q "${APPS_SWAP_FILE}" /etc/fstab; then
|
||||
echo "Adding swap to fstab"
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
fi
|
||||
else
|
||||
echo "Apps Swap file already exists"
|
||||
echo "Swap requirements already met"
|
||||
fi
|
||||
|
||||
# see start.sh for the initial default size of 8gb. On small disks the calculation might be lower than 8gb resulting in a failure to resize here.
|
||||
echo "Resizing data volume"
|
||||
home_data_size=$((disk_size - system_size - swap_size - ext4_reserved))
|
||||
echo "Resizing up btrfs user data to size ${home_data_size}M"
|
||||
umount "${USER_DATA_DIR}" || true
|
||||
# Do not preallocate (non-sparse). Doing so overallocates for data too much in advance and causes problems when using many apps with smaller data
|
||||
# fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data
|
||||
truncate -s "${home_data_size}m" "${USER_DATA_FILE}" # this will shrink it if the file had existed. this is useful when running this script on a live system
|
||||
mount -t btrfs -o loop,nosuid "${USER_DATA_FILE}" ${USER_DATA_DIR}
|
||||
btrfs filesystem resize max "${USER_DATA_DIR}"
|
||||
|
||||
@@ -89,7 +89,7 @@ LoadPlugin cpu
|
||||
#LoadPlugin curl_json
|
||||
#LoadPlugin curl_xml
|
||||
#LoadPlugin dbi
|
||||
LoadPlugin df
|
||||
#LoadPlugin df
|
||||
#LoadPlugin disk
|
||||
#LoadPlugin dns
|
||||
#LoadPlugin email
|
||||
@@ -138,9 +138,9 @@ LoadPlugin nginx
|
||||
#LoadPlugin powerdns
|
||||
#LoadPlugin processes
|
||||
#LoadPlugin protocols
|
||||
#<LoadPlugin python>
|
||||
# Globals true
|
||||
#</LoadPlugin>
|
||||
<LoadPlugin python>
|
||||
Globals true
|
||||
</LoadPlugin>
|
||||
#LoadPlugin rrdcached
|
||||
#LoadPlugin rrdtool
|
||||
#LoadPlugin sensors
|
||||
@@ -192,17 +192,6 @@ LoadPlugin write_graphite
|
||||
</Aggregation>
|
||||
</Plugin>
|
||||
|
||||
<Plugin df>
|
||||
FSType "ext4"
|
||||
FSType "btrfs"
|
||||
|
||||
ReportByDevice true
|
||||
IgnoreSelected false
|
||||
|
||||
ValuesAbsolute true
|
||||
ValuesPercentage true
|
||||
</Plugin>
|
||||
|
||||
<Plugin interface>
|
||||
Interface "eth0"
|
||||
IgnoreSelected false
|
||||
@@ -244,6 +233,17 @@ LoadPlugin write_graphite
|
||||
</File>
|
||||
</Plugin>
|
||||
|
||||
<Plugin python>
|
||||
# https://blog.dbrgn.ch/2017/3/10/write-a-collectd-python-plugin/
|
||||
ModulePath "/home/yellowtent/box/setup/start/collectd/"
|
||||
LogTraces false # enable this to get traces in /var/log/collectd.log
|
||||
Interactive false
|
||||
|
||||
Import "df"
|
||||
# <Module df>
|
||||
# </Module>
|
||||
</Plugin>
|
||||
|
||||
<Plugin write_graphite>
|
||||
<Node "graphing">
|
||||
Host "localhost"
|
||||
@@ -260,4 +260,3 @@ LoadPlugin write_graphite
|
||||
<Include "/etc/collectd/collectd.conf.d">
|
||||
Filter "*.conf"
|
||||
</Include>
|
||||
|
||||
@@ -0,0 +1,36 @@
|
||||
import collectd,os,subprocess
|
||||
|
||||
# https://blog.dbrgn.ch/2017/3/10/write-a-collectd-python-plugin/
|
||||
|
||||
disks = []
|
||||
|
||||
def init():
|
||||
global disks
|
||||
lines = [s.split() for s in subprocess.check_output(["df", "--type=ext4", "--output=source,target,size,used,avail"]).splitlines()]
|
||||
disks = lines[1:] # strip header
|
||||
collectd.info('custom df plugin initialized with %s' % disks)
|
||||
|
||||
def read():
|
||||
for d in disks:
|
||||
device = d[0]
|
||||
if 'devicemapper' in d[1] or not device.startswith('/dev/'): continue
|
||||
instance = device[len('/dev/'):].replace('/', '_') # see #348
|
||||
|
||||
try:
|
||||
st = os.statvfs(d[1]) # handle disk removal
|
||||
except:
|
||||
continue
|
||||
|
||||
val = collectd.Values(type='df_complex', plugin='df', plugin_instance=instance)
|
||||
|
||||
free = st.f_bavail * st.f_frsize # bavail is for non-root user. bfree is total
|
||||
val.dispatch(values=[free], type_instance='free')
|
||||
|
||||
reserved = (st.f_bfree - st.f_bavail) * st.f_frsize # root took these
|
||||
val.dispatch(values=[reserved], type_instance='reserved')
|
||||
|
||||
used = (st.f_blocks - st.f_bfree) * st.f_frsize
|
||||
val.dispatch(values=[used], type_instance='used')
|
||||
|
||||
collectd.register_init(init)
|
||||
collectd.register_read(read)
|
||||
@@ -8,3 +8,19 @@ max_connections=50
|
||||
# on ec2, without this we get a sporadic connection drop when doing the initial migration
|
||||
max_allowed_packet=32M
|
||||
|
||||
# https://mathiasbynens.be/notes/mysql-utf8mb4
|
||||
character-set-server = utf8mb4
|
||||
collation-server = utf8mb4_unicode_ci
|
||||
|
||||
[mysqldump]
|
||||
quick
|
||||
quote-names
|
||||
max_allowed_packet = 16M
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[mysql]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[client]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
|
||||
@@ -4,13 +4,54 @@ map $http_upgrade $connection_upgrade {
|
||||
'' close;
|
||||
}
|
||||
|
||||
# http server
|
||||
server {
|
||||
<% if (vhost) { %>
|
||||
listen 443;
|
||||
listen 80;
|
||||
<% if (hasIPv6) { -%>
|
||||
listen [::]:80;
|
||||
<% } -%>
|
||||
|
||||
<% if (vhost) { -%>
|
||||
server_name <%= vhost %>;
|
||||
<% } else { %>
|
||||
listen 443 default_server;
|
||||
<% } %>
|
||||
<% } else { -%>
|
||||
# IP based access from collectd or initial cloudron setup. TODO: match the IPv6 address
|
||||
server_name "~^\d+\.\d+\.\d+\.\d+$";
|
||||
|
||||
# collectd
|
||||
location /nginx_status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
deny all;
|
||||
}
|
||||
<% } -%>
|
||||
|
||||
# acme challenges (for cert renewal where the vhost config exists)
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type text/plain;
|
||||
alias /home/yellowtent/platformdata/acme/;
|
||||
}
|
||||
|
||||
location / {
|
||||
# redirect everything to HTTPS
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
}
|
||||
|
||||
# https server
|
||||
server {
|
||||
<% if (vhost) { -%>
|
||||
server_name <%= vhost %>;
|
||||
listen 443 http2;
|
||||
<% if (hasIPv6) { -%>
|
||||
listen [::]:443 http2;
|
||||
<% } -%>
|
||||
<% } else { -%>
|
||||
listen 443 http2 default_server;
|
||||
<% if (hasIPv6) { -%>
|
||||
listen [::]:443 http2 default_server;
|
||||
<% } -%>
|
||||
<% } -%>
|
||||
|
||||
ssl on;
|
||||
# paths are relative to prefix and not to this file
|
||||
@@ -32,14 +73,21 @@ server {
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
proxy_hide_header X-Frame-Options;
|
||||
|
||||
# https://github.com/twitter/secureheaders
|
||||
# https://www.owasp.org/index.php/OWASP_Secure_Headers_Project#tab=Compatibility_Matrix
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
proxy_hide_header X-XSS-Protection;
|
||||
add_header X-Download-Options "noopen";
|
||||
proxy_hide_header X-Download-Options;
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
proxy_hide_header X-Content-Type-Options;
|
||||
add_header X-Permitted-Cross-Domain-Policies "none";
|
||||
proxy_hide_header X-Permitted-Cross-Domain-Policies;
|
||||
add_header Referrer-Policy "no-referrer-when-downgrade";
|
||||
proxy_hide_header Referrer-Policy;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_intercept_errors on;
|
||||
@@ -49,6 +97,7 @@ server {
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Proto https;
|
||||
proxy_set_header X-Forwarded-Ssl on;
|
||||
|
||||
@@ -69,22 +118,42 @@ server {
|
||||
proxy_buffers 4 256k;
|
||||
proxy_busy_buffers_size 256k;
|
||||
|
||||
# Disable check to allow unlimited body sizes
|
||||
# No buffering to temp files, it fails for large downloads
|
||||
proxy_max_temp_file_size 0;
|
||||
|
||||
# Disable check to allow unlimited body sizes. this allows apps to accept whatever size they want
|
||||
client_max_body_size 0;
|
||||
|
||||
<% if (robotsTxtQuoted) { %>
|
||||
location = /robots.txt {
|
||||
return 200 <%- robotsTxtQuoted %>;
|
||||
}
|
||||
<% } %>
|
||||
|
||||
<% if ( endpoint === 'admin' ) { %>
|
||||
location /api/ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
}
|
||||
|
||||
location ~ ^/api/v1/(developer|session)/login$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
limit_req zone=admin_login burst=5;
|
||||
}
|
||||
|
||||
# the read timeout is between successive reads and not the whole connection
|
||||
location ~ ^/api/v1/apps/.*/exec$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
proxy_read_timeout 30m;
|
||||
}
|
||||
|
||||
# graphite paths
|
||||
location ~ ^/api/v1/apps/.*/upload$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 0;
|
||||
}
|
||||
|
||||
# graphite paths (uncomment block below and visit /graphite/index.html)
|
||||
# location ~ ^/(graphite|content|metrics|dashboard|render|browser|composer)/ {
|
||||
# proxy_pass http://127.0.0.1:8000;
|
||||
# client_max_body_size 1m;
|
||||
@@ -94,7 +163,6 @@ server {
|
||||
root <%= sourceDir %>/webadmin/dist;
|
||||
index index.html index.htm;
|
||||
}
|
||||
|
||||
<% } else if ( endpoint === 'app' ) { %>
|
||||
proxy_pass http://127.0.0.1:<%= port %>;
|
||||
<% } else if ( endpoint === 'splash' ) { %>
|
||||
@@ -134,4 +202,3 @@ server {
|
||||
<% } %>
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,12 +15,12 @@ http {
|
||||
# the collectd config depends on this log format
|
||||
log_format combined2 '$remote_addr - [$time_local] '
|
||||
'"$request" $status $body_bytes_sent $request_time '
|
||||
'"$http_referer" "$http_user_agent"';
|
||||
'"$http_referer" "$host" "$http_user_agent"';
|
||||
|
||||
# required for long host names
|
||||
server_names_hash_bucket_size 128;
|
||||
|
||||
access_log access.log combined2;
|
||||
access_log /var/log/nginx/access.log combined2;
|
||||
|
||||
sendfile on;
|
||||
|
||||
@@ -33,30 +33,26 @@ http {
|
||||
# keep-alive connections timeout in 65s. this is because many browsers timeout in 60 seconds
|
||||
keepalive_timeout 65s;
|
||||
|
||||
# HTTP server
|
||||
# zones for rate limiting
|
||||
limit_req_zone $binary_remote_addr zone=admin_login:10m rate=10r/s; # 10 request a second
|
||||
|
||||
|
||||
# default http server that returns 404 for any domain we are not listening on
|
||||
server {
|
||||
listen 80;
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name does_not_match_anything;
|
||||
|
||||
# collectd
|
||||
location /nginx_status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
deny all;
|
||||
}
|
||||
|
||||
# acme challenges
|
||||
# acme challenges (for app installation and re-configure when the vhost config does not exist)
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type text/plain;
|
||||
alias /home/yellowtent/data/acme/;
|
||||
alias /home/yellowtent/platformdata/acme/;
|
||||
}
|
||||
|
||||
location / {
|
||||
# redirect everything to HTTPS
|
||||
return 301 https://$host$request_uri;
|
||||
return 404;
|
||||
}
|
||||
}
|
||||
|
||||
include applications/*.conf;
|
||||
}
|
||||
|
||||
|
||||
@@ -10,20 +10,11 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmappdir.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupbox.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupbox.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restoreapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restoreapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reloadcollectd.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadcollectd.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/configurecollectd.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurecollectd.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/collectlogs.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
|
||||
@@ -31,11 +22,18 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/retire.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/retire.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/rmbackup.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmbackup.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/authorized_keys.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/authorized_keys.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/configurelogrotate.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurelogrotate.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/backuptask.js env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/backuptask.js
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restart.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restart.sh
|
||||
|
||||
|
||||
@@ -0,0 +1,12 @@
|
||||
[Unit]
|
||||
Description=Cloudron Firewall
|
||||
After=docker.service
|
||||
PartOf=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart="/home/yellowtent/box/setup/start/cloudron-firewall.sh"
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,11 +0,0 @@
|
||||
[Unit]
|
||||
Description=IPTables Restore
|
||||
Before=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/sbin/iptables-restore /etc/iptables/rules.v4
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -114,7 +114,7 @@ var RMAPPDIR_CMD = path.join(__dirname, 'scripts/rmappdir.sh');
|
||||
function debugApp(app, args) {
|
||||
assert(!app || typeof app === 'object');
|
||||
|
||||
var prefix = app ? (app.location || 'naked_domain') : '(no app)';
|
||||
var prefix = app ? app.intrinsicFqdn : '(no app)';
|
||||
debug(prefix + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
|
||||
}
|
||||
|
||||
@@ -125,7 +125,7 @@ function setupAddons(app, addons, callback) {
|
||||
|
||||
if (!addons) return callback(null);
|
||||
|
||||
debugApp(app, 'setupAddons: Settings up %j', Object.keys(addons));
|
||||
debugApp(app, 'setupAddons: Setting up %j', Object.keys(addons));
|
||||
|
||||
async.eachSeries(Object.keys(addons), function iterator(addon, iteratorCallback) {
|
||||
if (!(addon in KNOWN_ADDONS)) return iteratorCallback(new Error('No such addon:' + addon));
|
||||
@@ -194,7 +194,11 @@ function getEnvironment(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
appdb.getAddonConfigByAppId(app.id, callback);
|
||||
appdb.getAddonConfigByAppId(app.id, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
return callback(null, result.map(function (e) { return e.name + '=' + e.value; }));
|
||||
});
|
||||
}
|
||||
|
||||
function getBindsSync(app, addons) {
|
||||
@@ -207,7 +211,7 @@ function getBindsSync(app, addons) {
|
||||
|
||||
for (var addon in addons) {
|
||||
switch (addon) {
|
||||
case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
case 'localstorage': binds.push(path.join(paths.APPS_DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
@@ -241,10 +245,12 @@ function setupOauth(app, options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'setupOauth');
|
||||
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
var appId = app.id;
|
||||
var redirectURI = 'https://' + config.appFqdn(app.location);
|
||||
var redirectURI = 'https://' + (app.altDomain || app.intrinsicFqdn);
|
||||
var scope = 'profile';
|
||||
|
||||
clients.delByAppIdAndType(appId, clients.TYPE_OAUTH, function (error) { // remove existing creds
|
||||
@@ -254,9 +260,9 @@ function setupOauth(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = [
|
||||
'OAUTH_CLIENT_ID=' + result.id,
|
||||
'OAUTH_CLIENT_SECRET=' + result.clientSecret,
|
||||
'OAUTH_ORIGIN=' + config.adminOrigin()
|
||||
{ name: 'OAUTH_CLIENT_ID', value: result.id },
|
||||
{ name: 'OAUTH_CLIENT_SECRET', value: result.clientSecret },
|
||||
{ name: 'OAUTH_ORIGIN', value: config.adminOrigin() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting oauth addon config to %j', env);
|
||||
@@ -287,13 +293,13 @@ function setupEmail(app, options, callback) {
|
||||
|
||||
// note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
|
||||
var env = [
|
||||
'MAIL_SMTP_SERVER=mail',
|
||||
'MAIL_SMTP_PORT=2525',
|
||||
'MAIL_IMAP_SERVER=mail',
|
||||
'MAIL_IMAP_PORT=9993',
|
||||
'MAIL_SIEVE_SERVER=mail',
|
||||
'MAIL_SIEVE_PORT=4190',
|
||||
'MAIL_DOMAIN=' + config.fqdn()
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_SIEVE_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SIEVE_PORT', value: '4190' },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up Email');
|
||||
@@ -319,13 +325,13 @@ function setupLdap(app, options, callback) {
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
var env = [
|
||||
'LDAP_SERVER=172.18.0.1',
|
||||
'LDAP_PORT=' + config.get('ldapPort'),
|
||||
'LDAP_URL=ldap://172.18.0.1:' + config.get('ldapPort'),
|
||||
'LDAP_USERS_BASE_DN=ou=users,dc=cloudron',
|
||||
'LDAP_GROUPS_BASE_DN=ou=groups,dc=cloudron',
|
||||
'LDAP_BIND_DN=cn='+ app.id + ',ou=apps,dc=cloudron',
|
||||
'LDAP_BIND_PASSWORD=' + hat(4 * 128) // this is ignored
|
||||
{ name: 'LDAP_SERVER', value: '172.18.0.1' },
|
||||
{ name: 'LDAP_PORT', value: '' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_URL', value: 'ldap://172.18.0.1:' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
|
||||
{ name: 'LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_DN', value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up LDAP');
|
||||
@@ -354,14 +360,16 @@ function setupSendMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
"MAIL_SMTP_SERVER=mail",
|
||||
"MAIL_SMTP_PORT=2525",
|
||||
"MAIL_SMTP_USERNAME=" + mailbox.name,
|
||||
"MAIL_SMTP_PASSWORD=" + app.id,
|
||||
"MAIL_FROM=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_SMTPS_PORT', value: '4650' },
|
||||
{ name: 'MAIL_SMTP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_SMTP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_FROM', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'sendmail', env, callback);
|
||||
@@ -389,14 +397,15 @@ function setupRecvMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
"MAIL_IMAP_SERVER=mail",
|
||||
"MAIL_IMAP_PORT=9993",
|
||||
"MAIL_IMAP_USERNAME=" + mailbox.name,
|
||||
"MAIL_IMAP_PASSWORD=" + app.id,
|
||||
"MAIL_TO=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_IMAP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_IMAP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_TO', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
@@ -426,7 +435,9 @@ function setupMySql(app, options, callback) {
|
||||
docker.execContainer('mysql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting mysql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mysql', env, callback);
|
||||
});
|
||||
@@ -453,7 +464,7 @@ function backupMySql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'backup-prefix' : 'backup', app.id ];
|
||||
@@ -469,7 +480,7 @@ function restoreMySql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMySql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'restore-prefix' : 'restore', app.id ];
|
||||
@@ -489,7 +500,9 @@ function setupPostgreSql(app, options, callback) {
|
||||
docker.execContainer('postgresql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting postgresql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'postgresql', env, callback);
|
||||
});
|
||||
@@ -516,7 +529,7 @@ function backupPostgreSql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'backup', app.id ];
|
||||
@@ -532,7 +545,7 @@ function restorePostgreSql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restorePostgreSql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'restore', app.id ];
|
||||
@@ -553,7 +566,9 @@ function setupMongoDb(app, options, callback) {
|
||||
docker.execContainer('mongodb', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
debugApp(app, 'Setting mongodb addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mongodb', env, callback);
|
||||
});
|
||||
@@ -580,7 +595,7 @@ function backupMongoDb(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'backup', app.id ];
|
||||
@@ -596,7 +611,7 @@ function restoreMongoDb(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMongoDb');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'restore', app.id ];
|
||||
@@ -610,9 +625,9 @@ function setupRedis(app, options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var redisPassword = generatePassword(64, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisPassword = generatePassword(128, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisVarsFile = path.join(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh');
|
||||
var redisDataDir = path.join(paths.DATA_DIR, app.id + '/redis');
|
||||
var redisDataDir = path.join(paths.APPS_DATA_DIR, app.id + '/redis');
|
||||
|
||||
if (!safe.fs.writeFileSync(redisVarsFile, 'REDIS_PASSWORD=' + redisPassword)) {
|
||||
return callback(new Error('Error writing redis config'));
|
||||
@@ -620,21 +635,37 @@ function setupRedis(app, options, callback) {
|
||||
|
||||
if (!safe.fs.mkdirSync(redisDataDir) && safe.error.code !== 'EEXIST') return callback(new Error('Error creating redis data dir:' + safe.error));
|
||||
|
||||
// Compute redis memory limit based on app's memory limit (this is arbitrary)
|
||||
var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;
|
||||
|
||||
if (memoryLimit === -1) { // unrestricted (debug mode)
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
|
||||
memoryLimit = 150 * 1024 * 1024; // 150m
|
||||
} else {
|
||||
memoryLimit = 600 * 1024 * 1024; // 600m
|
||||
}
|
||||
|
||||
const tag = infra.images.redis.tag, redisName = 'redis-' + app.id;
|
||||
const label = app.intrinsicFqdn;
|
||||
// note that we do not add appId label because this interferes with the stop/start app logic
|
||||
const cmd = `docker run --restart=always -d --name=${redisName} \
|
||||
--label=location=${label} \
|
||||
--net cloudron \
|
||||
--net-alias ${redisName} \
|
||||
-m 100m \
|
||||
--memory-swap 150m \
|
||||
-m ${memoryLimit/2} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-v ${redisVarsFile}:/etc/redis/redis_vars.sh:ro \
|
||||
-v ${redisDataDir}:/var/lib/redis:rw \
|
||||
--read-only -v /tmp -v /run ${tag}`;
|
||||
|
||||
var env = [
|
||||
'REDIS_URL=redis://redisuser:' + redisPassword + '@redis-' + app.id,
|
||||
'REDIS_PASSWORD=' + redisPassword,
|
||||
'REDIS_HOST=' + redisName,
|
||||
'REDIS_PORT=6379'
|
||||
{ name: 'REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
|
||||
{ name: 'REDIS_PASSWORD', value: redisPassword },
|
||||
{ name: 'REDIS_HOST', value: redisName },
|
||||
{ name: 'REDIS_PORT', value: '6379' }
|
||||
];
|
||||
|
||||
async.series([
|
||||
@@ -666,7 +697,7 @@ function teardownRedis(app, options, callback) {
|
||||
|
||||
safe.fs.unlinkSync(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh');
|
||||
|
||||
shell.sudo('teardownRedis', [ RMAPPDIR_CMD, app.id + '/redis' ], function (error, stdout, stderr) {
|
||||
shell.sudo('teardownRedis', [ RMAPPDIR_CMD, app.id + '/redis', true /* delete directory */ ], function (error, stdout, stderr) {
|
||||
if (error) return callback(new Error('Error removing redis data:' + error));
|
||||
|
||||
appdb.unsetAddonConfig(app.id, 'redis', callback);
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
var crypto = require('crypto');
|
||||
|
||||
// This code is taken from https://github.com/fabdrol/node-aes-helper
|
||||
module.exports = {
|
||||
algorithm: 'AES-256-CBC',
|
||||
|
||||
key: function (password, salt) {
|
||||
var key = salt.toString('utf8') + password;
|
||||
var hash = crypto.createHash('sha1');
|
||||
|
||||
hash.update(key, 'utf8');
|
||||
return hash.digest('hex');
|
||||
},
|
||||
|
||||
encrypt: function (plain, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var cipher = crypto.createCipher(this.algorithm, key);
|
||||
var crypted;
|
||||
|
||||
try {
|
||||
crypted = cipher.update(plain, 'utf8', 'hex');
|
||||
crypted += cipher.final('hex');
|
||||
} catch (e) {
|
||||
console.error('Encryption error:', e);
|
||||
crypted = '';
|
||||
}
|
||||
|
||||
return crypted;
|
||||
},
|
||||
|
||||
decrypt: function (crypted, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var decipher = crypto.createDecipher(this.algorithm, key);
|
||||
var decoded;
|
||||
|
||||
try {
|
||||
decoded = decipher.update(crypted, 'hex', 'utf8');
|
||||
decoded += decipher.final('utf8');
|
||||
} catch (e) {
|
||||
console.error('Decryption error:', e);
|
||||
decoded = '';
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -10,10 +10,12 @@ exports = module.exports = {
|
||||
update: update,
|
||||
getAll: getAll,
|
||||
getPortBindings: getPortBindings,
|
||||
delPortBinding: delPortBinding,
|
||||
|
||||
setAddonConfig: setAddonConfig,
|
||||
getAddonConfig: getAddonConfig,
|
||||
getAddonConfigByAppId: getAddonConfigByAppId,
|
||||
getAddonConfigByName: getAddonConfigByName,
|
||||
unsetAddonConfig: unsetAddonConfig,
|
||||
unsetAddonConfigByAppId: unsetAddonConfigByAppId,
|
||||
|
||||
@@ -57,9 +59,10 @@ var assert = require('assert'),
|
||||
util = require('util');
|
||||
|
||||
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
|
||||
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
|
||||
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.domain', 'apps.dnsRecordId',
|
||||
'apps.accessRestrictionJson', 'apps.restoreConfigJson', 'apps.oldConfigJson', 'apps.updateConfigJson', 'apps.memoryLimit',
|
||||
'apps.altDomain', 'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson', 'apps.robotsTxt', 'apps.enableBackup',
|
||||
'apps.creationTime', 'apps.updateTime' ].join(',');
|
||||
|
||||
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'environmentVariable', 'appId' ].join(',');
|
||||
|
||||
@@ -74,6 +77,14 @@ function postProcess(result) {
|
||||
result.oldConfig = safe.JSON.parse(result.oldConfigJson);
|
||||
delete result.oldConfigJson;
|
||||
|
||||
assert(result.updateConfigJson === null || typeof result.updateConfigJson === 'string');
|
||||
result.updateConfig = safe.JSON.parse(result.updateConfigJson);
|
||||
delete result.updateConfigJson;
|
||||
|
||||
assert(result.restoreConfigJson === null || typeof result.restoreConfigJson === 'string');
|
||||
result.restoreConfig = safe.JSON.parse(result.restoreConfigJson);
|
||||
delete result.restoreConfigJson;
|
||||
|
||||
assert(result.hostPorts === null || typeof result.hostPorts === 'string');
|
||||
assert(result.environmentVariables === null || typeof result.environmentVariables === 'string');
|
||||
|
||||
@@ -97,6 +108,7 @@ function postProcess(result) {
|
||||
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
|
||||
|
||||
result.sso = !!result.sso; // make it bool
|
||||
result.enableBackup = !!result.enableBackup; // make it bool
|
||||
|
||||
assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
|
||||
result.debugMode = safe.JSON.parse(result.debugModeJson);
|
||||
@@ -166,12 +178,13 @@ function getAll(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
function add(id, appStoreId, manifest, location, domain, portBindings, data, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof appStoreId, 'string');
|
||||
assert(manifest && typeof manifest === 'object');
|
||||
assert.strictEqual(typeof manifest.version, 'string');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
assert(data && typeof data === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -186,14 +199,14 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
var altDomain = data.altDomain || null;
|
||||
var xFrameOptions = data.xFrameOptions || '';
|
||||
var installationState = data.installationState || exports.ISTATE_PENDING_INSTALL;
|
||||
var lastBackupId = data.lastBackupId || null; // used when cloning
|
||||
var restoreConfigJson = data.restoreConfig ? JSON.stringify(data.restoreConfig) : null; // used when cloning
|
||||
var sso = 'sso' in data ? data.sso : null;
|
||||
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
|
||||
|
||||
var queries = [];
|
||||
queries.push({
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson ]
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, domain, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, restoreConfigJson, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, location, domain, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, restoreConfigJson, sso, debugModeJson ]
|
||||
});
|
||||
|
||||
Object.keys(portBindings).forEach(function (env) {
|
||||
@@ -206,8 +219,8 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
// only allocate a mailbox if mailboxName is set
|
||||
if (data.mailboxName) {
|
||||
queries.push({
|
||||
query: 'INSERT INTO mailboxes (name, ownerId, ownerType) VALUES (?, ?, ?)',
|
||||
args: [ data.mailboxName, id, mailboxdb.TYPE_APP ]
|
||||
query: 'INSERT INTO mailboxes (name, domain, ownerId, ownerType) VALUES (?, ?, ?, ?)',
|
||||
args: [ data.mailboxName, domain, id, mailboxdb.TYPE_APP ]
|
||||
});
|
||||
}
|
||||
|
||||
@@ -246,6 +259,18 @@ function getPortBindings(id, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function delPortBinding(hostPort, callback) {
|
||||
assert.strictEqual(typeof hostPort, 'number');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('DELETE FROM appPortBindings WHERE hostPort=?', [ hostPort ], function (error, result) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (result.affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function del(id, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -303,17 +328,8 @@ function updateWithConstraints(id, app, constraints, callback) {
|
||||
|
||||
var fields = [ ], values = [ ];
|
||||
for (var p in app) {
|
||||
if (p === 'manifest') {
|
||||
fields.push('manifestJson = ?');
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p === 'oldConfig') {
|
||||
fields.push('oldConfigJson = ?');
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p === 'accessRestriction') {
|
||||
fields.push('accessRestrictionJson = ?');
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p === 'debugMode') {
|
||||
fields.push('debugModeJson = ?');
|
||||
if (p === 'manifest' || p === 'oldConfig' || p === 'updateConfig' || p === 'restoreConfig' || p === 'accessRestriction' || p === 'debugMode') {
|
||||
fields.push(`${p}Json = ?`);
|
||||
values.push(JSON.stringify(app[p]));
|
||||
} else if (p !== 'portBindings') {
|
||||
fields.push(p + ' = ?');
|
||||
@@ -366,14 +382,14 @@ function setInstallationCommand(appId, installationState, values, callback) {
|
||||
// Rules are:
|
||||
// uninstall is allowed in any state
|
||||
// force update is allowed in any state including pending_uninstall! (for better or worse)
|
||||
// restore is allowed from installed or error state
|
||||
// restore is allowed from installed or error state or currently restoring
|
||||
// configure is allowed in installed state or currently configuring or in error state
|
||||
// update and backup are allowed only in installed state
|
||||
|
||||
if (installationState === exports.ISTATE_PENDING_UNINSTALL || installationState === exports.ISTATE_PENDING_FORCE_UPDATE) {
|
||||
updateWithConstraints(appId, values, '', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_RESTORE) {
|
||||
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "error")', callback);
|
||||
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "error" OR installationState = "pending_restore")', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_UPDATE || installationState === exports.ISTATE_PENDING_BACKUP) {
|
||||
updateWithConstraints(appId, values, 'AND installationState = "installed"', callback);
|
||||
} else if (installationState === exports.ISTATE_PENDING_CONFIGURE) {
|
||||
@@ -413,11 +429,11 @@ function setAddonConfig(appId, addonId, env, callback) {
|
||||
|
||||
if (env.length === 0) return callback(null);
|
||||
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, value) VALUES ';
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, name, value) VALUES ';
|
||||
var args = [ ], queryArgs = [ ];
|
||||
for (var i = 0; i < env.length; i++) {
|
||||
args.push(appId, addonId, env[i]);
|
||||
queryArgs.push('(?, ?, ?)');
|
||||
args.push(appId, addonId, env[i].name, env[i].value);
|
||||
queryArgs.push('(?, ?, ?, ?)');
|
||||
}
|
||||
|
||||
database.query(query + queryArgs.join(','), args, function (error) {
|
||||
@@ -456,13 +472,10 @@ function getAddonConfig(appId, addonId, callback) {
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -470,13 +483,23 @@ function getAddonConfigByAppId(appId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function getAddonConfigByName(appId, addonId, name, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ? AND name = ?', [ appId, addonId, name ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null, results[0].value);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ var appdb = require('./appdb.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
config = require('./config.js'),
|
||||
debug = require('debug')('box:apphealthmonitor'),
|
||||
docker = require('./docker.js').connection,
|
||||
mailer = require('./mailer.js'),
|
||||
@@ -25,7 +26,7 @@ var gDockerEventStream = null;
|
||||
function debugApp(app) {
|
||||
assert(!app || typeof app === 'object');
|
||||
|
||||
var prefix = app ? (app.location || 'naked_domain') : '(no app)';
|
||||
var prefix = app ? app.intrinsicFqdn : '(no app)';
|
||||
var manifestAppId = app ? app.manifest.id : '';
|
||||
var id = app ? app.id : '';
|
||||
|
||||
@@ -94,19 +95,20 @@ function checkAppHealth(app, callback) {
|
||||
superagent
|
||||
.get(healthCheckUrl)
|
||||
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
|
||||
.set('User-Agent', 'Mozilla') // required for some apps (e.g. minio)
|
||||
.redirects(0)
|
||||
.timeout(HEALTHCHECK_INTERVAL)
|
||||
.end(function (error, res) {
|
||||
if (error && !error.response) {
|
||||
debugApp(app, 'not alive (network error): %s', error.message);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
|
||||
debugApp(app, 'not alive : %s', error || res.status);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
} else {
|
||||
setHealth(app, appdb.HEALTH_HEALTHY, callback);
|
||||
}
|
||||
});
|
||||
if (error && !error.response) {
|
||||
debugApp(app, 'not alive (network error): %s', error.message);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
|
||||
debugApp(app, 'not alive : %s', error || res.status);
|
||||
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
|
||||
} else {
|
||||
setHealth(app, appdb.HEALTH_HEALTHY, callback);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -156,7 +158,6 @@ function processDockerEvents() {
|
||||
stream.setEncoding('utf8');
|
||||
stream.on('data', function (data) {
|
||||
var ev = JSON.parse(data);
|
||||
debug('Container ' + ev.id + ' went OOM');
|
||||
appdb.getByContainerId(ev.id, function (error, app) { // this can error for addons
|
||||
var program = error || !app.appStoreId ? ev.id : app.appStoreId;
|
||||
var context = JSON.stringify(ev);
|
||||
|
||||
@@ -30,13 +30,16 @@ exports = module.exports = {
|
||||
|
||||
checkManifestConstraints: checkManifestConstraints,
|
||||
|
||||
updateApps: updateApps,
|
||||
autoupdateApps: autoupdateApps,
|
||||
|
||||
restoreInstalledApps: restoreInstalledApps,
|
||||
configureInstalledApps: configureInstalledApps,
|
||||
|
||||
getAppConfig: getAppConfig,
|
||||
|
||||
downloadFile: downloadFile,
|
||||
uploadFile: uploadFile,
|
||||
|
||||
// exported for testing
|
||||
_validateHostname: validateHostname,
|
||||
_validatePortBindings: validatePortBindings,
|
||||
@@ -45,6 +48,8 @@ exports = module.exports = {
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
appdb = require('./appdb.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
AppstoreError = require('./appstore.js').AppstoreError,
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
@@ -55,24 +60,29 @@ var addons = require('./addons.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:apps'),
|
||||
docker = require('./docker.js'),
|
||||
domaindb = require('./domaindb.js'),
|
||||
domains = require('./domains.js'),
|
||||
DomainError = require('./domains.js').DomainError,
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
groups = require('./groups.js'),
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
manifestFormat = require('cloudron-manifestformat'),
|
||||
os = require('os'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
superagent = require('superagent'),
|
||||
taskmanager = require('./taskmanager.js'),
|
||||
tld = require('tldjs'),
|
||||
TransformStream = require('stream').Transform,
|
||||
updateChecker = require('./updatechecker.js'),
|
||||
url = require('url'),
|
||||
util = require('util'),
|
||||
uuid = require('node-uuid'),
|
||||
uuid = require('uuid'),
|
||||
validator = require('validator');
|
||||
|
||||
// http://dustinsenos.com/articles/customErrorsInNode
|
||||
@@ -111,18 +121,34 @@ AppsError.BAD_CERTIFICATE = 'Invalid certificate';
|
||||
// Hostname validation comes from RFC 1123 (section 2.1)
|
||||
// Domain name validation comes from RFC 2181 (Name syntax)
|
||||
// https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
|
||||
// We are validating the validity of the location-fqdn as host name
|
||||
function validateHostname(location, fqdn) {
|
||||
var RESERVED_LOCATIONS = [ constants.ADMIN_LOCATION, constants.API_LOCATION, constants.SMTP_LOCATION, constants.IMAP_LOCATION, constants.MAIL_LOCATION, constants.POSTMAN_LOCATION ];
|
||||
// We are validating the validity of the location-fqdn as host name (and not dns name)
|
||||
function validateHostname(location, domain, hostname) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof hostname, 'string');
|
||||
|
||||
const RESERVED_LOCATIONS = [
|
||||
constants.API_LOCATION,
|
||||
constants.SMTP_LOCATION,
|
||||
constants.IMAP_LOCATION,
|
||||
constants.POSTMAN_LOCATION
|
||||
];
|
||||
if (RESERVED_LOCATIONS.indexOf(location) !== -1) return new AppsError(AppsError.BAD_FIELD, location + ' is reserved');
|
||||
|
||||
if (location === '') return null; // bare location
|
||||
if (hostname === config.adminFqdn()) return new AppsError(AppsError.BAD_FIELD, location + ' is reserved');
|
||||
|
||||
if ((location.length + 1 /*+ hyphen */ + fqdn.indexOf('.')) > 63) return new AppsError(AppsError.BAD_FIELD, 'Hostname length cannot be greater than 63');
|
||||
if (location.match(/^[A-Za-z0-9-]+$/) === null) return new AppsError(AppsError.BAD_FIELD, 'Hostname can only contain alphanumerics and hyphen');
|
||||
if (location[0] === '-' || location[location.length-1] === '-') return new AppsError(AppsError.BAD_FIELD, 'Hostname cannot start or end with hyphen');
|
||||
if (location.length + 1 /* hyphen */ + fqdn.length > 253) return new AppsError(AppsError.BAD_FIELD, 'FQDN length exceeds 253 characters');
|
||||
// workaround https://github.com/oncletom/tld.js/issues/73
|
||||
var tmp = hostname.replace('_', '-');
|
||||
if (!tld.isValid(tmp)) return new AppsError(AppsError.BAD_FIELD, 'Hostname is not a valid domain name');
|
||||
|
||||
if (hostname.length > 253) return new AppsError(AppsError.BAD_FIELD, 'Hostname length exceeds 253 characters');
|
||||
|
||||
if (location) {
|
||||
// label validation
|
||||
if (location.length > 63) return new AppsError(AppsError.BAD_FIELD, 'Subdomain exceeds 63 characters');
|
||||
if (location.match(/^[A-Za-z0-9-]+$/) === null) return new AppsError(AppsError.BAD_FIELD, 'Subdomain can only contain alphanumerics and hyphen');
|
||||
if (location.startsWith('-') || location.endsWith('-')) return new AppsError(AppsError.BAD_FIELD, 'Subdomain cannot start or end with hyphen');
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
@@ -203,7 +229,7 @@ function validateMemoryLimit(manifest, memoryLimit) {
|
||||
assert.strictEqual(typeof memoryLimit, 'number');
|
||||
|
||||
var min = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT;
|
||||
var max = (4096 * 1024 * 1024);
|
||||
var max = os.totalmem() * 2; // this will overallocate since we don't allocate equal swap always (#466)
|
||||
|
||||
// allow 0, which indicates that it is not set, the one from the manifest will be choosen but we don't commit any user value
|
||||
// this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used
|
||||
@@ -242,6 +268,23 @@ function validateDebugMode(debugMode) {
|
||||
return null;
|
||||
}
|
||||
|
||||
function validateRobotsTxt(robotsTxt) {
|
||||
if (robotsTxt === null) return null;
|
||||
|
||||
// this is the nginx limit on inline strings. if we really hit this, we have to generate a file
|
||||
if (robotsTxt.length > 4096) return new AppsError(AppsError.BAD_FIELD, 'robotsTxt must be less than 4096');
|
||||
|
||||
// TODO: validate the robots file? we escape the string when templating the nginx config right now
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function validateBackupFormat(format) {
|
||||
if (format === 'tgz' || format == 'rsync') return null;
|
||||
|
||||
return new AppsError(AppsError.BAD_FIELD, 'Invalid backup format');
|
||||
}
|
||||
|
||||
function getDuplicateErrorDetails(location, portBindings, error) {
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
@@ -268,6 +311,8 @@ function getAppConfig(app) {
|
||||
return {
|
||||
manifest: app.manifest,
|
||||
location: app.location,
|
||||
domain: app.domain,
|
||||
intrinsicFqdn: app.intrinsicFqdn,
|
||||
accessRestriction: app.accessRestriction,
|
||||
portBindings: app.portBindings,
|
||||
memoryLimit: app.memoryLimit,
|
||||
@@ -292,12 +337,18 @@ function hasAccessTo(app, user, callback) {
|
||||
if (app.accessRestriction.users.some(function (e) { return e === user.id; })) return callback(null, true);
|
||||
|
||||
// check group access
|
||||
if (!app.accessRestriction.groups) return callback(null, false);
|
||||
groups.getGroups(user.id, function (error, groupIds) {
|
||||
if (error) return callback(null, false);
|
||||
|
||||
async.some(app.accessRestriction.groups, function (groupId, iteratorDone) {
|
||||
groups.isMember(groupId, user.id, iteratorDone);
|
||||
}, function (error, result) {
|
||||
callback(null, !error && result);
|
||||
const isAdmin = groupIds.indexOf(constants.ADMIN_GROUP_ID) !== -1;
|
||||
|
||||
if (isAdmin) return callback(null, true); // admins can always access any app
|
||||
|
||||
if (!app.accessRestriction.groups) return callback(null, false);
|
||||
|
||||
if (app.accessRestriction.groups.some(function (gid) { return groupIds.indexOf(gid) !== -1; })) return callback(null, true);
|
||||
|
||||
callback(null, false);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -309,11 +360,16 @@ function get(appId, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
domaindb.get(app.domain, function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, app);
|
||||
app.intrinsicFqdn = domains.fqdn(app.location, app.domain, result.provider);
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || app.intrinsicFqdn;
|
||||
app.cnameTarget = app.altDomain ? app.intrinsicFqdn : null;
|
||||
|
||||
callback(null, app);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -328,11 +384,16 @@ function getByIpAddress(ip, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
domaindb.get(app.domain, function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, app);
|
||||
app.intrinsicFqdn = domains.fqdn(app.location, app.domain, result.provider);
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || app.intrinsicFqdn;
|
||||
app.cnameTarget = app.altDomain ? app.intrinsicFqdn : null;
|
||||
|
||||
callback(null, app);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -343,13 +404,22 @@ function getAll(callback) {
|
||||
appdb.getAll(function (error, apps) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
apps.forEach(function (app) {
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || config.appFqdn(app.location);
|
||||
app.cnameTarget = app.altDomain ? config.appFqdn(app.location) : null;
|
||||
});
|
||||
async.eachSeries(apps, function (app, iteratorDone) {
|
||||
domaindb.get(app.domain, function (error, result) {
|
||||
if (error) return iteratorDone(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, apps);
|
||||
app.intrinsicFqdn = domains.fqdn(app.location, app.domain, result.provider);
|
||||
app.iconUrl = getIconUrlSync(app);
|
||||
app.fqdn = app.altDomain || app.intrinsicFqdn;
|
||||
app.cnameTarget = app.altDomain ? app.intrinsicFqdn : null;
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, apps);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -366,99 +436,6 @@ function getAllByUser(user, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function purchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function purchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
var data = { appstoreId: appstoreId };
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
purchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
purchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function unpurchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function unpurchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function downloadManifest(appStoreId, manifest, callback) {
|
||||
if (!appStoreId && !manifest) return callback(new AppsError(AppsError.BAD_FIELD, 'Neither manifest nor appStoreId provided'));
|
||||
|
||||
@@ -473,7 +450,7 @@ function downloadManifest(appStoreId, manifest, callback) {
|
||||
superagent.get(url).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Network error downloading manifest:' + error.message));
|
||||
|
||||
if (result.statusCode !== 200) return callback(new AppsError(AppsError.BAD_FIELD, util.format('Failed to get app info from store.', result.statusCode, result.text)));
|
||||
if (result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('Failed to get app info from store.', result.statusCode, result.text)));
|
||||
|
||||
callback(null, parts[0], result.body.manifest);
|
||||
});
|
||||
@@ -485,6 +462,7 @@ function install(data, auditSource, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var location = data.location.toLowerCase(),
|
||||
domain = data.domain.toLowerCase(),
|
||||
portBindings = data.portBindings || null,
|
||||
accessRestriction = data.accessRestriction || null,
|
||||
icon = data.icon || null,
|
||||
@@ -494,7 +472,11 @@ function install(data, auditSource, callback) {
|
||||
altDomain = data.altDomain || null,
|
||||
xFrameOptions = data.xFrameOptions || 'SAMEORIGIN',
|
||||
sso = 'sso' in data ? data.sso : null,
|
||||
debugMode = data.debugMode || null;
|
||||
debugMode = data.debugMode || null,
|
||||
robotsTxt = data.robotsTxt || null,
|
||||
enableBackup = 'enableBackup' in data ? data.enableBackup : true,
|
||||
backupId = data.backupId || null,
|
||||
backupFormat = data.backupFormat || 'tgz';
|
||||
|
||||
assert(data.appStoreId || data.manifest); // atleast one of them is required
|
||||
|
||||
@@ -507,9 +489,6 @@ function install(data, auditSource, callback) {
|
||||
error = checkManifestConstraints(manifest);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateHostname(location, config.fqdn());
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validatePortBindings(portBindings, manifest.tcpPorts);
|
||||
if (error) return callback(error);
|
||||
|
||||
@@ -525,11 +504,17 @@ function install(data, auditSource, callback) {
|
||||
error = validateDebugMode(debugMode);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateRobotsTxt(robotsTxt);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateBackupFormat(backupFormat);
|
||||
if (error) return callback(error);
|
||||
|
||||
if ('sso' in data && !('optionalSso' in manifest)) return callback(new AppsError(AppsError.BAD_FIELD, 'sso can only be specified for apps with optionalSso'));
|
||||
// if sso was unspecified, enable it by default if possible
|
||||
if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
|
||||
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
|
||||
var appId = uuid.v4();
|
||||
|
||||
@@ -541,39 +526,56 @@ function install(data, auditSource, callback) {
|
||||
}
|
||||
}
|
||||
|
||||
error = certificates.validateCertificate(cert, key, config.appFqdn(location));
|
||||
if (error) return callback(new AppsError(AppsError.BAD_CERTIFICATE, error.message));
|
||||
domains.get(domain, function (error, domainObject) {
|
||||
if (error && error.reason === DomainError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such domain'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Could not get domain info:' + error.message));
|
||||
|
||||
debug('Will install app with id : ' + appId);
|
||||
var intrinsicFqdn = domains.fqdn(location, domain, domainObject.provider);
|
||||
|
||||
purchase(appId, appStoreId, function (error) {
|
||||
error = validateHostname(location, domain, intrinsicFqdn);
|
||||
if (error) return callback(error);
|
||||
|
||||
var data = {
|
||||
accessRestriction: accessRestriction,
|
||||
memoryLimit: memoryLimit,
|
||||
altDomain: altDomain,
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso,
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
error = certificates.validateCertificate(cert, key, intrinsicFqdn);
|
||||
if (error) return callback(new AppsError(AppsError.BAD_CERTIFICATE, error.message));
|
||||
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
debug('Will install app with id : ' + appId);
|
||||
|
||||
appstore.purchase(appId, appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
// save cert to boxdata/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
var data = {
|
||||
accessRestriction: accessRestriction,
|
||||
memoryLimit: memoryLimit,
|
||||
altDomain: altDomain,
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso,
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app',
|
||||
restoreConfig: backupId ? { backupId: backupId, backupFormat: backupFormat } : null,
|
||||
enableBackup: enableBackup,
|
||||
robotsTxt: robotsTxt,
|
||||
intrinsicFqdn: intrinsicFqdn
|
||||
};
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
appdb.add(appId, appStoreId, manifest, location, domain, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
// save cert to boxdata/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
|
||||
callback(null, { id : appId });
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, domain: domain, manifest: manifest, backupId: backupId });
|
||||
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -589,14 +591,12 @@ function configure(appId, data, auditSource, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
var location, portBindings, values = { };
|
||||
if ('location' in data) {
|
||||
location = values.location = data.location.toLowerCase();
|
||||
error = validateHostname(values.location, config.fqdn());
|
||||
if (error) return callback(error);
|
||||
} else {
|
||||
location = app.location;
|
||||
}
|
||||
var domain, location, portBindings, values = { };
|
||||
if ('location' in data) location = values.location = data.location.toLowerCase();
|
||||
else location = app.location;
|
||||
|
||||
if ('domain' in data) domain = values.domain = data.domain.toLowerCase();
|
||||
else domain = app.domain;
|
||||
|
||||
if ('accessRestriction' in data) {
|
||||
values.accessRestriction = data.accessRestriction;
|
||||
@@ -606,7 +606,7 @@ function configure(appId, data, auditSource, callback) {
|
||||
|
||||
if ('altDomain' in data) {
|
||||
values.altDomain = data.altDomain;
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
}
|
||||
|
||||
if ('portBindings' in data) {
|
||||
@@ -635,41 +635,59 @@ function configure(appId, data, auditSource, callback) {
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
// save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
|
||||
if ('cert' in data && 'key' in data) {
|
||||
if (data.cert && data.key) {
|
||||
error = certificates.validateCertificate(data.cert, data.key, config.appFqdn(location));
|
||||
if (error) return callback(new AppsError(AppsError.BAD_CERTIFICATE, error.message));
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), data.cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), data.key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
} else { // remove existing cert/key
|
||||
if (!safe.fs.unlinkSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'))) debug('Error removing cert: ' + safe.error.message);
|
||||
if (!safe.fs.unlinkSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'))) debug('Error removing key: ' + safe.error.message);
|
||||
}
|
||||
if ('robotsTxt' in data) {
|
||||
values.robotsTxt = data.robotsTxt || null;
|
||||
error = validateRobotsTxt(values.robotsTxt);
|
||||
if (error) return callback(error);
|
||||
}
|
||||
|
||||
values.oldConfig = getAppConfig(app);
|
||||
domains.get(domain, function (error, domainObject) {
|
||||
if (error && error.reason === DomainError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such domain'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Could not get domain info:' + error.message));
|
||||
|
||||
debug('Will configure app with id:%s values:%j', appId, values);
|
||||
var intrinsicFqdn = domains.fqdn(location, domain, domainObject.provider);
|
||||
|
||||
var oldName = (app.location ? app.location : app.manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
var newName = (location ? location : app.manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.updateName(oldName, newName, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'This mailbox is already taken'));
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.BAD_STATE));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
error = validateHostname(location, domain, intrinsicFqdn);
|
||||
if (error) return callback(error);
|
||||
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_CONFIGURE, values, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
// save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
|
||||
if ('cert' in data && 'key' in data) {
|
||||
if (data.cert && data.key) {
|
||||
error = certificates.validateCertificate(data.cert, data.key, intrinsicFqdn);
|
||||
if (error) return callback(new AppsError(AppsError.BAD_CERTIFICATE, error.message));
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.cert'), data.cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.key'), data.key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
} else { // remove existing cert/key
|
||||
if (!safe.fs.unlinkSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.cert'))) debug('Error removing cert: ' + safe.error.message);
|
||||
if (!safe.fs.unlinkSync(path.join(paths.APP_CERTS_DIR, intrinsicFqdn + '.user.key'))) debug('Error removing key: ' + safe.error.message);
|
||||
}
|
||||
}
|
||||
|
||||
if ('enableBackup' in data) values.enableBackup = data.enableBackup;
|
||||
|
||||
values.oldConfig = getAppConfig(app);
|
||||
|
||||
debug('Will configure app with id:%s values:%j', appId, values);
|
||||
|
||||
var oldName = (app.location ? app.location : app.manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
var newName = (location ? location : app.manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.updateName(oldName, values.oldConfig.domain, newName, domain, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'This mailbox is already taken'));
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.BAD_STATE));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_CONFIGURE, values, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.BAD_STATE));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: appId });
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
callback(null);
|
||||
eventlog.add(eventlog.ACTION_APP_CONFIGURE, auditSource, { appId: appId });
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -686,7 +704,7 @@ function update(appId, data, auditSource, callback) {
|
||||
downloadManifest(data.appStoreId, data.manifest, function (error, appStoreId, manifest) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var values = { };
|
||||
var updateConfig = { };
|
||||
|
||||
error = manifestFormat.parse(manifest);
|
||||
if (error) return callback(new AppsError(AppsError.BAD_FIELD, 'Manifest error:' + error.message));
|
||||
@@ -694,13 +712,7 @@ function update(appId, data, auditSource, callback) {
|
||||
error = checkManifestConstraints(manifest);
|
||||
if (error) return callback(error);
|
||||
|
||||
values.manifest = manifest;
|
||||
|
||||
if ('portBindings' in data) {
|
||||
values.portBindings = data.portBindings;
|
||||
error = validatePortBindings(data.portBindings, values.manifest.tcpPorts);
|
||||
if (error) return callback(error);
|
||||
}
|
||||
updateConfig.manifest = manifest;
|
||||
|
||||
if ('icon' in data) {
|
||||
if (data.icon) {
|
||||
@@ -720,26 +732,23 @@ function update(appId, data, auditSource, callback) {
|
||||
|
||||
// prevent user from installing a app with different manifest id over an existing app
|
||||
// this allows cloudron install -f --app <appid> for an app installed from the appStore
|
||||
if (app.manifest.id !== values.manifest.id) {
|
||||
if (app.manifest.id !== updateConfig.manifest.id) {
|
||||
if (!data.force) return callback(new AppsError(AppsError.BAD_FIELD, 'manifest id does not match. force to override'));
|
||||
// clear appStoreId so that this app does not get updates anymore
|
||||
values.appStoreId = '';
|
||||
updateConfig.appStoreId = '';
|
||||
}
|
||||
|
||||
// do not update apps in debug mode
|
||||
if (app.debugMode && !data.force) return callback(new AppsError(AppsError.BAD_STATE, 'debug mode enabled. force to override'));
|
||||
|
||||
// Ensure we update the memory limit in case the new app requires more memory as a minimum
|
||||
// 0 and -1 are special values for memory limit indicating unset and unlimited
|
||||
if (app.memoryLimit > 0 && values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
|
||||
values.memoryLimit = values.manifest.memoryLimit;
|
||||
// 0 and -1 are special updateConfig for memory limit indicating unset and unlimited
|
||||
if (app.memoryLimit > 0 && updateConfig.manifest.memoryLimit && app.memoryLimit < updateConfig.manifest.memoryLimit) {
|
||||
updateConfig.memoryLimit = updateConfig.manifest.memoryLimit;
|
||||
}
|
||||
|
||||
values.oldConfig = getAppConfig(app);
|
||||
|
||||
appdb.setInstallationCommand(appId, data.force ? appdb.ISTATE_PENDING_FORCE_UPDATE : appdb.ISTATE_PENDING_UPDATE, values, function (error) {
|
||||
appdb.setInstallationCommand(appId, data.force ? appdb.ISTATE_PENDING_FORCE_UPDATE : appdb.ISTATE_PENDING_UPDATE, { updateConfig: updateConfig }, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.BAD_STATE)); // might be a bad guess
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails('' /* location cannot conflict */, values.portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
@@ -761,10 +770,9 @@ function appLogFilter(app) {
|
||||
return names.map(function (name) { return 'CONTAINER_NAME=' + name; });
|
||||
}
|
||||
|
||||
function getLogs(appId, lines, follow, callback) {
|
||||
function getLogs(appId, options, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof lines, 'number');
|
||||
assert.strictEqual(typeof follow, 'boolean');
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Getting logs for %s', appId);
|
||||
@@ -773,13 +781,21 @@ function getLogs(appId, lines, follow, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
var args = [ '--output=json', '--no-pager', '--lines=' + lines ];
|
||||
|
||||
var lines = options.lines || 100,
|
||||
follow = !!options.follow,
|
||||
format = options.format || 'json';
|
||||
|
||||
var args = [ '--no-pager', '--lines=' + lines ];
|
||||
if (follow) args.push('--follow');
|
||||
if (format == 'short') args.push('--output=short', '-a'); else args.push('--output=json');
|
||||
args = args.concat(appLogFilter(app));
|
||||
|
||||
var cp = spawn('/bin/journalctl', args);
|
||||
|
||||
var transformStream = split(function mapper(line) {
|
||||
if (format !== 'json') return line + '\n';
|
||||
|
||||
var obj = safe.JSON.parse(line);
|
||||
if (!obj) return undefined;
|
||||
|
||||
@@ -813,22 +829,22 @@ function restore(appId, data, auditSource, callback) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
// for empty or null backupId, use existing manifest to mimic a reinstall
|
||||
var func = data.backupId ? backups.getRestoreConfig.bind(null, data.backupId) : function (next) { return next(null, { manifest: app.manifest }); };
|
||||
var func = data.backupId ? backups.get.bind(null, data.backupId) : function (next) { return next(null, { manifest: app.manifest }); };
|
||||
|
||||
func(function (error, restoreConfig) {
|
||||
func(function (error, backupInfo) {
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
|
||||
if (!backupInfo.manifest) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore manifest'));
|
||||
|
||||
// re-validate because this new box version may not accept old configs
|
||||
error = checkManifestConstraints(restoreConfig.manifest);
|
||||
error = checkManifestConstraints(backupInfo.manifest);
|
||||
if (error) return callback(error);
|
||||
|
||||
var values = {
|
||||
lastBackupId: data.backupId || null, // when null, apptask simply reinstalls
|
||||
manifest: restoreConfig.manifest,
|
||||
restoreConfig: data.backupId ? { backupId: data.backupId, backupFormat: backupInfo.format } : null, // when null, apptask simply reinstalls
|
||||
manifest: backupInfo.manifest,
|
||||
|
||||
oldConfig: getAppConfig(app)
|
||||
};
|
||||
@@ -856,57 +872,70 @@ function clone(appId, data, auditSource, callback) {
|
||||
debug('Will clone app with id:%s', appId);
|
||||
|
||||
var location = data.location.toLowerCase(),
|
||||
domain = data.domain.toLowerCase(),
|
||||
portBindings = data.portBindings || null,
|
||||
backupId = data.backupId;
|
||||
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof portBindings, 'object');
|
||||
|
||||
appdb.get(appId, function (error, app) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
backups.getRestoreConfig(backupId, function (error, restoreConfig) {
|
||||
backups.get(backupId, function (error, backupInfo) {
|
||||
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
|
||||
if (!backupInfo.manifest) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
|
||||
|
||||
// re-validate because this new box version may not accept old configs
|
||||
error = checkManifestConstraints(restoreConfig.manifest);
|
||||
error = checkManifestConstraints(backupInfo.manifest);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validateHostname(location, config.fqdn());
|
||||
error = validatePortBindings(portBindings, backupInfo.manifest.tcpPorts);
|
||||
if (error) return callback(error);
|
||||
|
||||
error = validatePortBindings(portBindings, restoreConfig.manifest.tcpPorts);
|
||||
if (error) return callback(error);
|
||||
domains.get(domain, function (error, domainObject) {
|
||||
if (error && error.reason === DomainError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such domain'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Could not get domain info:' + error.message));
|
||||
|
||||
var newAppId = uuid.v4(), appStoreId = app.appStoreId, manifest = restoreConfig.manifest;
|
||||
var intrinsicFqdn = domains.fqdn(location, domain, domainObject.provider);
|
||||
|
||||
purchase(newAppId, appStoreId, function (error) {
|
||||
error = validateHostname(location, domain, intrinsicFqdn);
|
||||
if (error) return callback(error);
|
||||
|
||||
var data = {
|
||||
installationState: appdb.ISTATE_PENDING_CLONE,
|
||||
memoryLimit: app.memoryLimit,
|
||||
accessRestriction: app.accessRestriction,
|
||||
xFrameOptions: app.xFrameOptions,
|
||||
lastBackupId: backupId,
|
||||
sso: !!app.sso,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
var newAppId = uuid.v4(), manifest = backupInfo.manifest;
|
||||
|
||||
appdb.add(newAppId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
appstore.purchase(newAppId, app.appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
var data = {
|
||||
installationState: appdb.ISTATE_PENDING_CLONE,
|
||||
memoryLimit: app.memoryLimit,
|
||||
accessRestriction: app.accessRestriction,
|
||||
xFrameOptions: app.xFrameOptions,
|
||||
restoreConfig: { backupId: backupId, backupFormat: backupInfo.format },
|
||||
sso: !!app.sso,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
appdb.add(newAppId, app.appStoreId, manifest, location, domain, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, { id : newAppId });
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : newAppId });
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -923,8 +952,11 @@ function uninstall(appId, auditSource, callback) {
|
||||
get(appId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
appstore.unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
@@ -1047,21 +1079,18 @@ function exec(appId, options, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function updateApps(updateInfo, auditSource, callback) { // updateInfo is { appId -> { manifest } }
|
||||
function autoupdateApps(updateInfo, auditSource, callback) { // updateInfo is { appId -> { manifest } }
|
||||
assert.strictEqual(typeof updateInfo, 'object');
|
||||
assert.strictEqual(typeof auditSource, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
function canAutoupdateApp(app, newManifest) {
|
||||
if ((semver.major(app.manifest.version) !== 0) && (semver.major(app.manifest.version) !== semver.major(newManifest.version))) return new Error('Major version change'); // major changes are blocking
|
||||
|
||||
var newTcpPorts = newManifest.tcpPorts || { };
|
||||
var oldTcpPorts = app.manifest.tcpPorts || { };
|
||||
var portBindings = app.portBindings; // this is never null
|
||||
|
||||
for (var env in newTcpPorts) {
|
||||
if (!(env in oldTcpPorts)) return new Error(env + ' is required from user');
|
||||
}
|
||||
|
||||
for (env in portBindings) {
|
||||
for (var env in portBindings) {
|
||||
if (!(env in newTcpPorts)) return new Error(env + ' was in use but new update removes it');
|
||||
}
|
||||
|
||||
@@ -1076,7 +1105,7 @@ function updateApps(updateInfo, auditSource, callback) { // updateInfo is { appI
|
||||
if (error) {
|
||||
debug('Cannot autoupdate app %s : %s', appId, error.message);
|
||||
return iteratorDone();
|
||||
}
|
||||
}
|
||||
|
||||
error = canAutoupdateApp(app, updateInfo[appId].manifest);
|
||||
if (error) {
|
||||
@@ -1144,12 +1173,16 @@ function restoreInstalledApps(callback) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
async.map(apps, function (app, iteratorDone) {
|
||||
debug('marking %s for restore', app.location || app.id);
|
||||
debug('marking %s for restore', app.intrinsicFqdn);
|
||||
|
||||
appdb.setInstallationCommand(app.id, appdb.ISTATE_PENDING_RESTORE, { oldConfig: null }, function (error) {
|
||||
if (error) debug('did not mark %s for restore', app.location || app.id, error);
|
||||
backups.getByAppIdPaged(1, 1, app.id, function (error, results) {
|
||||
var restoreConfig = !error && results.length ? { backupId: results[0].id, backupFormat: results[0].format } : null;
|
||||
|
||||
iteratorDone(); // always succeed
|
||||
appdb.setInstallationCommand(app.id, appdb.ISTATE_PENDING_RESTORE, { restoreConfig: restoreConfig, oldConfig: null }, function (error) {
|
||||
if (error) debug('did not mark %s for restore', app.intrinsicFqdn, error);
|
||||
|
||||
iteratorDone(); // always succeed
|
||||
});
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
@@ -1162,13 +1195,95 @@ function configureInstalledApps(callback) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
async.map(apps, function (app, iteratorDone) {
|
||||
debug('marking %s for reconfigure', app.location || app.id);
|
||||
debug('marking %s for reconfigure', app.intrinsicFqdn);
|
||||
|
||||
appdb.setInstallationCommand(app.id, appdb.ISTATE_PENDING_CONFIGURE, { oldConfig: null }, function (error) {
|
||||
if (error) debug('did not mark %s for reconfigure', app.location || app.id, error);
|
||||
if (error) debug('did not mark %s for reconfigure', app.intrinsicFqdn, error);
|
||||
|
||||
iteratorDone(); // always succeed
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function downloadFile(appId, filePath, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof filePath, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exec(appId, { cmd: [ 'stat', '--printf=%F-%s', filePath ], tty: true }, function (error, stream) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var data = '';
|
||||
stream.setEncoding('utf8');
|
||||
stream.on('data', function (d) { data += d; });
|
||||
stream.on('end', function () {
|
||||
var parts = data.split('-');
|
||||
if (parts.length !== 2) return callback(new AppsError(AppsError.NOT_FOUND, 'file does not exist'));
|
||||
|
||||
var type = parts[0], filename, cmd, size;
|
||||
|
||||
if (type === 'regular file') {
|
||||
cmd = [ 'cat', filePath ];
|
||||
size = parseInt(parts[1], 10);
|
||||
filename = path.basename(filePath);
|
||||
if (isNaN(size)) return callback(new AppsError(AppsError.NOT_FOUND, 'file does not exist'));
|
||||
} else if (type === 'directory') {
|
||||
cmd = ['tar', 'zcf', '-', '-C', filePath, '.'];
|
||||
filename = path.basename(filePath) + '.tar.gz';
|
||||
size = 0; // unknown
|
||||
} else {
|
||||
return callback(new AppsError(AppsError.NOT_FOUND, 'only files or dirs can be downloaded'));
|
||||
}
|
||||
|
||||
exec(appId, { cmd: cmd , tty: false }, function (error, stream) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var stdoutStream = new TransformStream({
|
||||
transform: function (chunk, ignoredEncoding, callback) {
|
||||
this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk;
|
||||
|
||||
while (true) {
|
||||
if (this._buffer.length < 8) break; // header is 8 bytes
|
||||
|
||||
var type = this._buffer.readUInt8(0);
|
||||
var len = this._buffer.readUInt32BE(4);
|
||||
|
||||
if (this._buffer.length < (8 + len)) break; // not enough
|
||||
|
||||
var payload = this._buffer.slice(8, 8 + len);
|
||||
|
||||
this._buffer = this._buffer.slice(8+len); // consumed
|
||||
|
||||
if (type === 1) this.push(payload);
|
||||
}
|
||||
|
||||
callback();
|
||||
}
|
||||
});
|
||||
|
||||
stream.pipe(stdoutStream);
|
||||
|
||||
return callback(null, stdoutStream, { filename: filename, size: size });
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Uploads a local file into the app's container by piping it through an exec'd
// `cat` process. The callback fires once the upload has completed (or failed).
//
// appId          - id of the target app/container
// sourceFilePath - local path to read from
// destFilePath   - path inside the container to write to
// callback       - function (error)
//
// NOTE(review): destFilePath is interpolated into a bash command line, so paths
// containing shell metacharacters or spaces will misbehave - callers must pass
// sanitized paths. TODO confirm all callers guarantee this.
function uploadFile(appId, sourceFilePath, destFilePath, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof sourceFilePath, 'string');
    assert.strictEqual(typeof destFilePath, 'string');
    assert.strictEqual(typeof callback, 'function');

    exec(appId, { cmd: [ 'bash', '-c', 'cat - > ' + destFilePath ], tty: false }, function (error, stream) {
        if (error) return callback(error);

        // guard: the read stream's 'error', the exec stream's 'error' and normal
        // completion can all fire; the original code could invoke callback twice
        // (immediately with null, then again from the 'error' handler).
        var called = false;
        function done(error) {
            if (called) return;
            called = true;
            callback(error || null);
        }

        var readFile = fs.createReadStream(sourceFilePath);
        readFile.on('error', done);
        stream.on('error', done);
        stream.on('finish', done); // writable side flushed - upload actually complete

        readFile.pipe(stream);
    });
}
|
||||
|
||||
@@ -0,0 +1,290 @@
|
||||
'use strict';

// Client module for the Cloudron appstore API. Covers the app purchase
// lifecycle (purchase/unpurchase), box and app update checks, subscription
// and account lookups, alive-status reporting and user feedback submission.
exports = module.exports = {
    purchase: purchase,
    unpurchase: unpurchase,

    getSubscription: getSubscription,

    sendAliveStatus: sendAliveStatus,

    getAppUpdate: getAppUpdate,
    getBoxUpdate: getBoxUpdate,

    getAccount: getAccount,

    sendFeedback: sendFeedback,

    AppstoreError: AppstoreError
};

var assert = require('assert'),
    config = require('./config.js'),
    debug = require('debug')('box:appstore'),
    eventlog = require('./eventlog.js'),
    os = require('os'),
    settings = require('./settings.js'),
    superagent = require('superagent'),
    util = require('util');
|
||||
|
||||
// Domain error for appstore interactions. `reason` is one of the constants
// below; `errorOrMessage` may be a nested Error, a message string, or omitted
// (in which case the reason doubles as the message).
function AppstoreError(reason, errorOrMessage) {
    assert.strictEqual(typeof reason, 'string');
    assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');

    Error.call(this);
    Error.captureStackTrace(this, this.constructor);

    this.name = this.constructor.name;
    this.reason = reason;
    if (typeof errorOrMessage === 'undefined') {
        this.message = reason;
    } else if (typeof errorOrMessage === 'string') {
        this.message = errorOrMessage;
    } else {
        this.message = 'Internal error';
        this.nestedError = errorOrMessage;
    }
}
util.inherits(AppstoreError, Error);
AppstoreError.INTERNAL_ERROR = 'Internal Error';
AppstoreError.EXTERNAL_ERROR = 'External Error';
// BUGFIX: was copy-pasted as 'Internal Error', making NOT_FOUND indistinguishable
// from INTERNAL_ERROR for any code comparing error.reason.
AppstoreError.NOT_FOUND = 'Not found';
AppstoreError.BILLING_REQUIRED = 'Billing Required';

// default callback for fire-and-forget calls: just log the error, if any
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
// Resolves the appstore credentials ({ userId, cloudronId, token, ... }).
// Caas Cloudrons exchange their box token on the fly; everyone else reads
// the stored appstore config from settings.
// callback - function (error, appstoreConfig)
function getAppstoreConfig(callback) {
    assert.strictEqual(typeof callback, 'function');

    // Caas Cloudrons do not store appstore credentials in their local database
    if (config.provider() === 'caas') {
        var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
        superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            // BUGFIX: error text previously said 'App unpurchase failed' (copy-paste
            // from unpurchase()); this path exchanges the box token, not an app purchase.
            if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Token exchange failed. %s %j', result.status, result.body)));

            callback(null, result.body);
        });
    } else {
        settings.getAppstoreConfig(function (error, result) {
            if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
            if (!result.token) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));

            callback(null, result);
        });
    }
}
|
||||
|
||||
// Fetches this Cloudron's subscription record from the appstore.
// callback - function (error, subscription)
function getSubscription(callback) {
    assert.strictEqual(typeof callback, 'function');

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        const url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId + '/cloudrons/' + cfg.cloudronId + '/subscription';

        superagent.get(url).query({ accessToken: cfg.token }).timeout(30 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));

            switch (res.statusCode) {
            case 200: return callback(null, res.body.subscription);
            case 401: return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'invalid appstore token'));
            case 403: return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'wrong user'));
            case 502: return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'stripe error'));
            default:  return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, 'unknown error'));
            }
        });
    });
}
|
||||
|
||||
// Registers an app purchase with the appstore. A blank appstoreId means the
// app is not an appstore app (e.g. installed from a manifest) and needs no record.
// callback - function (error)
function purchase(appId, appstoreId, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof appstoreId, 'string');
    assert.strictEqual(typeof callback, 'function');

    if (appstoreId === '') return callback(null);

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId + '/cloudrons/' + cfg.cloudronId + '/apps/' + appId;

        superagent.post(url).send({ appstoreId: appstoreId }).query({ accessToken: cfg.token }).timeout(30 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error.message));

            var status = res.statusCode;
            if (status === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
            if (status === 401 || status === 403) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
            if (status === 402) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED, res.body.message));
            if (status !== 201 && status !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', res.status, res.body)));

            callback(null);
        });
    });
}
|
||||
|
||||
// Removes an app purchase record from the appstore. First verifies the record
// exists (a 404 is treated as success - nothing to unpurchase), then deletes it.
// callback - function (error)
function unpurchase(appId, appstoreId, callback) {
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof appstoreId, 'string');
    assert.strictEqual(typeof callback, 'function');

    // non-appstore apps have no purchase record
    if (appstoreId === '') return callback(null);

    getAppstoreConfig(function (error, appstoreConfig) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;

        // check the purchase exists before attempting to delete it
        superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
            if (result.statusCode === 404) return callback(null); // was never purchased
            // BUGFIX: message previously said 'App purchase failed' (copy-paste); this is the unpurchase flow
            if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));

            superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
                if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
                if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
                if (result.statusCode !== 204) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));

                callback(null);
            });
        });
    });
}
|
||||
|
||||
// Pushes periodic "alive" telemetry (settings summary, machine specs, last
// login time) to the appstore. Fire-and-forget when no callback is given.
//
// NOTE(review): the `data` parameter is currently ignored - the payload is
// rebuilt from local settings below. The original code shadowed it with a
// local `var data`, silently discarding the argument; the local has been
// renamed `aliveData` to make this explicit. TODO confirm callers pass null.
function sendAliveStatus(data, callback) {
    callback = callback || NOOP_CALLBACK;

    settings.getAll(function (error, result) {
        if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));

        // only need the single most recent login event
        eventlog.getAllPaged(eventlog.ACTION_USER_LOGIN, null, 1, 1, function (error, loginEvents) {
            if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));

            // summary of backend configuration (providers only, no secrets)
            var backendSettings = {
                tlsConfig: {
                    provider: result[settings.TLS_CONFIG_KEY].provider
                },
                backupConfig: {
                    provider: result[settings.BACKUP_CONFIG_KEY].provider,
                    hardlinks: !result[settings.BACKUP_CONFIG_KEY].noHardlinks
                },
                mailConfig: {
                    enabled: result[settings.MAIL_CONFIG_KEY].enabled
                },
                mailRelay: {
                    provider: result[settings.MAIL_RELAY_KEY].provider
                },
                mailCatchAll: {
                    count: result[settings.CATCH_ALL_ADDRESS_KEY].length
                },
                autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
                timeZone: result[settings.TIME_ZONE_KEY],
            };

            var aliveData = {
                domain: config.fqdn(),
                version: config.version(),
                adminFqdn: config.adminFqdn(),
                provider: config.provider(),
                backendSettings: backendSettings,
                machine: {
                    cpus: os.cpus(),
                    totalmem: os.totalmem()
                },
                events: {
                    lastLogin: loginEvents[0] ? (new Date(loginEvents[0].creationTime).getTime()) : 0
                }
            };

            getAppstoreConfig(function (error, appstoreConfig) {
                if (error) return callback(error);

                var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/alive';
                superagent.post(url).send(aliveData).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
                    if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
                    if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
                    if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));

                    callback(null);
                });
            });
        });
    });
}
|
||||
|
||||
// Asks the appstore whether a newer box release is available for our version.
// callback - function (error, updateInfo) where updateInfo is
// { version, changelog, upgrade, sourceTarballUrl }, or nothing when up-to-date.
function getBoxUpdate(callback) {
    assert.strictEqual(typeof callback, 'function');

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId + '/cloudrons/' + cfg.cloudronId + '/boxupdate';
        var query = { accessToken: cfg.token, boxVersion: config.version() };

        superagent.get(url).query(query).timeout(10 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            if (res.statusCode === 204) return callback(null); // no update
            if (res.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', res.statusCode, res.text)));

            callback(null, res.body);
        });
    });
}
|
||||
|
||||
// Asks the appstore whether an update is available for the given app.
// callback - function (error, updateInfo) where updateInfo is
// { id, creationDate, manifest }, or nothing when up-to-date.
function getAppUpdate(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId + '/cloudrons/' + cfg.cloudronId + '/appupdate';
        var query = {
            accessToken: cfg.token,
            boxVersion: config.version(),
            appId: app.appStoreId,
            appVersion: app.manifest.version
        };

        superagent.get(url).query(query).timeout(10 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            if (res.statusCode === 204) return callback(null); // no update
            if (res.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', res.statusCode, res.text)));

            callback(null, res.body);
        });
    });
}
|
||||
|
||||
// Fetches the appstore account profile of this Cloudron's owner.
// callback - function (error, profile) where profile is
// { id, email, groupId, billing, firstName, lastName, company, street, city, zip, state, country }
function getAccount(callback) {
    assert.strictEqual(typeof callback, 'function');

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId;

        superagent.get(url).query({ accessToken: cfg.token }).timeout(10 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            if (res.statusCode === 200) return callback(null, res.body.profile);

            callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', res.statusCode, res.text)));
        });
    });
}
|
||||
|
||||
// Submits user feedback ({ email, displayName, type, subject, description })
// to the appstore on behalf of this Cloudron.
// callback - function (error)
function sendFeedback(info, callback) {
    assert.strictEqual(typeof info, 'object');
    assert.strictEqual(typeof info.email, 'string');
    assert.strictEqual(typeof info.displayName, 'string');
    assert.strictEqual(typeof info.type, 'string');
    assert.strictEqual(typeof info.subject, 'string');
    assert.strictEqual(typeof info.description, 'string');
    assert.strictEqual(typeof callback, 'function');

    getAppstoreConfig(function (error, cfg) {
        if (error) return callback(error);

        var url = config.apiServerOrigin() + '/api/v1/users/' + cfg.userId + '/cloudrons/' + cfg.cloudronId + '/feedback';

        superagent.post(url).query({ accessToken: cfg.token }).send(info).timeout(10 * 1000).end(function (error, res) {
            if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
            if (res.statusCode === 201) return callback(null);

            callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', res.statusCode, res.text)));
        });
    });
}
|
||||
@@ -35,26 +35,31 @@ var addons = require('./addons.js'),
|
||||
certificates = require('./certificates.js'),
|
||||
config = require('./config.js'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:apptask'),
|
||||
docker = require('./docker.js'),
|
||||
domains = require('./domains.js'),
|
||||
DomainError = domains.DomainError,
|
||||
ejs = require('ejs'),
|
||||
fs = require('fs'),
|
||||
manifestFormat = require('cloudron-manifestformat'),
|
||||
net = require('net'),
|
||||
nginx = require('./nginx.js'),
|
||||
os = require('os'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
shell = require('./shell.js'),
|
||||
SubdomainError = require('./subdomains.js').SubdomainError,
|
||||
subdomains = require('./subdomains.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
var COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd.config.ejs', { encoding: 'utf8' }),
|
||||
RELOAD_COLLECTD_CMD = path.join(__dirname, 'scripts/reloadcollectd.sh'),
|
||||
CONFIGURE_COLLECTD_CMD = path.join(__dirname, 'scripts/configurecollectd.sh'),
|
||||
LOGROTATE_CONFIG_EJS = fs.readFileSync(__dirname + '/logrotate.ejs', { encoding: 'utf8' }),
|
||||
CONFIGURE_LOGROTATE_CMD = path.join(__dirname, 'scripts/configurelogrotate.sh'),
|
||||
RMAPPDIR_CMD = path.join(__dirname, 'scripts/rmappdir.sh'),
|
||||
CREATEAPPDIR_CMD = path.join(__dirname, 'scripts/createappdir.sh');
|
||||
|
||||
@@ -67,10 +72,29 @@ function initialize(callback) {
|
||||
function debugApp(app) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
|
||||
var prefix = app ? (app.location || '(bare)') : '(no app)';
|
||||
var prefix = app ? (app.intrinsicFqdn || '(bare)') : '(no app)';
|
||||
debug(prefix + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
|
||||
}
|
||||
|
||||
// updates the app object and the database
|
||||
// Persists `values` for the app via appdb, then mirrors them onto the
// in-memory app object so callers see a consistent view.
// callback - function (error)
function updateApp(app, values, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof values, 'object');
    assert.strictEqual(typeof callback, 'function');

    debugApp(app, 'updating app with values: %j', values);

    appdb.update(app.id, values, function (error) {
        if (error) return callback(error);

        // keep the in-memory object in sync with what was just persisted
        for (var key in values) app[key] = values[key];

        return callback(null);
    });
}
|
||||
|
||||
function reserveHttpPort(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -126,7 +150,7 @@ function deleteContainers(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'deleting containers');
|
||||
debugApp(app, 'deleting app containers (app, scheduler)');
|
||||
|
||||
docker.deleteContainers(app.id, function (error) {
|
||||
if (error) return callback(new Error('Error deleting container: ' + error));
|
||||
@@ -142,11 +166,12 @@ function createVolume(app, callback) {
|
||||
shell.sudo('createVolume', [ CREATEAPPDIR_CMD, app.id ], callback);
|
||||
}
|
||||
|
||||
function deleteVolume(app, callback) {
|
||||
function deleteVolume(app, options, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
shell.sudo('deleteVolume', [ RMAPPDIR_CMD, app.id ], callback);
|
||||
shell.sudo('deleteVolume', [ RMAPPDIR_CMD, app.id, !!options.removeDirectory ], callback);
|
||||
}
|
||||
|
||||
function addCollectdProfile(app, callback) {
|
||||
@@ -156,7 +181,7 @@ function addCollectdProfile(app, callback) {
|
||||
var collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { appId: app.id, containerId: app.containerId });
|
||||
fs.writeFile(path.join(paths.COLLECTD_APPCONFIG_DIR, app.id + '.conf'), collectdConf, function (error) {
|
||||
if (error) return callback(error);
|
||||
shell.sudo('addCollectdProfile', [ RELOAD_COLLECTD_CMD ], callback);
|
||||
shell.sudo('addCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'add', app.id ], callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -166,17 +191,41 @@ function removeCollectdProfile(app, callback) {
|
||||
|
||||
fs.unlink(path.join(paths.COLLECTD_APPCONFIG_DIR, app.id + '.conf'), function (error) {
|
||||
if (error && error.code !== 'ENOENT') debugApp(app, 'Error removing collectd profile', error);
|
||||
shell.sudo('removeCollectdProfile', [ RELOAD_COLLECTD_CMD ], callback);
|
||||
shell.sudo('removeCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'remove', app.id ], callback);
|
||||
});
|
||||
}
|
||||
|
||||
function verifyManifest(app, callback) {
|
||||
function addLogrotateConfig(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'Verifying manifest');
|
||||
docker.inspect(app.containerId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; });
|
||||
if (!runVolume) return callback(new Error('App does not have /run mounted'));
|
||||
|
||||
// logrotate configs can have arbitrary commands, so the config files must be owned by root
|
||||
var logrotateConf = ejs.render(LOGROTATE_CONFIG_EJS, { volumePath: runVolume.Source });
|
||||
var tmpFilePath = path.join(os.tmpdir(), app.id + '.logrotate');
|
||||
fs.writeFile(tmpFilePath, logrotateConf, function (error) {
|
||||
if (error) return callback(error);
|
||||
shell.sudo('addLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'add', app.id, tmpFilePath ], callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Removes the app's logrotate configuration via the privileged helper script.
// Runs under sudo because the logrotate config files must be root-owned
// (they can contain arbitrary commands - see addLogrotateConfig).
// callback - function (error)
function removeLogrotateConfig(app, callback) {
    assert.strictEqual(typeof app, 'object');
    assert.strictEqual(typeof callback, 'function');

    shell.sudo('removeLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'remove', app.id ], callback);
}
|
||||
|
||||
function verifyManifest(manifest, callback) {
|
||||
assert.strictEqual(typeof manifest, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var manifest = app.manifest;
|
||||
var error = manifestFormat.parse(manifest);
|
||||
if (error) return callback(new Error(util.format('Manifest error: %s', error.message)));
|
||||
|
||||
@@ -209,7 +258,7 @@ function downloadIcon(app, callback) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
|
||||
|
||||
retryCallback(null);
|
||||
});
|
||||
});
|
||||
}, callback);
|
||||
}
|
||||
|
||||
@@ -222,18 +271,17 @@ function registerSubdomain(app, overwrite, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Registering subdomain location [%s]', app.location);
|
||||
debugApp(app, 'Registering subdomain location [%s] overwrite: %s', app.intrinsicFqdn, overwrite);
|
||||
|
||||
// get the current record before updating it
|
||||
subdomains.get(app.location, 'A', function (error, values) {
|
||||
domains.getDNSRecords(app.location, app.domain, 'A', function (error, values) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
// note that the appstore sets up the naked domain for non-custom domains
|
||||
if (config.isCustomDomain() && values.length !== 0 && !overwrite) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
if (values.length !== 0 && !overwrite) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], function (error, changeId) {
|
||||
if (error && (error.reason === SubdomainError.STILL_BUSY || error.reason === SubdomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
domains.upsertDNSRecords(app.location, app.domain, 'A', [ ip ], function (error, changeId) {
|
||||
if (error && (error.reason === DomainError.STILL_BUSY || error.reason === DomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
|
||||
retryCallback(null, error || changeId);
|
||||
});
|
||||
@@ -241,19 +289,20 @@ function registerSubdomain(app, overwrite, callback) {
|
||||
}, function (error, result) {
|
||||
if (error || result instanceof Error) return callback(error || result);
|
||||
|
||||
// dnsRecordId tracks whether we created this DNS record so that we can unregister later
|
||||
updateApp(app, { dnsRecordId: result }, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function unregisterSubdomain(app, location, callback) {
|
||||
function unregisterSubdomain(app, location, domain, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof location, 'string');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// do not unregister bare domain because we show a error/cloudron info page there
|
||||
if (!config.isCustomDomain() && location === '') {
|
||||
debugApp(app, 'Skip unregister of empty subdomain');
|
||||
if (!app.dnsRecordId) {
|
||||
debugApp(app, 'Skip unregister of record not created by cloudron');
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
@@ -261,10 +310,11 @@ function unregisterSubdomain(app, location, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 30, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Unregistering subdomain: %s', location);
|
||||
debugApp(app, 'Unregistering subdomain: %s', app.intrinsicFqdn);
|
||||
|
||||
subdomains.remove(location, 'A', [ ip ], function (error) {
|
||||
if (error && (error.reason === SubdomainError.STILL_BUSY || error.reason === SubdomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
domains.removeDNSRecords(location, domain, 'A', [ ip ], function (error) {
|
||||
if (error && error.reason === DomainError.NOT_FOUND) return retryCallback(null, null); // domain can be not found if oldConfig.domain or restoreConfig.domain was removed
|
||||
if (error && (error.reason === DomainError.STILL_BUSY || error.reason === DomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
|
||||
retryCallback(null, error);
|
||||
});
|
||||
@@ -298,7 +348,7 @@ function waitForDnsPropagation(app, callback) {
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
subdomains.waitForDns(config.appFqdn(app.location), ip, 'A', { interval: 5000, times: 120 }, callback);
|
||||
domains.waitForDNSRecord(app.intrinsicFqdn, app.domain, ip, 'A', { interval: 5000, times: 120 }, callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -307,26 +357,16 @@ function waitForAltDomainDnsPropagation(app, callback) {
|
||||
|
||||
// try for 10 minutes before giving up. this allows the user to "reconfigure" the app in the case where
|
||||
// an app has an external domain and cloudron is migrated to custom domain.
|
||||
subdomains.waitForDns(app.altDomain, config.appFqdn(app.location), 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
}
|
||||
var isNakedDomain = tld.getDomain(app.altDomain) === app.altDomain;
|
||||
if (isNakedDomain) { // check naked domains with A record since CNAME records don't work there
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// updates the app object and the database
|
||||
function updateApp(app, values, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof values, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'updating app with values: %j', values);
|
||||
|
||||
appdb.update(app.id, values, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
for (var value in values) {
|
||||
app[value] = values[value];
|
||||
}
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
domains.waitForDNSRecord(app.altDomain, tld.getDomain(app.altDomain), ip, 'A', { interval: 10000, times: 60 }, callback);
|
||||
});
|
||||
} else {
|
||||
domains.waitForDNSRecord(app.altDomain, tld.getDomain(app.altDomain), app.intrinsicFqdn + '.', 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
}
|
||||
}
|
||||
|
||||
// Ordering is based on the following rationale:
|
||||
@@ -339,22 +379,35 @@ function updateApp(app, values, callback) {
|
||||
// - setup addons (requires the above volume)
|
||||
// - setup the container (requires image, volumes, addons)
|
||||
// - setup collectd (requires container id)
|
||||
// restore is also handled here since restore is just an install with some oldConfig to clean up
|
||||
function install(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const restoreConfig = app.restoreConfig, isRestoring = app.installationState === appdb.ISTATE_PENDING_RESTORE;
|
||||
|
||||
async.series([
|
||||
verifyManifest.bind(null, app),
|
||||
// this protects against the theoretical possibility of an app being marked for install/restore from
|
||||
// a previous version of box code
|
||||
verifyManifest.bind(null, app.manifest),
|
||||
|
||||
// teardown for re-installs
|
||||
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
|
||||
unconfigureNginx.bind(null, app),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
removeLogrotateConfig.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
addons.teardownAddons.bind(null, app, app.manifest.addons),
|
||||
deleteVolume.bind(null, app),
|
||||
unregisterSubdomain.bind(null, app, app.location),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : app.manifest.addons),
|
||||
deleteVolume.bind(null, app, { removeDirectory: false }), // do not remove any symlinked volume
|
||||
|
||||
// for restore case
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
@@ -362,7 +415,7 @@ function install(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '30, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app, false /* overwrite */),
|
||||
registerSubdomain.bind(null, app, isRestoring /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -370,12 +423,26 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '50, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
function restoreFromBackup(next) {
|
||||
if (!restoreConfig) {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
], next);
|
||||
} else {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Download backup and restoring addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, restoreConfig),
|
||||
], next);
|
||||
}
|
||||
},
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '75, Setting up logrotate config' }),
|
||||
addLogrotateConfig.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Setting up collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
@@ -384,8 +451,8 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !lastBackupId
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !restoreConfig
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configure nginx' }),
|
||||
configureNginx.bind(null, app),
|
||||
@@ -410,7 +477,7 @@ function backup(app, callback) {
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Backing up' }),
|
||||
backups.backupApp.bind(null, app, app.manifest, 'appbackups' /* tag */),
|
||||
backups.backupApp.bind(null, app),
|
||||
|
||||
// done!
|
||||
function (callback) {
|
||||
@@ -426,99 +493,26 @@ function backup(app, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// restore is also called for upgrades and infra updates. note that in those cases it is possible there is no backup
|
||||
function restore(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// we don't have a backup, same as re-install. this allows us to install from install failures (update failures always
|
||||
// have a backupId)
|
||||
if (!app.lastBackupId) {
|
||||
debugApp(app, 'No lastBackupId. reinstalling');
|
||||
return install(app, callback);
|
||||
}
|
||||
|
||||
var backupId = app.lastBackupId;
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
|
||||
unconfigureNginx.bind(null, app),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : null),
|
||||
deleteVolume.bind(null, app),
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading icon' }),
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '55, Registering subdomain' }), // ip might change during upgrades
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '65, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Download backup and restore addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '75, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Setting up collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
runApp.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configuring Nginx' }),
|
||||
configureNginx.bind(null, app),
|
||||
|
||||
// done!
|
||||
function (callback) {
|
||||
debugApp(app, 'restored');
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null }, callback);
|
||||
}
|
||||
], function seriesDone(error) {
|
||||
if (error) {
|
||||
debugApp(app, 'Error installing app: %s', error);
|
||||
return updateApp(app, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message }, callback.bind(null, error));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// note that configure is called after an infra update as well
|
||||
function configure(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// oldConfig can be null during an infra update
|
||||
var locationChanged = app.oldConfig && (app.oldConfig.intrinsicFqdn !== app.intrinsicFqdn);
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
|
||||
unconfigureNginx.bind(null, app),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
removeLogrotateConfig.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
function (next) {
|
||||
// oldConfig can be null during an infra update
|
||||
if (!app.oldConfig || app.oldConfig.location === app.location) return next();
|
||||
unregisterSubdomain(app, app.oldConfig.location, next);
|
||||
if (!locationChanged) return next();
|
||||
|
||||
// the config.fqdn() fallback can be removed after 1.9
|
||||
unregisterSubdomain(app, app.oldConfig.location, app.oldConfig.domain || config.fqdn(), next);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
@@ -527,7 +521,7 @@ function configure(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '35, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
registerSubdomain.bind(null, app, !locationChanged /* overwrite */), // if location changed, do not overwrite to detect conflicts
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -542,6 +536,9 @@ function configure(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '60, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '65, Setting up logrotate config' }),
|
||||
addLogrotateConfig.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Add collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
@@ -550,7 +547,7 @@ function configure(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '80, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain CNAME setup' }),
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Configuring Nginx' }),
|
||||
@@ -575,54 +572,87 @@ function update(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debugApp(app, 'Updating to %s', safe.query(app, 'manifest.version'));
|
||||
debugApp(app, `Updating to ${app.updateConfig.manifest.version}`);
|
||||
|
||||
// app does not want these addons anymore
|
||||
// FIXME: this does not handle option changes (like multipleDatabases)
|
||||
var unusedAddons = _.omit(app.oldConfig.manifest.addons, Object.keys(app.manifest.addons));
|
||||
var unusedAddons = _.omit(app.manifest.addons, Object.keys(app.updateConfig.manifest.addons));
|
||||
|
||||
async.series([
|
||||
// this protects against the theoretical possibility of an app being marked for update from
|
||||
// a previous version of box code
|
||||
updateApp.bind(null, app, { installationProgress: '0, Verify manifest' }),
|
||||
verifyManifest.bind(null, app),
|
||||
|
||||
// download new image before app is stopped. this is so we can reduce downtime
|
||||
// and also not remove the 'common' layers when the old image is deleted
|
||||
updateApp.bind(null, app, { installationProgress: '15, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
// note: we cleanup first and then backup. this is done so that the app is not running should backup fail
|
||||
// we cannot easily 'recover' from backup failures because we have to revert manfest and portBindings
|
||||
updateApp.bind(null, app, { installationProgress: '25, Cleaning up old install' }),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
function deleteImageIfChanged(done) {
|
||||
if (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
verifyManifest.bind(null, app.updateConfig.manifest),
|
||||
|
||||
function (next) {
|
||||
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '30, Backing up app' }),
|
||||
backups.backupApp.bind(null, app, app.oldConfig.manifest, 'appbackups' /* tag */)
|
||||
], next);
|
||||
updateApp.bind(null, app, { installationProgress: '15, Backing up app' }),
|
||||
backups.backupApp.bind(null, app)
|
||||
], function (error) {
|
||||
if (error) error.backupError = true;
|
||||
next(error);
|
||||
});
|
||||
},
|
||||
|
||||
// download new image before app is stopped. this is so we can reduce downtime
|
||||
// and also not remove the 'common' layers when the old image is deleted
|
||||
updateApp.bind(null, app, { installationProgress: '25, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.updateConfig.manifest),
|
||||
|
||||
// note: we cleanup first and then backup. this is done so that the app is not running should backup fail
|
||||
// we cannot easily 'recover' from backup failures because we have to revert manfest and portBindings
|
||||
updateApp.bind(null, app, { installationProgress: '35, Cleaning up old install' }),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
removeLogrotateConfig.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
function deleteImageIfChanged(done) {
|
||||
if (app.manifest.dockerImage === app.updateConfig.manifest.dockerImage) return done();
|
||||
|
||||
docker.deleteImage(app.manifest, done);
|
||||
},
|
||||
|
||||
// only delete unused addons after backup
|
||||
addons.teardownAddons.bind(null, app, unusedAddons),
|
||||
|
||||
// free unused ports
|
||||
function (next) {
|
||||
// make sure we always have objects
|
||||
var currentPorts = app.portBindings || {};
|
||||
var newPorts = app.updateConfig.manifest.tcpPorts || {};
|
||||
|
||||
async.each(Object.keys(currentPorts), function (portName, callback) {
|
||||
if (newPorts[portName]) return callback(); // port still in use
|
||||
|
||||
appdb.delPortBinding(currentPorts[portName], function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) console.error('Portbinding does not exist in database.');
|
||||
else if (error) return next(error);
|
||||
|
||||
// also delete from app object for further processing (the db is updated in the next step)
|
||||
delete app.portBindings[portName];
|
||||
|
||||
callback();
|
||||
});
|
||||
}, next);
|
||||
},
|
||||
|
||||
// switch over to the new config. manifest, memoryLimit, portBindings, appstoreId are updated here
|
||||
updateApp.bind(null, app, app.updateConfig),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '45, Downloading icon' }),
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Updating addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
addons.setupAddons.bind(null, app, app.updateConfig.manifest.addons),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Setting up logrotate config' }),
|
||||
addLogrotateConfig.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Add collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
@@ -631,14 +661,18 @@ function update(app, callback) {
|
||||
// done!
|
||||
function (callback) {
|
||||
debugApp(app, 'updated');
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null }, callback);
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null, updateConfig: null, updateTime: new Date() }, callback);
|
||||
}
|
||||
], function seriesDone(error) {
|
||||
if (error) {
|
||||
if (error && error.backupError) {
|
||||
debugApp(app, 'update aborted because backup failed', error);
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null, updateConfig: null }, callback.bind(null, error));
|
||||
} else if (error) {
|
||||
debugApp(app, 'Error updating app: %s', error);
|
||||
return updateApp(app, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message }, callback.bind(null, error));
|
||||
updateApp(app, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message, updateTime: new Date() }, callback.bind(null, error));
|
||||
} else {
|
||||
callback(null);
|
||||
}
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -652,6 +686,9 @@ function uninstall(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '0, Remove collectd profile' }),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '5, Remove logrotate config' }),
|
||||
removeLogrotateConfig.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '10, Stopping app' }),
|
||||
stopApp.bind(null, app),
|
||||
|
||||
@@ -662,13 +699,13 @@ function uninstall(app, callback) {
|
||||
addons.teardownAddons.bind(null, app, app.manifest.addons),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Deleting volume' }),
|
||||
deleteVolume.bind(null, app),
|
||||
deleteVolume.bind(null, app, { removeDirectory: true }),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '50, Deleting image' }),
|
||||
docker.deleteImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Unregistering subdomain' }),
|
||||
unregisterSubdomain.bind(null, app, app.location),
|
||||
unregisterSubdomain.bind(null, app, app.location, app.domain),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Cleanup icon' }),
|
||||
removeIcon.bind(null, app),
|
||||
@@ -732,7 +769,7 @@ function startTask(appId, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// determine what to do
|
||||
appdb.get(appId, function (error, app) {
|
||||
apps.get(appId, function (error, app) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debugApp(app, 'startTask installationState: %s runState: %s', app.installationState, app.runState);
|
||||
@@ -740,13 +777,17 @@ function startTask(appId, callback) {
|
||||
switch (app.installationState) {
|
||||
case appdb.ISTATE_PENDING_UNINSTALL: return uninstall(app, callback);
|
||||
case appdb.ISTATE_PENDING_CONFIGURE: return configure(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_UPDATE: return update(app, callback);
|
||||
case appdb.ISTATE_PENDING_RESTORE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_RESTORE: return install(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_BACKUP: return backup(app, callback);
|
||||
case appdb.ISTATE_INSTALLED: return handleRunCommand(app, callback);
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
|
||||
case appdb.ISTATE_ERROR:
|
||||
debugApp(app, 'Internal error. apptask launched with error status.');
|
||||
return callback(null);
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
exports = module.exports = {
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize
|
||||
uninitialize: uninitialize,
|
||||
|
||||
accessTokenAuth: accessTokenAuth
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
@@ -23,22 +25,22 @@ var assert = require('assert'),
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
passport.serializeUser(function (user, callback) {
|
||||
callback(null, user.id);
|
||||
});
|
||||
|
||||
|
||||
passport.deserializeUser(function(userId, callback) {
|
||||
user.get(userId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
var md5 = crypto.createHash('md5').update(result.alternateEmail || result.email).digest('hex');
|
||||
result.gravatar = 'https://www.gravatar.com/avatar/' + md5 + '.jpg?s=24&d=mm';
|
||||
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
passport.use(new LocalStrategy(function (username, password, callback) {
|
||||
if (username.indexOf('@') === -1) {
|
||||
user.verifyWithUsername(username, password, function (error, result) {
|
||||
@@ -58,7 +60,7 @@ function initialize(callback) {
|
||||
});
|
||||
}
|
||||
}));
|
||||
|
||||
|
||||
passport.use(new BasicStrategy(function (username, password, callback) {
|
||||
if (username.indexOf('cid-') === 0) {
|
||||
debug('BasicStrategy: detected client id %s instead of username:password', username);
|
||||
@@ -80,7 +82,7 @@ function initialize(callback) {
|
||||
});
|
||||
}
|
||||
}));
|
||||
|
||||
|
||||
passport.use(new ClientPasswordStrategy(function (clientId, clientSecret, callback) {
|
||||
clients.get(clientId, function(error, client) {
|
||||
if (error && error.reason === ClientsError.NOT_FOUND) return callback(null, false);
|
||||
@@ -89,30 +91,35 @@ function initialize(callback) {
|
||||
return callback(null, client);
|
||||
});
|
||||
}));
|
||||
|
||||
passport.use(new BearerStrategy(function (accessToken, callback) {
|
||||
tokendb.get(accessToken, function (error, token) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
// scopes here can define what capabilities that token carries
|
||||
// passport put the 'info' object into req.authInfo, where we can further validate the scopes
|
||||
var info = { scope: token.scope };
|
||||
|
||||
user.get(token.identifier, function (error, user) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, user, info);
|
||||
});
|
||||
});
|
||||
}));
|
||||
|
||||
|
||||
passport.use(new BearerStrategy(accessTokenAuth));
|
||||
|
||||
callback(null);
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
callback(null);
|
||||
}
|
||||
|
||||
function accessTokenAuth(accessToken, callback) {
|
||||
assert.strictEqual(typeof accessToken, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
tokendb.get(accessToken, function (error, token) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
// scopes here can define what capabilities that token carries
|
||||
// passport put the 'info' object into req.authInfo, where we can further validate the scopes
|
||||
var info = { scope: token.scope };
|
||||
|
||||
user.get(token.identifier, function (error, user) {
|
||||
if (error && error.reason === UserError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, user, info);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -3,15 +3,20 @@
|
||||
var assert = require('assert'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', ];
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'manifestJson', 'format' ];
|
||||
|
||||
exports = module.exports = {
|
||||
add: add,
|
||||
getPaged: getPaged,
|
||||
|
||||
getByTypeAndStatePaged: getByTypeAndStatePaged,
|
||||
getByTypePaged: getByTypePaged,
|
||||
|
||||
get: get,
|
||||
del: del,
|
||||
update: update,
|
||||
getByAppIdPaged: getByAppIdPaged,
|
||||
|
||||
_clear: clear,
|
||||
@@ -20,27 +25,50 @@ exports = module.exports = {
|
||||
BACKUP_TYPE_BOX: 'box',
|
||||
|
||||
BACKUP_STATE_NORMAL: 'normal', // should rename to created to avoid listing in UI?
|
||||
BACKUP_STATE_CREATING: 'creating',
|
||||
BACKUP_STATE_ERROR: 'error'
|
||||
};
|
||||
|
||||
function postProcess(result) {
|
||||
assert.strictEqual(typeof result, 'object');
|
||||
|
||||
result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
|
||||
|
||||
result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
|
||||
delete result.manifestJson;
|
||||
}
|
||||
|
||||
function getPaged(page, perPage, callback) {
|
||||
function getByTypeAndStatePaged(type, state, page, perPage, callback) {
|
||||
assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
|
||||
assert.strictEqual(typeof state, 'string');
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? ORDER BY creationTime DESC LIMIT ?,?',
|
||||
[ exports.BACKUP_TYPE_BOX, exports.BACKUP_STATE_NORMAL, (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
[ type, state, (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
|
||||
callback(null, results);
|
||||
});
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function getByTypePaged(type, page, perPage, callback) {
|
||||
assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? ORDER BY creationTime DESC LIMIT ?,?',
|
||||
[ type, (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function getByAppIdPaged(page, perPage, appId, callback) {
|
||||
@@ -52,12 +80,12 @@ function getByAppIdPaged(page, perPage, appId, callback) {
|
||||
// box versions (0.93.x and below) used to use appbackup_ prefix
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? AND id LIKE ? ORDER BY creationTime DESC LIMIT ?,?',
|
||||
[ exports.BACKUP_TYPE_APP, exports.BACKUP_STATE_NORMAL, '%app%\\_' + appId + '\\_%', (page-1)*perPage, perPage ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
results.forEach(function (result) { postProcess(result); });
|
||||
|
||||
callback(null, results);
|
||||
});
|
||||
callback(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
function get(id, callback) {
|
||||
@@ -66,13 +94,13 @@ function get(id, callback) {
|
||||
|
||||
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE id = ? ORDER BY creationTime DESC',
|
||||
[ id ], function (error, result) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (result.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (result.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
postProcess(result[0]);
|
||||
postProcess(result[0]);
|
||||
|
||||
callback(null, result[0]);
|
||||
});
|
||||
callback(null, result[0]);
|
||||
});
|
||||
}
|
||||
|
||||
function add(backup, callback) {
|
||||
@@ -81,14 +109,37 @@ function add(backup, callback) {
|
||||
assert.strictEqual(typeof backup.version, 'string');
|
||||
assert(backup.type === exports.BACKUP_TYPE_APP || backup.type === exports.BACKUP_TYPE_BOX);
|
||||
assert(util.isArray(backup.dependsOn));
|
||||
assert.strictEqual(typeof backup.manifest, 'object');
|
||||
assert.strictEqual(typeof backup.format, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var creationTime = backup.creationTime || new Date(); // allow tests to set the time
|
||||
var manifestJson = JSON.stringify(backup.manifest);
|
||||
|
||||
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn) VALUES (?, ?, ?, ?, ?, ?)',
|
||||
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(',') ],
|
||||
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), manifestJson, backup.format ],
|
||||
function (error) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function update(id, backup, callback) {
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof backup, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var fields = [ ], values = [ ];
|
||||
for (var p in backup) {
|
||||
fields.push(p + ' = ?');
|
||||
values.push(backup[p]);
|
||||
}
|
||||
values.push(id);
|
||||
|
||||
database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
|
||||