Compare commits

...

26 Commits
7.4 ... v6.2.8

Author SHA1 Message Date
Girish Ramakrishnan
1cbce24f25 graphite: carbon crash fix
https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=923464
https://forum.cloudron.io/topic/4797/graphite-keeps-crashing-oom/34
(cherry picked from commit cd300bb6e2)
2021-04-27 14:30:30 -07:00
Girish Ramakrishnan
e691a09177 6.2.8 changes 2021-04-27 11:42:28 -07:00
Girish Ramakrishnan
3583aaf882 Fix issue where df output is not parsed correctly
LANG is the default locale, i.e. used when LC_* are not specifically set.
LC_ALL will override them all

https://forum.cloudron.io/topic/4681/going-to-system-info-triggers-assertion-error
(cherry picked from commit f7bd47888a)
2021-04-27 11:40:14 -07:00
Girish Ramakrishnan
46f642bade turn: turn off verbose logging
(cherry picked from commit 8b99af952a)
2021-04-27 11:39:00 -07:00
Girish Ramakrishnan
03e970c442 firewall: Set BOX_ENV
(cherry picked from commit 00856b79dd)
2021-04-27 11:37:42 -07:00
Girish Ramakrishnan
ae962308f3 namecheap: fix del
(cherry picked from commit 0712eb1250)
2021-04-27 11:36:56 -07:00
Girish Ramakrishnan
5880204523 namecheap: Send it as POST
(cherry picked from commit 564409d8b7)
2021-04-27 11:36:49 -07:00
Girish Ramakrishnan
314668a624 namecheap: refactor
(cherry picked from commit 1c9c8e8e2b)
2021-04-27 11:36:38 -07:00
Johannes Zellner
e0209ce4f1 print dashboard domain on --owner-login
(cherry picked from commit 8757e5ba42)
2021-04-27 11:36:07 -07:00
Girish Ramakrishnan
231c8d590b mysql: bump connection limit to 200
(cherry picked from commit 131711ef5c)
2021-04-27 11:35:56 -07:00
Johannes Zellner
41142b5eda Fix blocklist setting when source and list have mixed ip versions
(cherry picked from commit 5ae5566ce8)
2021-04-27 11:35:37 -07:00
Girish Ramakrishnan
e22e718f4f linode object storage: update aws sdk
https://github.com/aws/aws-sdk-js/pull/3674
(cherry picked from commit 919f510796)
2021-04-27 11:35:05 -07:00
Girish Ramakrishnan
0aaf3b418f collectd: cache du values and send it every Interval (20)
collectd plugin ordering matters. the write_graphite plugin establishes
a TCP connection but there is a race between that and the df/du values that
get reported. du is especially problematic since we report this only every 12 hours.

so, instead we cache the values and report it every 20 seconds. on the carbon side,
it will just retain every 12 hours (since that is the whisper retention period).

there is also FlushInterval which I am not 100% sure has any effect. by default, the
write_graphite plugin waits for 1428 bytes to be accumulated. (https://manpages.debian.org/unstable/collectd-core/collectd.conf.5.en.html)

https://github.com/collectd/collectd/issues/2672
https://github.com/collectd/collectd/pull/1044

I found this syntax hidden deep inside https://www.cisco.com/c/en/us/td/docs/net_mgmt/virtual_topology_system/2_6_3/user_guide/Cisco_VTS_2_6_3_User_Guide/Cisco_VTS_2_6_1_User_Guide_chapter_01111.pdf

(cherry picked from commit c1ee3dcbd4)
2021-03-26 00:21:52 -07:00
Girish Ramakrishnan
334472bf25 redis: backup before upgrade
(cherry picked from commit d277f8137b)
2021-03-24 19:27:32 -07:00
Girish Ramakrishnan
e9fb5d7e60 6.2.7 changes 2021-03-24 14:11:38 -07:00
Girish Ramakrishnan
43c3a4f781 graphite: restart collectd on upgrade
(cherry picked from commit 7ae79fe3a5)
2021-03-24 14:10:51 -07:00
Girish Ramakrishnan
6cc07cd005 Add 6.2.6 changes 2021-03-24 10:35:30 -07:00
Girish Ramakrishnan
6c40cceddc give graphite more time to start before restarting collectd
(cherry picked from commit 1f59974e83)
2021-03-24 10:26:36 -07:00
Girish Ramakrishnan
9be09510d4 graphite: restart collectd as well
(cherry picked from commit 0447dce0d6)
2021-03-23 16:40:20 -07:00
Girish Ramakrishnan
83488bc4ce graphite: implement upgrade
for the moment, we wipe out the old data and start afresh. this is because
the graphite web app keeps changing quite drastically.

(cherry picked from commit 32f385741a)
2021-03-23 16:39:51 -07:00
Girish Ramakrishnan
2f9a8029c4 sftp: only rebuild when app task queue is empty
when multiple apptasks are scheduled, we end up with a sequence like this:
    - task1 finishes
    - task2 (uninstall) removes appdata directory
    - sftp rebuild (from task1 finish)
    - task2 fails because sftp rebuild created empty appdata directory

a fix is to delay the sftp rebuild until all tasks are done. of course,
the same race is still there, if a user initiated another task immediately
but this seems unlikely. if that happens often, we can further add a sftpRebuildInProgress
flag inside apptaskmanager.

(cherry picked from commit 4cba5ca405)
2021-03-22 14:33:44 -07:00
Girish Ramakrishnan
07af3edf51 request has no retry method
i thought it was using superagent

(cherry picked from commit 7df89e66c8)
2021-03-22 14:32:57 -07:00
Girish Ramakrishnan
636d1f3e20 acme2: add a retry to getDirectory, since users are reporting a 429
(cherry picked from commit 4954b94d4a)
2021-03-22 14:32:50 -07:00
Girish Ramakrishnan
3b69e4dcec graphite: disable tagdb
(cherry picked from commit 8048e68eb6)
2021-03-22 14:32:18 -07:00
Girish Ramakrishnan
d977b0b238 update: set memory limit properly
(cherry picked from commit 750f313c6a)
2021-03-22 14:30:22 -07:00
Girish Ramakrishnan
909c6bccb1 error.code is a number which causes crash at times in BoxError
(cherry picked from commit 1e96606110)
2021-03-22 14:29:26 -07:00
30 changed files with 503 additions and 270 deletions

21
CHANGES
View File

@@ -2232,3 +2232,24 @@
[6.2.4]
* Another addon crash fix
[6.2.5]
* update: set memory limit properly
* Fix bug where renew certs button did not work
* sftp: fix rebuild condition
* Fix display of user management/dashboard visibility for email apps
* graphite: disable tagdb and reduce log noise
[6.2.6]
* Fix issue where collectd is restarted too quickly before graphite
[6.2.7]
* collectd: restart on graphite upgrade
[6.2.8]
* linode object storage: update aws sdk to make it work again
* Fix crash in blocklist setting when source and list have mixed ip versions
* mysql: bump connection limit to 200
* namecheap: fix issue where DNS updates and del were not working
* turn: turn off verbose logging
* Fix crash when parsing df output (set LC_ALL for box service)

93
package-lock.json generated
View File

@@ -54,9 +54,9 @@
"integrity": "sha512-d4VSA86eL/AFTe5xtyZX+ePUjE8dIFu2T8zmdeNBSa5/kNgXPCx/o/wbFNHAGLJdGnk1vddRuMESD9HbOC8irw=="
},
"@google-cloud/storage": {
"version": "5.8.0",
"resolved": "https://registry.npmjs.org/@google-cloud/storage/-/storage-5.8.0.tgz",
"integrity": "sha512-WOShvBPOfkDXUzXMO+3j8Bzus+PFI9r1Ey9dLG2Zf458/PVuFTtaRWntd9ZiDG8g90zl2LmnA1JkDCreGUKr5g==",
"version": "5.8.3",
"resolved": "https://registry.npmjs.org/@google-cloud/storage/-/storage-5.8.3.tgz",
"integrity": "sha512-g++NTmpmwbZZEnBhJi3y1D3YyZ2Y+1xL5blp96eeJhffginMym5tRw/AGNZblDI35U2K1FTJEYqIZ31tbEzs8w==",
"requires": {
"@google-cloud/common": "^3.6.0",
"@google-cloud/paginator": "^3.0.0",
@@ -103,9 +103,9 @@
"integrity": "sha512-A1B3Bh1UmL0bidM/YX2NsCOTnGJePL9rO/M+Mw3m9f2gUpfokS0hi5Eah0WSUEWZdZhIZtMjkIYS7mDfOqNHbg=="
},
"google-auth-library": {
"version": "7.0.2",
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.0.2.tgz",
"integrity": "sha512-vjyNZR3pDLC0u7GHLfj+Hw9tGprrJwoMwkYGqURCXYITjCrP9HprOyxVV+KekdLgATtWGuDkQG2MTh0qpUPUgg==",
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.0.3.tgz",
"integrity": "sha512-6wJNYqY1QUr5I2lWaUkkzOT2b9OCNhNQrdFOt/bsBbGb7T7NCdEvrBsXraUm+KTUGk2xGlQ7m9RgUd4Llcw8NQ==",
"requires": {
"arrify": "^2.0.0",
"base64-js": "^1.3.0",
@@ -316,9 +316,9 @@
"integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k="
},
"aws-sdk": {
"version": "2.850.0",
"resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.850.0.tgz",
"integrity": "sha512-RqPeSKe1JlhTUL9+xUsp771ZtMY7JICoQEnFJuJ+JVqGyILhg95L4t8S5KnznUfWYc0pcpTiHKLmPteHyHS3pw==",
"version": "2.879.0",
"resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.879.0.tgz",
"integrity": "sha512-HRfjGwST1U9AvCJFAyqpAJwbjFR4LqUyEUk77qdJpdYHL9pGPHdnEfGRkBkPn36xcC7Em6gVvFveVoEihbQUyQ==",
"requires": {
"buffer": "4.9.2",
"events": "1.1.1",
@@ -359,7 +359,7 @@
},
"backoff": {
"version": "2.5.0",
"resolved": "https://registry.npmjs.org/backoff/-/backoff-2.5.0.tgz",
"resolved": false,
"integrity": "sha1-9hbtqdPktmuMp/ynn2lXIsX44m8=",
"requires": {
"precond": "0.2"
@@ -574,6 +574,17 @@
"is-glob": "~4.0.1",
"normalize-path": "~3.0.0",
"readdirp": "~3.5.0"
},
"dependencies": {
"readdirp": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
"integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
"dev": true,
"requires": {
"picomatch": "^2.2.1"
}
}
}
},
"chownr": {
@@ -1761,9 +1772,9 @@
},
"dependencies": {
"google-auth-library": {
"version": "7.0.2",
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.0.2.tgz",
"integrity": "sha512-vjyNZR3pDLC0u7GHLfj+Hw9tGprrJwoMwkYGqURCXYITjCrP9HprOyxVV+KekdLgATtWGuDkQG2MTh0qpUPUgg==",
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-7.0.3.tgz",
"integrity": "sha512-6wJNYqY1QUr5I2lWaUkkzOT2b9OCNhNQrdFOt/bsBbGb7T7NCdEvrBsXraUm+KTUGk2xGlQ7m9RgUd4Llcw8NQ==",
"requires": {
"arrify": "^2.0.0",
"base64-js": "^1.3.0",
@@ -1843,9 +1854,9 @@
}
},
"glob-parent": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz",
"integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==",
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
"dev": true,
"requires": {
"is-glob": "^4.0.1"
@@ -2732,9 +2743,9 @@
"integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A=="
},
"mocha": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/mocha/-/mocha-8.3.0.tgz",
"integrity": "sha512-TQqyC89V1J/Vxx0DhJIXlq9gbbL9XFNdeLQ1+JsnZsVaSOV1z3tWfw0qZmQJGQRIfkvZcs7snQnZnOCKoldq1Q==",
"version": "8.3.2",
"resolved": "https://registry.npmjs.org/mocha/-/mocha-8.3.2.tgz",
"integrity": "sha512-UdmISwr/5w+uXLPKspgoV7/RXZwKRTiTjJ2/AC5ZiEztIoOYdfKb19+9jNmEInzx5pBsCyJQzarAxqIGBNYJhg==",
"dev": true,
"requires": {
"@ungap/promise-all-settled": "1.1.2",
@@ -2873,9 +2884,9 @@
"dev": true
},
"string-width": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz",
"integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==",
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz",
"integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==",
"dev": true,
"requires": {
"emoji-regex": "^8.0.0",
@@ -3112,9 +3123,9 @@
"integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw=="
},
"nock": {
"version": "13.0.7",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.0.7.tgz",
"integrity": "sha512-WBz73VYIjdbO6BwmXODRQLtn7B5tldA9pNpWJe5QTtTEscQlY5KXU4srnGzBOK2fWakkXj69gfTnXGzmrsaRWw==",
"version": "13.0.11",
"resolved": "https://registry.npmjs.org/nock/-/nock-13.0.11.tgz",
"integrity": "sha512-sKZltNkkWblkqqPAsjYW0bm3s9DcHRPiMOyKO/PkfJ+ANHZ2+LA2PLe22r4lLrKgXaiSaDQwW3qGsJFtIpQIeQ==",
"dev": true,
"requires": {
"debug": "^4.1.0",
@@ -3223,9 +3234,9 @@
}
},
"nodemailer": {
"version": "6.4.18",
"resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-6.4.18.tgz",
"integrity": "sha512-ht9cXxQ+lTC+t00vkSIpKHIyM4aXIsQ1tcbQCn5IOnxYHi81W2XOaU66EQBFFpbtzLEBTC94gmkbD4mGZQzVpA=="
"version": "6.5.0",
"resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-6.5.0.tgz",
"integrity": "sha512-Tm4RPrrIZbnqDKAvX+/4M+zovEReiKlEXWDzG4iwtpL9X34MJY+D5LnQPH/+eghe8DLlAVshHAJZAZWBGhkguw=="
},
"nodemailer-fetch": {
"version": "1.6.0",
@@ -3487,7 +3498,7 @@
},
"precond": {
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/precond/-/precond-0.2.3.tgz",
"resolved": false,
"integrity": "sha1-qpWRvKokkj8eD0hJ0kD0fvwQdaw="
},
"pretty-bytes": {
@@ -3741,9 +3752,9 @@
}
},
"readdirp": {
"version": "3.5.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.5.0.tgz",
"integrity": "sha512-cMhu7c/8rdhkHXWsY+osBhfSy0JikwpHK/5+imo+LpeasTF8ouErHrlYkwT0++njiyuDvc7OFY5T3ukvZ8qmFQ==",
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
"integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
"requires": {
"picomatch": "^2.2.1"
}
@@ -3947,9 +3958,9 @@
}
},
"semver": {
"version": "7.3.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz",
"integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==",
"version": "7.3.5",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
"integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
"requires": {
"lru-cache": "^6.0.0"
},
@@ -4660,9 +4671,9 @@
}
},
"underscore": {
"version": "1.12.0",
"resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.0.tgz",
"integrity": "sha512-21rQzss/XPMjolTiIezSu3JAjgagXKROtNrYFEOWK109qY1Uv2tVjPTZ1ci2HgvQDA16gHYSthQIJfB+XId/rQ=="
"version": "1.12.1",
"resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.1.tgz",
"integrity": "sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw=="
},
"unique-string": {
"version": "2.0.0",
@@ -4916,9 +4927,9 @@
}
},
"ws": {
"version": "7.4.3",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.3.tgz",
"integrity": "sha512-hr6vCR76GsossIRsr8OLR9acVVm1jyfEWvhbNjtgPOrfvAlKzvyeg/P6r8RuDjRyrcQoPQT7K0DGEPc7Ae6jzA=="
"version": "7.4.4",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.4.4.tgz",
"integrity": "sha512-Qm8k8ojNQIMx7S+Zp8u/uHOx7Qazv3Yv4q68MiWWWOJhiwG5W3x7iqmRtJo8xxrciZUY4vRxUTJCKuRnF28ZZw=="
},
"xdg-basedir": {
"version": "4.0.0",

View File

@@ -12,10 +12,10 @@
},
"dependencies": {
"@google-cloud/dns": "^2.1.0",
"@google-cloud/storage": "^5.8.0",
"@google-cloud/storage": "^5.8.3",
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
"async": "^3.2.0",
"aws-sdk": "^2.850.0",
"aws-sdk": "^2.879.0",
"basic-auth": "^2.0.1",
"body-parser": "^1.19.0",
"cloudron-manifestformat": "^5.10.1",
@@ -46,19 +46,19 @@
"multiparty": "^4.2.2",
"mustache-express": "^1.3.0",
"mysql": "^2.18.1",
"nodemailer": "^6.4.18",
"nodemailer": "^6.5.0",
"nodemailer-smtp-transport": "^2.7.4",
"once": "^1.4.0",
"pretty-bytes": "^5.6.0",
"progress-stream": "^2.0.0",
"proxy-middleware": "^0.15.0",
"qrcode": "^1.4.4",
"readdirp": "^3.5.0",
"readdirp": "^3.6.0",
"request": "^2.88.2",
"rimraf": "^3.0.2",
"s3-block-read-stream": "^0.5.0",
"safetydance": "^1.1.1",
"semver": "^7.3.4",
"semver": "^7.3.5",
"showdown": "^1.9.1",
"speakeasy": "^2.0.0",
"split": "^1.0.1",
@@ -67,19 +67,19 @@
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
"tar-stream": "^2.2.0",
"tldjs": "^2.3.1",
"underscore": "^1.12.0",
"underscore": "^1.12.1",
"uuid": "^8.3.2",
"validator": "^13.5.2",
"ws": "^7.4.3",
"ws": "^7.4.4",
"xml2js": "^0.4.23"
},
"devDependencies": {
"expect.js": "*",
"hock": "^1.4.1",
"js2xmlparser": "^4.0.1",
"mocha": "^8.3.0",
"mocha": "^8.3.2",
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
"nock": "^13.0.7",
"nock": "^13.0.11",
"node-sass": "^5.0.0",
"recursive-readdir": "^2.2.2"
},

View File

@@ -39,10 +39,11 @@ while true; do
--owner-login)
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY createdAt LIMIT 1" 2>/dev/null)
admin_password=$(pwgen -1s 12)
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/null)
ghost_file=/home/yellowtent/platformdata/cloudron_ghost.json
printf '{"%s":"%s"}\n' "${admin_username}" "${admin_password}" > "${ghost_file}"
chown yellowtent:yellowtent "${ghost_file}" && chmod o-r,g-r "${ghost_file}"
echo "Login as ${admin_username} / ${admin_password} . This password may only be used once. ${ghost_file} will be automatically removed after use."
echo "Login at https://${dashboard_domain} as ${admin_username} / ${admin_password} . This password may only be used once. ${ghost_file} will be automatically removed after use."
exit 0
;;
--) break;;

View File

@@ -164,7 +164,9 @@ LoadPlugin swap
#LoadPlugin vmem
#LoadPlugin vserver
#LoadPlugin wireless
LoadPlugin write_graphite
<LoadPlugin write_graphite>
FlushInterval 20
</LoadPlugin>
#LoadPlugin write_http
#LoadPlugin write_riemann

View File

@@ -6,19 +6,26 @@ PATHS = [] # { name, dir, exclude }
# there is a pattern in carbon/storage-schemas.conf which stores values every 12h for a year
INTERVAL = 60 * 60 * 12 # twice a day. change values in docker-graphite if you change this
# we used to pass the INTERVAL as a parameter to register_read. however, collectd write_graphite
# takes a bit to load (tcp connection) and drops the du data. this then means that we have to wait
# for INTERVAL secs for du data. instead, we just cache the value for INTERVAL instead
CACHE = dict()
CACHE_TIME = 0
def du(pathinfo):
# -B1 makes du print block sizes and not apparent sizes (to match df which also uses block sizes)
cmd = 'timeout 1800 du -DsB1 "{}"'.format(pathinfo['dir'])
dirname = pathinfo['dir']
cmd = 'timeout 1800 du -DsB1 "{}"'.format(dirname)
if pathinfo['exclude'] != '':
cmd += ' --exclude "{}"'.format(pathinfo['exclude'])
collectd.info('computing size with command: %s' % cmd);
try:
size = subprocess.check_output(cmd, shell=True).split()[0].decode('utf-8')
collectd.info('\tsize of %s is %s (time: %i)' % (pathinfo['dir'], size, int(time.time())))
collectd.info('\tsize of %s is %s (time: %i)' % (dirname, size, int(time.time())))
return size
except Exception as e:
collectd.info('\terror getting the size of %s: %s' % (pathinfo['dir'], str(e)))
collectd.info('\terror getting the size of %s: %s' % (dirname, str(e)))
return 0
def parseSize(size):
@@ -64,19 +71,35 @@ def init():
collectd.info('custom du plugin initialized with %s %s' % (PATHS, sys.version))
def read():
global CACHE, CACHE_TIME
# read from cache if < 12 hours
read_cache = (time.time() - CACHE_TIME) < INTERVAL
if not read_cache:
CACHE_TIME = time.time()
for pathinfo in PATHS:
size = du(pathinfo)
dirname = pathinfo['dir']
if read_cache and dirname in CACHE:
size = CACHE[dirname]
else:
size = du(pathinfo)
CACHE[dirname] = size
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db
val = collectd.Values(type='capacity', plugin='du', plugin_instance=pathinfo['name'])
val.dispatch(values=[size], type_instance='usage')
size = dockerSize()
if read_cache and 'docker' in CACHE:
size = CACHE['docker']
else:
size = dockerSize()
CACHE['docker'] = size
val = collectd.Values(type='capacity', plugin='du', plugin_instance='docker')
val.dispatch(values=[size], type_instance='usage')
collectd.register_init(init)
collectd.register_config(configure)
collectd.register_read(read, INTERVAL)
collectd.register_read(read)

View File

@@ -4,7 +4,7 @@
# http://bugs.mysql.com/bug.php?id=68514
[mysqld]
performance_schema=OFF
max_connections=50
max_connections=200
# on ec2, without this we get a sporadic connection drop when doing the initial migration
max_allowed_packet=64M

View File

@@ -13,9 +13,6 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/mkdirvolume.sh
Defaults!/home/yellowtent/box/src/scripts/rmaddondir.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmaddondir.sh
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
@@ -41,11 +38,8 @@ yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/scripts/backupup
Defaults!/home/yellowtent/box/src/scripts/restart.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restart.sh
Defaults!/home/yellowtent/box/src/scripts/restartdocker.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartdocker.sh
Defaults!/home/yellowtent/box/src/scripts/restartunbound.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartunbound.sh
Defaults!/home/yellowtent/box/src/scripts/restartservice.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartservice.sh
Defaults!/home/yellowtent/box/src/scripts/rmmailbox.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmailbox.sh

View File

@@ -13,7 +13,8 @@ Type=idle
WorkingDirectory=/home/yellowtent/box
Restart=always
ExecStart=/home/yellowtent/box/box.js
Environment="HOME=/home/yellowtent" "USER=yellowtent" "DEBUG=box:*,connect-lastmile,-box:ldap" "BOX_ENV=cloudron" "NODE_ENV=production"
; we run commands like df which will parse properly only with correct locale
Environment="HOME=/home/yellowtent" "USER=yellowtent" "DEBUG=box:*,connect-lastmile,-box:ldap" "BOX_ENV=cloudron" "NODE_ENV=production" "LC_ALL=C"
; kill apptask processes as well
KillMode=control-group
; Do not kill this process on OOM. Children inherit this score. Do not set it to -1000 so that MemoryMax can keep working

View File

@@ -5,6 +5,7 @@ PartOf=docker.service
[Service]
Type=oneshot
Environment="BOX_ENV=cloudron"
ExecStart="/home/yellowtent/box/setup/start/cloudron-firewall.sh"
RemainAfterExit=yes

View File

@@ -629,7 +629,7 @@ function scheduleTask(appId, installationState, taskId, callback) {
if (error) return callback(error);
let memoryLimit = 400;
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE) {
if (installationState === exports.ISTATE_PENDING_BACKUP || installationState === exports.ISTATE_PENDING_CLONE || installationState === exports.ISTATE_PENDING_RESTORE || installationState === exports.ISTATE_PENDING_UPDATE) {
memoryLimit = 'memoryLimit' in backupConfig ? Math.max(backupConfig.memoryLimit/1024/1024, 400) : 400;
}

View File

@@ -79,14 +79,16 @@ function scheduleTask(appId, taskId, options, callback) {
delete gActiveTasks[appId];
locker.unlock(locker.OP_APPTASK); // unlock event will trigger next task
// post app task hooks
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
scheduler.resumeJobs(appId);
});
}
function startNextTask() {
if (gPendingTasks.length === 0) return;
if (gPendingTasks.length === 0) {
// rebuild sftp when task queue is empty. this minimizes risk of sftp rebuild overlapping with other app tasks
services.rebuildService('sftp', error => { if (error) debug('Unable to rebuild sftp:', error); });
return;
}
assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);

View File

@@ -572,18 +572,20 @@ Acme2.prototype.acmeFlow = function (hostname, domain, callback) {
Acme2.prototype.getDirectory = function (callback) {
const that = this;
request.get(this.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
async.retry({ times: 3, interval: 20000 }, function (retryCallback) {
request.get(that.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
if (error) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
if (response.statusCode !== 200) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));
if (typeof response.body.newNonce !== 'string' ||
typeof response.body.newOrder !== 'string' ||
typeof response.body.newAccount !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
if (typeof response.body.newNonce !== 'string' ||
typeof response.body.newOrder !== 'string' ||
typeof response.body.newAccount !== 'string') return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${response.body}`));
that.directory = response.body;
that.directory = response.body;
callback(null);
});
retryCallback(null);
});
}, callback);
};
Acme2.prototype.getCertificate = function (vhost, domain, callback) {

View File

@@ -16,10 +16,10 @@ var assert = require('assert'),
debug = require('debug')('box:dns/namecheap'),
dns = require('../native-dns.js'),
domains = require('../domains.js'),
querystring = require('querystring'),
safe = require('safetydance'),
superagent = require('superagent'),
sysinfo = require('../sysinfo.js'),
util = require('util'),
waitForDns = require('./waitfordns.js'),
xml2js = require('xml2js');
@@ -50,11 +50,9 @@ function getQuery(dnsConfig, callback) {
});
}
function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
function getZone(dnsConfig, zoneName, callback) {
assert.strictEqual(typeof dnsConfig, 'object');
assert.strictEqual(typeof zoneName, 'string');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof callback, 'function');
getQuery(dnsConfig, function (error, query) {
@@ -89,7 +87,7 @@ function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
});
}
function setInternal(dnsConfig, zoneName, hosts, callback) {
function setZone(dnsConfig, zoneName, hosts, callback) {
assert.strictEqual(typeof dnsConfig, 'object');
assert.strictEqual(typeof zoneName, 'string');
assert(Array.isArray(hosts));
@@ -116,7 +114,10 @@ function setInternal(dnsConfig, zoneName, hosts, callback) {
}
});
superagent.post(ENDPOINT).query(query).end(function (error, result) {
// namecheap recommends sending as POSTDATA with > 10 records
const qs = querystring.stringify(query);
superagent.post(ENDPOINT).set('Content-Type', 'application/x-www-form-urlencoded').send(qs).end(function (error, result) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
var parser = new xml2js.Parser();
@@ -144,7 +145,7 @@ function upsert(domainObject, subdomain, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert(util.isArray(values));
assert(Array.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config;
@@ -154,17 +155,17 @@ function upsert(domainObject, subdomain, type, values, callback) {
debug('upsert: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
getZone(dnsConfig, zoneName, function (error, result) {
if (error) return callback(error);
// Array to keep track of records that need to be inserted
let toInsert = [];
for (var i = 0; i < values.length; i++) {
for (let i = 0; i < values.length; i++) {
let curValue = values[i];
let wasUpdate = false;
for (var j = 0; j < result.length; j++) {
for (let j = 0; j < result.length; j++) {
let curHost = result[j];
if (curHost.Type === type && curHost.Name === subdomain) {
@@ -198,9 +199,9 @@ function upsert(domainObject, subdomain, type, values, callback) {
}
}
let toUpsert = result.concat(toInsert);
const hosts = result.concat(toInsert);
setInternal(dnsConfig, zoneName, toUpsert, callback);
setZone(dnsConfig, zoneName, hosts, callback);
});
}
@@ -215,16 +216,16 @@ function get(domainObject, subdomain, type, callback) {
subdomain = domains.getName(domainObject, subdomain, type) || '@';
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
getZone(dnsConfig, zoneName, function (error, result) {
if (error) return callback(error);
// We need to filter hosts to ones with this subdomain and type
let actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
const actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
// We only return the value string
var tmp = actualHosts.map(function (record) { return record.Address; });
const tmp = actualHosts.map(function (record) { return record.Address; });
debug('get: %j', tmp);
debug(`get: subdomain: ${subdomain} type:${type} value:${JSON.stringify(tmp)}`);
return callback(null, tmp);
});
@@ -234,7 +235,7 @@ function del(domainObject, subdomain, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert(util.isArray(values));
assert(Array.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config;
@@ -244,29 +245,19 @@ function del(domainObject, subdomain, type, values, callback) {
debug('del: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
getZone(dnsConfig, zoneName, function (error, result) {
if (error) return callback(error);
if (result.length === 0) return callback();
const originalLength = result.length;
let removed = false;
for (var i = 0; i < values.length; i++) {
for (let i = 0; i < values.length; i++) {
let curValue = values[i];
for (var j = 0; j < result.length; j++) {
let curHost = result[i];
if (curHost.Type === type && curHost.Name === subdomain && curHost.Address === curValue) {
removed = true;
result.splice(i, 1); // Remove element from result array
}
}
result = result.filter(curHost => curHost.Type !== type || curHost.Name !== subdomain || curHost.Address !== curValue);
}
// Only set hosts if we actually removed a host
if (removed) return setInternal(dnsConfig, zoneName, result, callback);
if (result.length !== originalLength) return setZone(dnsConfig, zoneName, result, callback);
callback();
});

View File

@@ -1,50 +0,0 @@
'use strict';
exports = module.exports = {
start,
DEFAULT_MEMORY_LIMIT: 256 * 1024 * 1024
};
var assert = require('assert'),
async = require('async'),
infra = require('./infra_version.js'),
paths = require('./paths.js'),
shell = require('./shell.js'),
system = require('./system.js');
function start(existingInfra, serviceConfig, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const tag = infra.images.graphite.tag;
const dataDir = paths.PLATFORM_DATA_DIR;
const memoryLimit = serviceConfig.memoryLimit || exports.DEFAULT_MEMORY_LIMIT;
const memory = system.getMemoryAllocation(memoryLimit);
const cmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-p 127.0.0.1:2004:2004 \
-p 127.0.0.1:8417:8000 \
-v "${dataDir}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
shell.exec.bind(null, 'startGraphite', cmd)
], callback);
}

View File

@@ -15,13 +15,13 @@ exports = module.exports = {
// a major version bump in the db containers will trigger the restore logic that uses the db dumps
// docker inspect --format='{{index .RepoDigests 0}}' $IMAGE to get the sha256
'images': {
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.3.0@sha256:386fb755fc41edd7086f7bcb230f7f28078936f9ae4ead6d97c741df1cc194ae' },
'turn': { repo: 'cloudron/turn', tag: 'cloudron/turn:1.3.1@sha256:759cafab7625ff538418a1f2ed5558b1d5bff08c576bba577d865d6d02b49091' },
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:3.0.4@sha256:4d688c746f27b195d98f35a7d24ec01f3f754e0ca61e9de0b0bc9793553880f1' },
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:4.0.2@sha256:424081fd38ebd35f3606c64f8f99138570e5f4d5066f12cfb4142447d249d3e7' },
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:4.0.1@sha256:ad20a9a5dcb2ab132374a7c8d44b89af0ec37651cf889e570f7625b02ee85fdf' },
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:3.0.2@sha256:caaa1f7f4055ae8990d8ec65bd100567496df7e4ed5eb427867f3717a8dcbf92' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:3.2.3@sha256:fdc4aa6d2c85aeafe65eaa4243aada0cc2e57b94f6eaee02c9b1a8fb89b01dd7' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:2.4.0@sha256:953bbd8b72a9108a8526d2c0bdbba67e1e1563ff59d0a117f0884dba1576f3dd' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:3.0.1@sha256:bed9f6b5d06fe2c5289e895e806cfa5b74ad62993d705be55d4554a67d128029' },
'sftp': { repo: 'cloudron/sftp', tag: 'cloudron/sftp:3.2.0@sha256:61e8247ded1e07cf882ca478dab180960357c614472e80b938f1f690a46788c2' }
}
};

View File

@@ -21,7 +21,7 @@ function getBlocklist(callback) {
assert.strictEqual(typeof callback, 'function');
const data = safe.fs.readFileSync(paths.FIREWALL_BLOCKLIST_FILE, 'utf8');
callback(null, data);
callback(null, data || '');
}
function setBlocklist(blocklist, auditSource, callback) {
@@ -39,8 +39,8 @@ function setBlocklist(blocklist, auditSource, callback) {
if (rangeOrIP.indexOf('/') === -1) {
if (auditSource.ip === rangeOrIP) return callback(new BoxError(BoxError.BAD_FIELD, `${rangeOrIP} includes client IP. Cannot block yourself`));
} else {
const parsedRange = ipaddr.parseCIDR(rangeOrIP);
if (parsedIp.match(parsedRange)) return callback(new BoxError(BoxError.BAD_FIELD, `${rangeOrIP} includes client IP. Cannot block yourself`));
const parsedRange = ipaddr.parseCIDR(rangeOrIP); // returns [addr, range]
if (parsedRange[0].kind() === parsedIp.kind() && parsedIp.match(parsedRange)) return callback(new BoxError(BoxError.BAD_FIELD, `${rangeOrIP} includes client IP. Cannot block yourself`));
}
}
@@ -51,6 +51,6 @@ function setBlocklist(blocklist, auditSource, callback) {
shell.sudo('setBlocklist', [ SET_BLOCKLIST_CMD ], {}, function (error) {
if (error) return callback(new BoxError(BoxError.IPTABLES_ERROR, `Error setting blocklist: ${error.message}`));
callback();
callback(null);
});
}

View File

@@ -54,8 +54,8 @@ var acme2 = require('./cert/acme2.js'),
users = require('./users.js'),
util = require('util');
var NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' }),
RELOAD_NGINX_CMD = path.join(__dirname, 'scripts/reloadnginx.sh');
const NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/nginxconfig.ejs', { encoding: 'utf8' });
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
function nginxLocation(s) {
if (!s.startsWith('!')) return s;
@@ -172,7 +172,7 @@ function validateCertificate(location, domainObject, certificate) {
function reload(callback) {
if (constants.TEST) return callback();
shell.sudo('reload', [ RELOAD_NGINX_CMD ], {}, function (error) {
shell.sudo('reload', [ RESTART_SERVICE_CMD, 'nginx' ], {}, function (error) {
if (error) return callback(new BoxError(BoxError.NGINX_ERROR, `Error reloading nginx: ${error.message}`));
callback();

View File

@@ -1,17 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
nginx -s reload
fi

View File

@@ -1,18 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
systemctl restart docker
fi

37
src/scripts/restartservice.sh Executable file
View File

@@ -0,0 +1,37 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
[[ "${BOX_ENV}" != "cloudron" ]] && exit
service="$1"
if [[ "${service}" == "unbound" ]]; then
unbound-anchor -a /var/lib/unbound/root.key
systemctl restart unbound
elif [[ "${service}" == "nginx" ]]; then
nginx -s reload
elif [[ "${service}" == "docker" ]]; then
systemctl restart docker
elif [[ "${service}" == "collectd" ]]; then
systemctl restart collectd
else
echo "Unknown service ${service}"
exit 1
fi

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
unbound-anchor -a /var/lib/unbound/root.key
systemctl restart unbound
fi

View File

@@ -19,13 +19,17 @@ fi
addon="$1"
appid="${2:-}" # only valid for redis
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" ]]; then
echo "${addon} must be postgresql/mysql/mongodb/redis"
if [[ "${addon}" != "postgresql" && "${addon}" != "mysql" && "${addon}" != "mongodb" && "${addon}" != "redis" && "${addon}" != "graphite" ]]; then
echo "${addon} must be postgresql/mysql/mongodb/redis/graphite"
exit 1
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
if [[ "${addon}" == "graphite" ]]; then
readonly addon_dir="${HOME}/platformdata/graphite"
else
readonly addon_dir="${HOME}/platformdata/${addon}/${appid}"
fi
else
readonly addon_dir="${HOME}/.cloudron_test/platformdata/${addon}/${appid}"
fi

View File

@@ -41,7 +41,6 @@ var appdb = require('./appdb.js'),
debug = require('debug')('box:services'),
docker = require('./docker.js'),
fs = require('fs'),
graphite = require('./graphite.js'),
hat = require('./hat.js'),
infra = require('./infra_version.js'),
mail = require('./mail.js'),
@@ -64,6 +63,7 @@ var appdb = require('./appdb.js'),
const NOOP = function (app, options, callback) { return callback(); };
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
@@ -219,8 +219,8 @@ const SERVICES = {
},
graphite: {
status: statusGraphite,
restart: docker.restartContainer.bind(null, 'graphite'),
defaultMemoryLimit: graphite.DEFAULT_MEMORY_LIMIT
restart: restartGraphite,
defaultMemoryLimit: 256 * 1024 * 1024
},
nginx: {
status: statusNginx,
@@ -541,7 +541,7 @@ function rebuildService(id, callback) {
if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
if (id === 'mysql') return startMysql({ version: 'none' }, callback);
if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
if (id === 'graphite') return graphite.start({ version: 'none' }, serviceConfig, callback);
if (id === 'graphite') return startGraphite({ version: 'none' }, serviceConfig, callback);
// nothing to rebuild for now.
// TODO: mongo/postgresql/mysql need to be scaled down.
@@ -845,7 +845,7 @@ function startServices(existingInfra, callback) {
startPostgresql.bind(null, existingInfra),
startMongodb.bind(null, existingInfra),
startRedis.bind(null, existingInfra),
graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}),
startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}),
sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
);
} else {
@@ -857,7 +857,7 @@ function startServices(existingInfra, callback) {
if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(graphite.start.bind(null, existingInfra, servicesConfig['graphite'] || {}));
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}));
if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));
debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
@@ -1781,6 +1781,54 @@ function restoreMongoDb(app, options, callback) {
});
}
function startGraphite(existingInfra, serviceConfig, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const tag = infra.images.graphite.tag;
const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024;
const memory = system.getMemoryAllocation(memoryLimit);
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);
if (upgrading) debug('startGraphite: graphite will be upgraded');
const cmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-p 127.0.0.1:2004:2004 \
-p 127.0.0.1:8417:8000 \
-v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
(done) => {
if (!upgrading) return done();
shell.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}, done);
},
shell.exec.bind(null, 'startGraphite', cmd)
], function (error) {
// restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
if (!error) setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);
callback(error);
});
}
function setupProxyAuth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
@@ -1820,6 +1868,10 @@ function startRedis(existingInfra, callback) {
const redisName = 'redis-' + app.id;
async.series([
(done) => {
if (!upgrading) return done();
backupRedis(app, {}, done);
},
shell.exec.bind(null, 'stopRedis', `docker stop ${redisName} || true`), // redis will backup as part of signal handling
shell.exec.bind(null, 'removeRedis', `docker rm -f ${redisName} || true`),
setupRedis.bind(null, app, app.manifest.addons.redis) // starts the container
@@ -2020,7 +2072,7 @@ function statusDocker(callback) {
function restartDocker(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartdocker', [ path.join(__dirname, 'scripts/restartdocker.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2036,7 +2088,7 @@ function statusUnbound(callback) {
function restartUnbound(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartunbound', [ path.join(__dirname, 'scripts/restartunbound.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2052,7 +2104,7 @@ function statusNginx(callback) {
function restartNginx(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('reloadnginx', [ path.join(__dirname, 'scripts/reloadnginx.sh') ], {}, NOOP_CALLBACK);
shell.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2104,6 +2156,14 @@ function statusGraphite(callback) {
});
}
function restartGraphite(callback) {
assert.strictEqual(typeof callback, 'function');
docker.restartContainer('graphite', callback);
setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);
}
function teardownOauth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');

View File

@@ -21,24 +21,11 @@ var apps = require('./apps.js'),
volumes = require('./volumes.js'),
_ = require('underscore');
var gRebuildInProgress = false;
function rebuild(serviceConfig, options, callback) {
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
if (gRebuildInProgress) {
debug('waiting for other rebuild to finish');
return setTimeout(function () { rebuild(serviceConfig, options, callback); }, 5000);
}
gRebuildInProgress = true;
function done(error) {
gRebuildInProgress = false;
callback(error);
}
debug('rebuilding container');
const force = !!options.force;
@@ -48,7 +35,7 @@ function rebuild(serviceConfig, options, callback) {
const cloudronToken = hat(8 * 128);
apps.getAll(function (error, result) {
if (error) return done(error);
if (error) return callback(error);
let dataDirs = [];
result.forEach(function (app) {
@@ -89,7 +76,7 @@ function rebuild(serviceConfig, options, callback) {
if (!force && _.isEqual(currentDataDirs, dataDirs)) {
debug('Skipping rebuild, no changes');
return done();
return callback();
}
}
}
@@ -119,7 +106,7 @@ function rebuild(serviceConfig, options, callback) {
shell.exec.bind(null, 'stopSftp', 'docker stop sftp || true'),
shell.exec.bind(null, 'removeSftp', 'docker rm -f sftp || true'),
shell.exec.bind(null, 'startSftp', cmd)
], done);
], callback);
});
});
});

View File

@@ -157,7 +157,7 @@ function exists(apiConfig, backupFilePath, callback) {
s3.headObject(params, function (error) {
if (!Object.keys(this.httpResponse.headers).some(h => h.startsWith('x-amz'))) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'not a s3 endpoint'));
if (error && S3_NOT_FOUND(error)) return callback(null, false);
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error headObject ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
callback(null, true);
});
@@ -169,7 +169,7 @@ function exists(apiConfig, backupFilePath, callback) {
};
s3.listObjects(listParams, function (error, listData) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
callback(null, listData.Contents.length !== 0);
});
@@ -200,7 +200,7 @@ function download(apiConfig, backupFilePath, callback) {
ps.emit('error', new BoxError(BoxError.NOT_FOUND, `Backup not found: ${backupFilePath}`));
} else {
debug(`download: ${apiConfig.bucket}:${backupFilePath} s3 stream error.`, error);
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
ps.emit('error', new BoxError(BoxError.EXTERNAL_ERROR, `Error multipartDownload ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
}
});
@@ -231,7 +231,7 @@ function listDir(apiConfig, dir, batchSize, iteratorCallback, callback) {
async.whilst((testDone) => testDone(null, !done), function listAndDownload(whilstCallback) {
s3.listObjects(listParams, function (error, listData) {
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code));
if (error) return whilstCallback(new BoxError(BoxError.EXTERNAL_ERROR, `Error listing objects in ${dir}. Message: ${error.message} HTTP Code: ${error.code}`));
if (listData.Contents.length === 0) { done = true; return whilstCallback(); }
@@ -505,7 +505,7 @@ function testConfig(apiConfig, callback) {
var s3 = new AWS.S3(_.omit(credentials, 'retryDelayOptions', 'maxRetries'));
s3.putObject(params, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error put object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
var params = {
Bucket: apiConfig.bucket,
@@ -513,7 +513,7 @@ function testConfig(apiConfig, callback) {
};
s3.deleteObject(params, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message || error.code)); // DO sets 'code'
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Error del object cloudron-testfile. Message: ${error.message} HTTP Code: ${error.code}`));
callback();
});

View File

@@ -62,7 +62,10 @@ async.series([
database.initialize,
settings.initCache
], function (error) {
if (error) return process.exit(50);
if (error) {
console.error(error);
return process.exit(50);
}
const debug = require('debug')('box:taskworker'); // require this here so that logging handler is already setup
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
@@ -75,11 +78,14 @@ async.series([
debug(`Starting task ${taskId}. Logs are at ${logFile}`);
tasks.get(taskId, function (error, task) {
if (error) return process.exit(50);
if (error) {
debug(error);
return process.exit(50);
}
tasks.update(taskId, { percent: 2, error: null }, function (error) {
if (error) {
console.error(error);
debug(error);
return process.exit(50);
}

View File

@@ -13,11 +13,9 @@ scripts=("${SOURCE_DIR}/src/scripts/clearvolume.sh" \
"${SOURCE_DIR}/src/scripts/mvvolume.sh" \
"${SOURCE_DIR}/src/scripts/mkdirvolume.sh" \
"${SOURCE_DIR}/src/scripts/rmaddondir.sh" \
"${SOURCE_DIR}/src/scripts/reloadnginx.sh" \
"${SOURCE_DIR}/src/scripts/reboot.sh" \
"${SOURCE_DIR}/src/scripts/restart.sh" \
"${SOURCE_DIR}/src/scripts/restartdocker.sh" \
"${SOURCE_DIR}/src/scripts/restartunbound.sh" \
"${SOURCE_DIR}/src/scripts/restartservice.sh" \
"${SOURCE_DIR}/src/scripts/update.sh" \
"${SOURCE_DIR}/src/scripts/collectlogs.sh" \
"${SOURCE_DIR}/src/scripts/configurecollectd.sh" \

196
src/test/network-test.js Normal file
View File

@@ -0,0 +1,196 @@
/* global it:false */
/* global describe:false */
/* global before:false */
'use strict';
var network = require('../network.js'),
fs = require('fs'),
path = require('path'),
paths = require('../paths.js'),
BoxError = require('../boxerror.js'),
expect = require('expect.js');
describe('Network', function () {
describe('Blocklist', function () {
before(function () {
fs.mkdirSync(path.dirname(paths.FIREWALL_BLOCKLIST_FILE));
});
it('can get empty blocklist', function (done) {
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('');
done();
});
});
it('can set empty blocklist', function (done) {
network.setBlocklist('', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('\n');
done();
});
});
});
it('can set single IPv4 in blocklist', function (done) {
network.setBlocklist('192.168.178.1', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('192.168.178.1\n');
done();
});
});
});
it('can set single IPv6 in blocklist', function (done) {
network.setBlocklist('2a02:8106:2f:bb00:7afc:5703:ee71:3ef8', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('2a02:8106:2f:bb00:7afc:5703:ee71:3ef8\n');
done();
});
});
});
it('can set mixed IPs with comment in blocklist', function (done) {
network.setBlocklist('2a02:8106:2f:bb00:7afc:5703:ee71:3ef8\n# some comment\n192.168.178.1', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('2a02:8106:2f:bb00:7afc:5703:ee71:3ef8\n# some comment\n192.168.178.1\n');
done();
});
});
});
it('can set single IPv4 range in blocklist', function (done) {
network.setBlocklist('192.168.178.1/24', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('192.168.178.1/24\n');
done();
});
});
});
it('can set single IPv6 range in blocklist', function (done) {
network.setBlocklist('2001:db8::', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('2001:db8::\n');
done();
});
});
});
it('cannot set IPv4 in blocklist if source is same', function (done) {
network.setBlocklist('127.0.0.1', { ip: '127.0.0.1' }, function (error) {
expect(error).to.be.a(BoxError);
expect(error.reason).to.equal(BoxError.BAD_FIELD);
done();
});
});
it('cannot set IPv6 in blocklist if source is same', function (done) {
network.setBlocklist('2001:db8:1234::1', { ip: '2001:db8:1234::1' }, function (error) {
expect(error).to.be.a(BoxError);
expect(error.reason).to.equal(BoxError.BAD_FIELD);
done();
});
});
it('cannot set IPv4 range in blocklist if source is same', function (done) {
network.setBlocklist('127.0.0.1/32', { ip: '127.0.0.1' }, function (error) {
expect(error).to.be.a(BoxError);
expect(error.reason).to.equal(BoxError.BAD_FIELD);
done();
});
});
it('cannot set IPv6 range in blocklist if source is same', function (done) {
network.setBlocklist('2001:db8:1234:::', { ip: '2001:db8:1234::1' }, function (error) {
expect(error).to.be.a(BoxError);
expect(error.reason).to.equal(BoxError.BAD_FIELD);
done();
});
});
it('can set IPv4 in blocklist if source is IPv6', function (done) {
network.setBlocklist('192.168.178.1', { ip: '2001:db8:1234::1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('192.168.178.1\n');
done();
});
});
});
it('can set IPv6 in blocklist if source is IPv4', function (done) {
network.setBlocklist('2001:db8:1234::1', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('2001:db8:1234::1\n');
done();
});
});
});
it('can set IPv4 range in blocklist if source is IPv6', function (done) {
network.setBlocklist('192.168.178.1/32', { ip: '2001:db8:1234::1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('192.168.178.1/32\n');
done();
});
});
});
it('can set IPv6 range in blocklist if source is IPv4', function (done) {
network.setBlocklist('2001:db8:1234::', { ip: '127.0.0.1' }, function (error) {
expect(error).to.equal(null);
network.getBlocklist(function (error, result) {
expect(error).to.equal(null);
expect(result).to.equal('2001:db8:1234::\n');
done();
});
});
});
});
});

View File

@@ -39,8 +39,8 @@ describe('shell', function () {
});
it('can sudo valid program', function (done) {
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/reloadnginx.sh');
shell.sudo('test', [ RELOAD_NGINX_CMD ], {}, function (error) {
var RELOAD_NGINX_CMD = path.join(__dirname, '../src/scripts/restartservice.sh');
shell.sudo('test', [ RELOAD_NGINX_CMD, 'nginx' ], {}, function (error) {
expect(error).to.be.ok();
done();
});