Compare commits

..

421 Commits

Author SHA1 Message Date
Girish Ramakrishnan 19a098d34b remove obsolete nginx config file 2017-01-31 18:05:26 -08:00
Johannes Zellner db452d9bc0 Also send the autoupdatePattern with the stats route 2017-01-31 17:37:55 -08:00
Johannes Zellner 90efb96635 parse mailConfig blob 2017-01-31 16:36:09 -08:00
Girish Ramakrishnan 0cee6de476 Check if cloudron.conf file exists 2017-01-31 01:53:06 -08:00
Girish Ramakrishnan 854d29330c Fix email display logic again 2017-01-30 22:55:20 -08:00
Girish Ramakrishnan 34a3dd6d46 Always generate default nginx config
If we don't, https://ip won't work (caas relies on this for
health checks)
2017-01-30 16:17:07 -08:00
Girish Ramakrishnan 4787ee3301 Fix email note display logic 2017-01-30 15:49:50 -08:00
Girish Ramakrishnan 7b547e7ae9 Revert scaleway specific overlay2 support
This reverts commit 16d65d3665.

Rainloop app breaks with overlay2
2017-01-30 15:43:42 -08:00
Girish Ramakrishnan fe5e31e528 Save update json in /root
/tmp is not very secure. But the real reason is so that we can
re-run the setup script again should things fail.

/home/yellowtent/box/scripts/installer.sh --data-file /root/cloudron-update-data.json
2017-01-30 15:21:04 -08:00
Girish Ramakrishnan 841a838910 Fix text 2017-01-30 15:08:51 -08:00
Girish Ramakrishnan 4f27fe4f1e Fix email text 2017-01-30 14:24:08 -08:00
Girish Ramakrishnan 96eab86341 Applications -> Apps 2017-01-30 14:20:11 -08:00
Girish Ramakrishnan 95d7a991dc install grub2 2017-01-30 14:01:33 -08:00
Girish Ramakrishnan dc309afbbd Add --allow-downgrades
The following packages will be DOWNGRADED:
  docker-engine
0 upgraded, 0 newly installed, 1 downgraded, 0 to remove and 0 not upgraded.
E: Packages were downgraded and -y was used without --allow-downgrades.
2017-01-30 14:01:32 -08:00
Girish Ramakrishnan 16d65d3665 Use overlay2 for scaleway
https://github.com/scaleway/image-ubuntu/issues/68
2017-01-30 14:01:29 -08:00
Girish Ramakrishnan ccb340cf80 Use systemd drop in to configure docker
The built-in service files get overwritten by updates

Fixes #203
2017-01-30 12:41:07 -08:00
Girish Ramakrishnan 56b0f57e11 Move unbound systemd config to separate file 2017-01-30 12:39:19 -08:00
Girish Ramakrishnan 7c1e056152 Add 0.99.0 changes 2017-01-30 10:25:11 -08:00
Girish Ramakrishnan 08ffa99c78 Use %s instead of %d
awk's %d behaves differently with mawk (scaleway) and gawk (do)

Fixes #200
2017-01-30 10:24:26 -08:00
Johannes Zellner cdede5a009 Add dns provider information on change dialog 2017-01-29 15:00:30 -08:00
Johannes Zellner 4cadffa6ea Remove automatic appstore account signup in setup view 2017-01-29 14:39:54 -08:00
Johannes Zellner 04e13eac55 Improve appstore signup 2017-01-29 14:38:38 -08:00
Johannes Zellner 2b3ae69f63 Selectively show the correct labels when email is enabled in users view 2017-01-29 14:27:05 -08:00
Johannes Zellner 8f4813f691 Fix text for emails 2017-01-29 14:23:27 -08:00
Johannes Zellner 5b05baeced Make oauth view navbar entries links 2017-01-29 13:33:34 -08:00
Johannes Zellner 3d60e36c98 Fix top margin in oauth views 2017-01-29 13:33:34 -08:00
Johannes Zellner 40c7bd114a Add footer to oauth views 2017-01-29 13:33:34 -08:00
Johannes Zellner e0033b31f2 Fix text on settings and support views 2017-01-29 13:33:34 -08:00
Girish Ramakrishnan 2d3bdda1c8 Make tests pass 2017-01-29 13:01:09 -08:00
Girish Ramakrishnan fd40940ef5 Reserve ports <= 1023
Just being conservative here

Fixes #202
2017-01-29 12:43:24 -08:00
Girish Ramakrishnan 6d58f65a1a Reserve ssh ports 2017-01-29 12:38:58 -08:00
Johannes Zellner 44775e1791 Cleanup the graphs ui 2017-01-29 11:39:28 -08:00
Johannes Zellner 4be1f4dd73 Remove developerMode toggle in token ui 2017-01-29 10:26:14 -08:00
Johannes Zellner 93bab552c9 Fix text in certs, tokens and settings views 2017-01-29 02:50:26 -08:00
Johannes Zellner 023c03ddcd Use the same busy indicator everywhere 2017-01-29 02:01:01 -08:00
Johannes Zellner a5bffad556 Improve text on users page and remove username validation on delete 2017-01-29 01:40:33 -08:00
Johannes Zellner 836348cbc0 Improve text for app installation and configuration 2017-01-29 01:00:15 -08:00
Johannes Zellner 1ac7570cfb Autofocus appstore search field 2017-01-28 20:26:38 -08:00
Johannes Zellner 0dceba8a1c Do not reload all apps when search is empty 2017-01-28 19:57:32 -08:00
Johannes Zellner 599b070779 Remove appstore view title 2017-01-28 19:52:42 -08:00
Johannes Zellner c581e0ad09 webadmin: only show backup settings notification in settings view 2017-01-28 19:22:56 -08:00
Johannes Zellner e14b59af5d Append random query to ensure the avatar is refetched 2017-01-28 19:10:55 -08:00
Johannes Zellner eff9de3ded Adjust dns wait text 2017-01-28 18:33:37 -08:00
Johannes Zellner 4f128c6503 setup: improve text on dnssetup page 2017-01-28 18:27:22 -08:00
Johannes Zellner 8dc9d4c083 webadmin: Give better feedback on update schedule saving 2017-01-28 14:50:30 -08:00
Girish Ramakrishnan 21e3300396 tutorial: fix node version 2017-01-28 14:44:13 -08:00
Girish Ramakrishnan d136895598 Generate cert with cloudron.self CN instead of ip 2017-01-28 09:10:53 -08:00
Girish Ramakrishnan dac3eef57c Skip generating self-signed cert if we have a domain 2017-01-28 09:10:53 -08:00
Girish Ramakrishnan 2fac7dd736 delete old nginx configs on infra update
we changed the cert location and reloading nginx fails...
2017-01-28 09:10:49 -08:00
Girish Ramakrishnan 74e2415308 Make this an infra update
This has to be an infra update since the nginx configuration has
to be rewritten for the new data layout
2017-01-28 01:01:24 -08:00
Girish Ramakrishnan 41fae04b69 more 0.98.0 changes 2017-01-27 10:14:10 -08:00
Johannes Zellner 32a88a342c Add update notification mail tests 2017-01-27 09:51:26 -08:00
Johannes Zellner b5bcde5093 Fix update email tests 2017-01-27 09:51:26 -08:00
Johannes Zellner 68c36e8a18 Only send update notification mails if autoupdate is disabled 2017-01-27 09:51:26 -08:00
Johannes Zellner f6a9e1f4d8 Revert "Fix tests: we do not send mails anymore"
This reverts commit 7c72cd4399.
2017-01-27 09:51:26 -08:00
Johannes Zellner 2abd42096e Add showdown node module for update mails 2017-01-27 09:51:26 -08:00
Johannes Zellner 922e214c52 Revert "Remove now unused mailer.boxUpdateAvailable()"
This reverts commit 558093eab1.
2017-01-27 09:51:26 -08:00
Johannes Zellner 6ce8899231 Revert "Do not send box update emails to admins"
This reverts commit 865b041474.
2017-01-27 09:51:26 -08:00
Girish Ramakrishnan cbfad632c2 Handle 401 in app purchase 2017-01-27 07:47:56 -08:00
Johannes Zellner 7804aed5d7 Query graphite for 10 apps at a time at most
If many apps are installed, we may reach graphite's query string
size limit, so we get the app details now 10 at a time
2017-01-26 22:53:52 -08:00
Johannes Zellner b90b1dbbbe Show graph labels on the side 2017-01-26 22:38:00 -08:00
Johannes Zellner 020ec54264 Allow changing the autoupdate pattern in the settings view 2017-01-26 21:31:05 -08:00
Johannes Zellner 0568093a2a Add rest wrapper for autoupdate pattern route 2017-01-26 21:31:05 -08:00
Johannes Zellner c9281bf863 docs: Remove oauth proxy from the authentication docs 2017-01-26 16:17:21 -08:00
Johannes Zellner de451b2fe8 Redirect to the webadmin if update progress is 100 2017-01-26 15:52:57 -08:00
Girish Ramakrishnan ddf5c51737 Make it 90 instead 2017-01-26 15:45:07 -08:00
Johannes Zellner a33ccb32d2 Use autoupdate pattern constant in tests 2017-01-26 15:38:29 -08:00
Johannes Zellner 0b03018a7b Add constant for special 'never' autoupdate pattern 2017-01-26 15:36:24 -08:00
Johannes Zellner 1b688410e7 Add more changes 2017-01-26 15:27:29 -08:00
Johannes Zellner 6d031af012 Allow changing domain on caas always 2017-01-26 15:22:02 -08:00
Johannes Zellner 67a5151070 Also pick the token when migrating a caas cloudron to a different domain 2017-01-26 15:22:02 -08:00
Johannes Zellner a4b299bf6e Use domain validation for dns setup dialog 2017-01-26 15:22:02 -08:00
Johannes Zellner 383d1eb406 Add angular directive for domain validation input fields 2017-01-26 15:22:02 -08:00
Johannes Zellner 3901144eae Do not use the caas token as a do token 2017-01-26 15:22:02 -08:00
Johannes Zellner 317c6db1d5 Show all DNS providers also for caas 2017-01-26 15:22:02 -08:00
Johannes Zellner 1e14f8e2b9 Update and sync the footer in all webadmin pages 2017-01-26 15:22:02 -08:00
Girish Ramakrishnan 88fc7ca915 move the files and not the directory
... because box is a btrfs subvolume
2017-01-26 14:16:27 -08:00
Girish Ramakrishnan b983e205d2 Add more changes 2017-01-26 13:24:59 -08:00
Girish Ramakrishnan 9cdbc6ba36 capitalize 2017-01-26 13:08:56 -08:00
Girish Ramakrishnan 895f5f7398 Expand backup error in the mail 2017-01-26 13:03:36 -08:00
Girish Ramakrishnan f41b08d573 Add timestamp to emails 2017-01-26 12:47:23 -08:00
Girish Ramakrishnan 3e21b6cad3 Add ensureBackup log 2017-01-26 12:47:23 -08:00
Johannes Zellner 1a32482f66 Remove unused code in ami creation script 2017-01-26 11:11:07 -08:00
Johannes Zellner ee1e083f32 Add initial version of the AMI creation script 2017-01-25 14:06:26 -08:00
Girish Ramakrishnan ebd3a15140 always restart nginx 2017-01-25 12:04:52 -08:00
Girish Ramakrishnan d93edc6375 box.service: start after nginx 2017-01-25 11:28:31 -08:00
Girish Ramakrishnan 3ed17f3a2a doc: restore-url -> encryption-key 2017-01-25 09:47:25 -08:00
Girish Ramakrishnan 8d9cfbd3de Add 0.98.0 changes 2017-01-24 19:20:47 -08:00
Girish Ramakrishnan f142d34f83 Move box data out of appdata volume
This lets us restore the box if the app volume becomes full

Fixes #186
2017-01-24 13:48:09 -08:00
Girish Ramakrishnan 357ca55dec remove unused var 2017-01-24 10:41:58 -08:00
Girish Ramakrishnan d7a8731027 remove unused var 2017-01-24 10:41:38 -08:00
Girish Ramakrishnan 9117c7d141 Use $USER 2017-01-24 10:32:32 -08:00
Girish Ramakrishnan 472020f90c APPICONS_DIR -> APP_ICONS_DIR 2017-01-24 10:13:25 -08:00
Girish Ramakrishnan 2256a0dd3a group paths together 2017-01-24 10:12:05 -08:00
Girish Ramakrishnan 458b5d1e32 bump mail container 2017-01-23 16:26:44 -08:00
Girish Ramakrishnan 1e6abed4aa tests: create mail directory 2017-01-23 15:09:08 -08:00
Girish Ramakrishnan cdd4b426d5 use elif 2017-01-23 14:03:36 -08:00
Girish Ramakrishnan 75b60a2949 Make restore work without a domain
Fixes #195
2017-01-23 13:04:08 -08:00
Girish Ramakrishnan 9ab34ee43a Check for ubuntu version 2017-01-23 12:58:08 -08:00
Johannes Zellner 3c9d7706de Let the api call fail instead of explicitly checking the token 2017-01-23 21:40:06 +01:00
Johannes Zellner 8b5b954cbb Only ever send heartbeats for caas cloudrons 2017-01-23 21:38:22 +01:00
Johannes Zellner b2204925d3 Remove unused setup_start.sh creation 2017-01-23 21:36:47 +01:00
Girish Ramakrishnan 63734155f2 doc: domain arg is redundant 2017-01-23 11:10:21 -08:00
Girish Ramakrishnan eb0ae3400a send mailConfig stat 2017-01-23 10:01:54 -08:00
Johannes Zellner db8db430b9 Avoid warning from systemd by reloading the daemon after changing journald config 2017-01-23 11:01:02 +01:00
Johannes Zellner c0b2b1c26d Escape shell vars in the unbound unit file 2017-01-23 10:27:23 +01:00
Johannes Zellner 7da20e95e3 Use a proper systemd unit file for unbound
Part of #191
2017-01-23 10:14:20 +01:00
Girish Ramakrishnan f30f90e6be Stop mail container before moving the dirs 2017-01-22 21:57:34 -08:00
Girish Ramakrishnan 7f05b48bd7 Revert "Migrate mail data after downloading restore data"
This reverts commit e7c399c36a.
2017-01-22 02:42:14 -08:00
Girish Ramakrishnan ea257b95d9 Fix dirnames when backing up 2017-01-21 23:40:41 -08:00
Girish Ramakrishnan e7c399c36a Migrate mail data after downloading restore data
This allows us to be backward compatible
2017-01-21 23:33:57 -08:00
Girish Ramakrishnan d84666fb43 Move mail data out of box
This will help us with putting a size on box data

Mail container version is bumped because we want to recreate it

Part of #186
2017-01-20 20:22:08 -08:00
Girish Ramakrishnan 1eb33099af dkim directory is now automatically created in cloudron.js 2017-01-20 15:18:03 -08:00
Girish Ramakrishnan e35dbd522f More debugMode fixes 2017-01-20 09:56:44 -08:00
Girish Ramakrishnan db6474ef2a Merge readonlyRootfs and development mode into debug mode
The core issue we want to solve is to debug a running app.
Let's make it explicit that it is in debugging mode because
functions like update/backup/restore don't work.

Part of #171
2017-01-20 09:29:32 -08:00
Johannes Zellner e437671baf Add basic --help for gulp develop 2017-01-20 15:11:17 +01:00
Johannes Zellner f60d640c8e Set developmentMode default to false 2017-01-20 12:07:25 +01:00
Johannes Zellner 56c992e51b Check for 19GB instead of 20GB in cloudron-setup
This is as reporting the disk size may vary from the one selected when
creating the server. Eg EC2 20GB storage results in 21474836480 bytes
which in turn will be calculated as less than 20GB in the script
2017-01-20 11:22:43 +01:00
Girish Ramakrishnan 12ee7b9521 send readonly and dev mode fields 2017-01-19 19:01:29 -08:00
Girish Ramakrishnan c8de557ff7 More 0.97.0 changes 2017-01-19 15:59:52 -08:00
Girish Ramakrishnan 90adaf29d7 Update manifestformat (remove developmentMode)
Fixes #171
2017-01-19 15:57:29 -08:00
Girish Ramakrishnan a71323f8b3 Add developmentMode flag to appdb
Part of #171
2017-01-19 15:57:24 -08:00
Girish Ramakrishnan 155995c7f3 Allow memoryLimit to be unrestricted programmatically 2017-01-19 15:11:40 -08:00
Girish Ramakrishnan 319632e996 add readonlyRootfs to the database 2017-01-19 15:11:40 -08:00
Johannes Zellner 33d55318d8 Do not read oauth details in gulpfile from env 2017-01-19 23:41:07 +01:00
Johannes Zellner ec1abf8926 Remove creation of now unused and broken provision.sh 2017-01-19 23:18:01 +01:00
Girish Ramakrishnan 9a41f111b0 Fix failing tests 2017-01-19 12:51:16 -08:00
Girish Ramakrishnan 7ef6bd0d3f Add readonlyRootfs flag to apps table
When turned off, it will put the app in a writable rootfs. This
allows us to debug live/production apps (like change start.sh) and
just get them up and running. Once turned off, this app cannot be
updated anymore (unless the force flag is set). This way we can
then update it using the CLI if we are convinced that the upcoming
update fixes the problem.

Part of #171
2017-01-19 11:55:25 -08:00
Girish Ramakrishnan 02f0bb3ea5 Add readonly flag
Part of #171
2017-01-19 10:55:13 -08:00
Girish Ramakrishnan e12b236617 More 0.97.0 changes 2017-01-19 10:45:41 -08:00
Girish Ramakrishnan 6662a4d7d6 Collect every 60min
If we are crashing so often, we have bigger problems...
2017-01-19 10:11:36 -08:00
Girish Ramakrishnan 85315d8fc5 Do not stash more than 2mb in log file
For reference, each crash increases the file size by 112K.
So we can store around 20 crashes.

Fixes #190
2017-01-19 10:09:49 -08:00
Girish Ramakrishnan 9f5a7e4c08 cloudron-setup: keep the cursor in the same line 2017-01-19 10:09:47 -08:00
Girish Ramakrishnan ea0e61e6a4 Remove unused function 2017-01-19 09:12:54 -08:00
Johannes Zellner c301e9b088 Show better backup progress in settings ui 2017-01-19 17:30:01 +01:00
Johannes Zellner 70e861b106 Distinguish between app task and backup in progress 2017-01-19 17:08:18 +01:00
Johannes Zellner f5c6862627 Improve backup creation UI
- Do not prompt the user if he really wants to create a backup
- Show error message if a backup can't be created at the moment
2017-01-19 17:04:22 +01:00
Johannes Zellner d845f1ae5b Indicate in the mail subject if it contains more than one crash 2017-01-19 16:52:44 +01:00
Johannes Zellner 7c7d67c6c2 Appending the log separator looks nicer 2017-01-19 16:30:20 +01:00
Johannes Zellner c9fcbcc61c No need to print the unitName in the separator 2017-01-19 15:42:30 +01:00
Johannes Zellner 9ac06e7f85 Stash crash logs for up to 30min
This avoids spamming us with crash logs

Part of #190
2017-01-19 15:23:20 +01:00
Johannes Zellner 6eafac2cad Do not rely on fdisk's human readable unit output
Using the bytes output will fix an issue where the disk size is reported
either as terabyte or also megabyte.
So far we disallowed 1TB disks but allowed 20MB disks.
2017-01-19 13:53:50 +01:00
Johannes Zellner 60cb0bdfb1 Add 0.97.0 changes 2017-01-19 13:17:09 +01:00
Johannes Zellner 979956315c Only ever remove the app icon on uninstall 2017-01-19 12:39:31 +01:00
Johannes Zellner 62ba031702 Skip icon download without an appStoreId 2017-01-19 12:38:41 +01:00
Girish Ramakrishnan 284cb7bee5 doc: remove double header 2017-01-18 23:41:41 -08:00
Girish Ramakrishnan 735c22bc98 doc: more cleanup on selfhosting doc 2017-01-18 23:37:33 -08:00
Girish Ramakrishnan a2beed01a1 doc: move cli section down 2017-01-18 23:31:21 -08:00
Girish Ramakrishnan 93fc6b06a2 doc: add alerts section 2017-01-18 23:14:22 -08:00
Girish Ramakrishnan a327ce8a82 doc: cleanup selfhosting guide 2017-01-18 23:09:06 -08:00
Girish Ramakrishnan f8374929ac generate mail.ini and not mail_vars.ini 2017-01-18 09:11:34 -08:00
Girish Ramakrishnan 5f93290fc7 Fix crash 2017-01-18 08:43:11 -08:00
Johannes Zellner 4d139232bf caas always has a valid appstore token to show the appstore view 2017-01-18 13:05:25 +01:00
Girish Ramakrishnan 804947f039 use dir mount instead of file mount
file mounting is fraught with problems wrt change notifications.

first, we must be careful that the inode does not change.

second, changes outside container do not result in fs events inside the container.
haraka cache settings files and relies on fs events. So, even
though the file gets updated inside the container, haraka doesn't
see it.

https://github.com/docker/docker/issues/15793
2017-01-17 23:59:23 -08:00
Girish Ramakrishnan 89fb2b57ff recreate mail config when we have owner email id 2017-01-17 23:34:05 -08:00
Girish Ramakrishnan 1262d11cb3 Prefix event enum with EVENT_ 2017-01-17 23:18:08 -08:00
Girish Ramakrishnan 1ba72db4f8 Add prerelease option 2017-01-17 21:23:57 -08:00
Girish Ramakrishnan 7d2304e4a1 Move 0.94.1 changes 2017-01-17 11:01:12 -08:00
Girish Ramakrishnan ebf1dc1b08 listen for cert changed events and restart mail container
neither haraka nor dovecot restarts on cert change

Fixes #47
2017-01-17 10:59:00 -08:00
Girish Ramakrishnan ce31f56eb6 Keep configurePlainIP private 2017-01-17 10:32:46 -08:00
Girish Ramakrishnan 7dd52779dc generate cert files for mail container
this allows us to not track paths anymore

part of #47
2017-01-17 10:21:44 -08:00
Girish Ramakrishnan 2eb5cab74b enable route to set admin certificate 2017-01-17 10:01:05 -08:00
Girish Ramakrishnan db50382b18 check user cert and then the le cert
part of #47
2017-01-17 09:59:40 -08:00
Girish Ramakrishnan 32b061c768 user certs are saved with extension user.cert/key
part of #47
2017-01-17 09:59:30 -08:00
Girish Ramakrishnan 740e85d28c make code a bit readable 2017-01-17 09:57:15 -08:00
Girish Ramakrishnan 568a7f814d rename func 2017-01-17 09:51:04 -08:00
Girish Ramakrishnan b99438e550 remove unused function 2017-01-17 09:18:48 -08:00
Girish Ramakrishnan bcdf90a8d9 typo 2017-01-17 09:17:09 -08:00
Girish Ramakrishnan 536c16929b Remove showTutorial 2017-01-17 09:11:34 -08:00
Johannes Zellner d392293b50 Remove unused require 2017-01-17 16:32:22 +01:00
Johannes Zellner 16371d4528 Use the apps.js layer instead of the raw appdb in apphealthmonitor.js 2017-01-17 16:32:12 +01:00
Johannes Zellner cdd0b48023 Remove redundant information in user event email 2017-01-17 16:16:39 +01:00
Johannes Zellner 15cac726c4 Use the correct var 2017-01-17 16:15:19 +01:00
Johannes Zellner 6dc69a4d5d Streamline the email subject lines 2017-01-17 16:02:42 +01:00
Johannes Zellner c52dfcf52f Adjust user deletion dialog based on feedback 2017-01-17 16:02:26 +01:00
Johannes Zellner eaac13b1c1 app.fqdn already takes care of altDomain 2017-01-17 16:01:10 +01:00
Johannes Zellner 3e83f3d4ee Put our link to all mails and sync the formatting 2017-01-17 15:47:18 +01:00
Johannes Zellner 3845a8f02b HTMLify user added email to admins 2017-01-17 15:34:50 +01:00
Johannes Zellner c932be77f8 Mention that backup storage configuration is about S3 configuration 2017-01-17 15:23:52 +01:00
Johannes Zellner d89324162f Remove tutorial route tests 2017-01-17 13:05:47 +01:00
Johannes Zellner a0ef86f287 Remove now unused tutorial route and business logic
We can bring that back again if needed
2017-01-17 12:50:59 +01:00
Johannes Zellner 7255a86b32 Remove welcome tutorial css parts 2017-01-17 12:47:05 +01:00
Johannes Zellner 81862bf934 Remove the tutorial components and logic 2017-01-17 12:44:07 +01:00
Johannes Zellner 81b7e5645c This is not an error if a cloudron is not yet registered
The change avoids scary logs with backtrace
2017-01-17 11:41:50 +01:00
Johannes Zellner 801367b68d Use specific functions for configureAdmin (with domain) and configurePlainIp (always)
This prevents from double configuring on startup on caas cloudrons
2017-01-17 11:38:33 +01:00
Johannes Zellner f2e8f325d1 Correct debug lines for cert renewal or not existing 2017-01-17 10:35:42 +01:00
Girish Ramakrishnan 138743b55f More 0.94.1 changes 2017-01-16 16:39:18 -08:00
Johannes Zellner 7f8db644d1 Use in-memory rate limit
Related to #187
2017-01-16 16:49:03 +01:00
Johannes Zellner c7e410c41b Add express-rate-limit module 2017-01-16 16:48:43 +01:00
Johannes Zellner 08f3b0b612 Add rate limit test 2017-01-16 16:48:17 +01:00
Johannes Zellner a2782ef7a6 Normal users do not have access to the tutorial 2017-01-16 12:59:21 +01:00
Johannes Zellner 34fac8eb05 Do not show appstore for non-admins 2017-01-16 12:58:05 +01:00
Johannes Zellner 56338beae1 Ensure the appstore login input field has focus 2017-01-16 12:53:34 +01:00
Johannes Zellner 17e9f3b41d Move error label in app error dialog to the title 2017-01-16 12:47:58 +01:00
Johannes Zellner 2c06b9325f Add missing callback 2017-01-16 12:35:26 +01:00
Johannes Zellner 2dfb91dcc9 Embed the appstore login instead of a dialog 2017-01-16 12:34:33 +01:00
Johannes Zellner 9f20dfb237 Allow installation on reported main memory of 990 2017-01-16 10:36:16 +01:00
Girish Ramakrishnan da2aecc76a Save generated fallback certs as part of the backup
this way we don't get a new cert across restarts
2017-01-14 13:18:54 -08:00
Girish Ramakrishnan 7c72cd4399 Fix tests: we do not send mails anymore 2017-01-14 13:01:21 -08:00
Girish Ramakrishnan 5647b0430a Simplify onConfigured logic
We had all this logic because we allowed the user to create a CaaS
cloudron with a custom domain from the appstore. This flow has changed
now.

One can only set the DNS config after verification. Only thing that
is required is a domain check.
2017-01-14 12:59:16 -08:00
Girish Ramakrishnan 7c94543da8 bump test version 2017-01-13 20:06:15 -08:00
Girish Ramakrishnan 2118952120 send the ownerType as part of mailbox query 2017-01-13 19:53:58 -08:00
Girish Ramakrishnan d45927cdf4 unbound: listen on 0.0.0.0 2017-01-13 15:22:54 -08:00
Johannes Zellner c8e99e351e Update the selfhosting installation docs to reflect the dns setup changes 2017-01-13 15:15:25 +01:00
Girish Ramakrishnan fb56237122 0.94.1 changes 2017-01-12 19:28:27 -08:00
Girish Ramakrishnan 89152fabde use latest test image 2017-01-12 19:28:27 -08:00
Girish Ramakrishnan 726463d497 use le-staging in dev for better testing 2017-01-12 19:28:27 -08:00
Girish Ramakrishnan 055e41ac90 Make unbound reply on cloudron network
Because of the docker upgrade, dnsbl queries are failing again
since we are not using the unbound server from the containers.

For some reason, docker cannot query 127.0.0.1 (https://github.com/docker/docker/issues/14627).

Make unbound listen on the cloudron network and let docker proxy
DNS calls to unbound (docker always uses the embedded DNS server
when using UDN).

See also #130
2017-01-12 19:28:23 -08:00
Girish Ramakrishnan 878878e5e4 Bump mail container for testing 2017-01-12 12:04:24 -08:00
Girish Ramakrishnan 7742c8a58e Remove unused function 2017-01-12 11:50:59 -08:00
Girish Ramakrishnan 04476999f7 Fix grammar 2017-01-12 11:48:03 -08:00
Girish Ramakrishnan 5bff7ebaa1 remove dead comment 2017-01-12 11:46:52 -08:00
Girish Ramakrishnan 44742ea3ae Fix bug where cloudron cannot be setup if initial dns credentials were invalid
To reproduce:
* https://ip
* provide invalid dns creds. at this point, config.fqdn gets set already
* cannot setup anymore
2017-01-12 11:46:52 -08:00
Girish Ramakrishnan d6ea7fc3a0 Move setupDns to cloudron.js 2017-01-12 11:46:49 -08:00
Girish Ramakrishnan 2b49cde2c2 cloudron-setup: validate tlsProvider 2017-01-12 10:31:54 -08:00
Johannes Zellner 1008981306 Adapt to new notification library version
the notification template is now in the html pages itself
2017-01-12 16:00:57 +01:00
Johannes Zellner 146f3ad00e Do not show 0 progress in update
If the initial app takes very long to backup, do not show 0 progress for
a long time
2017-01-12 16:00:57 +01:00
Johannes Zellner 5219eff190 Remove 'app at' for app backup message 2017-01-12 16:00:57 +01:00
Johannes Zellner abfd7b8aea Update angular notification library to support maxCount 2017-01-12 16:00:57 +01:00
Johannes Zellner d98f64094e Set the correct progress percentage 2017-01-12 16:00:56 +01:00
Johannes Zellner a8d254738e Only set the update page title to Cloudron 2017-01-12 16:00:56 +01:00
Johannes Zellner 1c9f2495e3 Show the detailed backup progress during update
Fixes #157
2017-01-12 16:00:34 +01:00
Johannes Zellner aa4d95f352 Remove unused node module showdown 2017-01-12 13:13:37 +01:00
Johannes Zellner 558093eab1 Remove now unused mailer.boxUpdateAvailable() 2017-01-12 13:11:18 +01:00
Johannes Zellner 865b041474 Do not send box update emails to admins
Fixes #160
2017-01-12 13:09:12 +01:00
Johannes Zellner 1888319313 Send altDomain as Host header if it is set
At least nextcloud will respond with 400 if the Host header is not
matching
2017-01-12 10:45:16 +01:00
Girish Ramakrishnan 0be7679619 Hold the docker package
One idea was to use docker binary packages. However, docker binaries
are statically linked and are incompatible with devicemapper.

See https://github.com/docker/docker/issues/14035 for more info.

Holding will let the user turn on automatic updates for non-security
packages as well.

Fixes #183
2017-01-12 01:09:19 -08:00
Girish Ramakrishnan bbef6c2bc2 Fix docker storage driver detection
When docker is not passed the --storage-driver option, it tries to
auto detect the storage driver. Roughly:
1. If existing storage paths like /var/lib/docker/aufs exist, it will
   choose that driver.

2. It has a priority list of drivers to scan in order (driver.go)
   As it stands the ordering is aufs, btrfs and then devicemapper.

3. Docker will attempt to "init" each driver. aufs, for example,
   tests for insmod'ing aufs and also looks into /proc/filesystems.

The fact that we installed aufs-tools and linux drivers (for aufs
driver) was a programming error since we want docker to use devicemapper.

However, what is curious is why docker still ended up choosing devicemapper
despite having all aufs requirements (as we do not pass --storage-driver explicitly).

The answer is that "apt-get install aufs-tool linux-image-* docker-engine"
can install packages in any order! This means there is a race on how docker
chooses the storage engine. In most cases, since linux-image-* is a big package,
docker gets to install first and ends up using devicemapper since aufs module is not found yet.
For some people, linux-image-* possibly installs first and thus docker
chooses aufs!

Mystery solved.

Part of #183
2017-01-12 01:08:22 -08:00
Girish Ramakrishnan be59267747 Enable unattended upgrades
This is usually installed and enabled by default

https://help.ubuntu.com/community/AutomaticSecurityUpdates

Note that automatic reboot is not enabled. Not clear if we should be.

Part of #183
2017-01-11 22:36:51 -08:00
Girish Ramakrishnan b4477d26b7 Reload the docker service file 2017-01-11 15:40:16 -08:00
Girish Ramakrishnan ce0afb3d80 Explicitly specify the storage driver as devicemapper
For reasons unknown, the images built by the buildbot (which currently
uses btrfs) do not work with devicemapper.

Existing cloudrons with aufs will not be affected because docker will
just ignore it.

devmapper: Base device already exists and has filesystem xfs on it. User specified filesystem will be ignored.

Existing AUFS users can move to devicemapper either by restoring to
a new cloudron (recommended) OR
* systemctl stop box
* systemctl stop docker
* rm -rf /var/lib/docker
* Edit /home/yellowtent/data/INFRA_VERSION. Change the "version" field to "1"
* systemctl start docker
* systemctl start box # this will download images all over

Fixes #182
2017-01-11 14:53:11 -08:00
Johannes Zellner 0b5cd304ea We also don't need to prefix with my. when using the adminFqdn 2017-01-11 23:09:06 +01:00
Girish Ramakrishnan e54ad97fa7 cloudron-setup: set the apiServerOrigin for --env 2017-01-11 12:36:01 -08:00
Girish Ramakrishnan 66960ea785 cloudron-setup: Add --env flag 2017-01-10 20:42:24 -08:00
Girish Ramakrishnan 72dd3026ca collect docker info output
this has information like the storage driver
2017-01-10 20:42:24 -08:00
Girish Ramakrishnan 4c719de86c restart docker only if config changed 2017-01-10 18:50:21 -08:00
Girish Ramakrishnan c7a0b017b4 Fix crash 2017-01-10 18:50:21 -08:00
Johannes Zellner 91c931b53c Revert "Remove broken external domain validation"
This reverts commit 9b1b833fac.
2017-01-11 03:46:41 +01:00
Girish Ramakrishnan 6f2b2adca9 Enable apparmor explicitly 2017-01-10 18:15:10 -08:00
Girish Ramakrishnan 3176bc1afa Fix failing tests 2017-01-10 16:54:15 -08:00
Girish Ramakrishnan b929adf2dd Fix migration 2017-01-10 16:23:01 -08:00
Girish Ramakrishnan f3d3b31bed Fix error return type 2017-01-10 16:16:42 -08:00
Girish Ramakrishnan f17eaaf025 Add TODO note 2017-01-10 16:16:37 -08:00
Girish Ramakrishnan 80d65acd0d Set the domain only during dns setup
If we change the domain when dns settings are changed, then migration
fails because we callout to appstore API via the domain (for example,
backup url call will fail because it uses the new domain name).
2017-01-10 16:16:32 -08:00
Girish Ramakrishnan ba02d333d1 remove unused requires 2017-01-10 16:16:25 -08:00
Johannes Zellner 9b9d30c092 Remove commented out section of the nginx.conf 2017-01-11 00:09:51 +01:00
Johannes Zellner d47de31744 Rename nakeddomain.html to noapp.html 2017-01-11 00:08:13 +01:00
Johannes Zellner edc7efae5f Do not overwrite the provider previously set 2017-01-11 00:02:19 +01:00
Johannes Zellner 18007be9e1 Also use adminFqdn in setup.js 2017-01-10 23:58:28 +01:00
Johannes Zellner d68ae4866c The adminFqdn already has the my. part 2017-01-10 23:58:28 +01:00
Girish Ramakrishnan f4b635a169 Fix error type 2017-01-10 14:21:36 -08:00
Johannes Zellner d674d72508 Add missing https:// for adminFqdn 2017-01-10 22:54:45 +01:00
Johannes Zellner 6ee76f8ee4 No need for my. my- magic anymore 2017-01-10 22:54:45 +01:00
Johannes Zellner 06338e0a1f Redirect to naked domain if we are not on a webadmin origin 2017-01-10 22:54:45 +01:00
Johannes Zellner 349c261238 Remove configStatus.domain and replace with toplevel adminFqdn 2017-01-10 22:54:45 +01:00
Girish Ramakrishnan eb057fb399 Add note that port 25 is blocked on some DO accounts 2017-01-10 12:38:34 -08:00
Johannes Zellner 5d739f012c Never use the cloudron email account for LetsEncrypt 2017-01-10 18:14:59 +01:00
Johannes Zellner 741d56635f show a maximum of 3 error notifications at once 2017-01-10 15:58:15 +01:00
Johannes Zellner 35404a2832 Return expected dns records also if we hit NXDOMAIN 2017-01-10 15:51:53 +01:00
Johannes Zellner 99505fc287 Call the correct function to get dns email records in the webadmin 2017-01-10 15:43:14 +01:00
Johannes Zellner a20b331095 Convert settings JSON to objects 2017-01-10 15:24:16 +01:00
Johannes Zellner 06a9a82da0 Disable query for non approved apps 2017-01-10 14:01:46 +01:00
Johannes Zellner 03383eecbc Also remind the user on app install if manual dns is used 2017-01-10 13:47:58 +01:00
Johannes Zellner 89ae1a8b92 Ensure wildcard backend is pre-selected on configure 2017-01-10 13:43:33 +01:00
Johannes Zellner 7061195059 Show different text for manual and wildcard dns backends 2017-01-10 13:41:20 +01:00
Johannes Zellner 9556d4b72c Fix the busy state of the dns backend change form 2017-01-10 13:34:00 +01:00
Johannes Zellner dd764f1508 Sync the dns provider selection in the ui parts 2017-01-10 13:16:25 +01:00
Johannes Zellner 0a154339e6 Fix the normal case of changing dns provider 2017-01-10 13:15:14 +01:00
Johannes Zellner 2502b94f20 Remind the user to setup the DNS record on app configuration 2017-01-10 13:11:37 +01:00
Johannes Zellner 9b1b833fac Remove broken external domain validation 2017-01-10 13:05:06 +01:00
Johannes Zellner 848ca9817d Give better DNS error feedback after app installation 2017-01-10 13:01:15 +01:00
Johannes Zellner 9a159b50c6 Do not recommend manual dns backend 2017-01-10 12:34:28 +01:00
Johannes Zellner 11fb0d9850 Verify the my.domain instead of the zone 2017-01-10 12:30:14 +01:00
Johannes Zellner 3f925e5b96 Improve manual dns backend error message 2017-01-10 12:09:30 +01:00
Johannes Zellner 714ae18658 Fix the manual dns verification 2017-01-10 12:07:32 +01:00
Johannes Zellner 226164c591 This error is already a SubdomainError 2017-01-10 11:40:05 +01:00
Johannes Zellner 1d44d0a987 Remove dns validation code in settings.js 2017-01-10 11:33:33 +01:00
Johannes Zellner babfb5efbb Make the verifyDnsConfig() api return the valid credentials 2017-01-10 11:32:44 +01:00
Johannes Zellner badbb89c92 Add INVALID_PROVIDER to SubdomainError 2017-01-10 11:32:24 +01:00
Johannes Zellner 50e705fb25 Remove unused requires 2017-01-10 11:14:16 +01:00
Johannes Zellner b9e0530ced Fill in the noops in the other backends 2017-01-10 11:13:33 +01:00
Johannes Zellner 9c793f1317 Make the new interface available in subdomains.js 2017-01-10 11:13:02 +01:00
Johannes Zellner cef93012bf Implement verifyDnsConfig() for manual dns 2017-01-10 11:12:38 +01:00
Johannes Zellner bd099cc844 Implement verifyDnsConfig() for route53 2017-01-10 11:12:25 +01:00
Johannes Zellner c1029ba3b0 Implement verifyDnsConfig() for digitalocean 2017-01-10 11:12:13 +01:00
Johannes Zellner 152025baa7 Add verifyDnsConfig() to the dns backend where it belongs 2017-01-10 11:11:41 +01:00
Johannes Zellner 94f0f48cba Send backend provider with stats route 2017-01-10 10:22:47 +01:00
Girish Ramakrishnan 9b5c312aa1 Disable Testing tab
Part of #180
2017-01-09 21:08:01 -08:00
Girish Ramakrishnan fdb488a4c3 installApp bundle first because syncConfigState might block 2017-01-09 19:06:32 -08:00
Girish Ramakrishnan 69536e2263 Do not show multiple Access control sections for email apps 2017-01-09 19:00:15 -08:00
Girish Ramakrishnan 3f8ea6f2ee Make app auto install as part of async flow
It was called in nextTick() and was done async but had no chance to
run because the platform.initialize() which is sync was blocking it
2017-01-09 18:24:41 -08:00
Girish Ramakrishnan 3b035405b0 debug.formatArgs API has changed 2017-01-09 16:41:04 -08:00
Girish Ramakrishnan 7b1a6e605b ensure backup directory exists
this is because the filename can now contain subpaths
2017-01-09 16:09:54 -08:00
Girish Ramakrishnan 26ed331f8e Add default clients in clients.js 2017-01-09 15:41:29 -08:00
Johannes Zellner 29581b1f48 cog is a circle 2017-01-09 22:58:01 +01:00
Girish Ramakrishnan 16ea13b88c Check status for cloudron to be ready 2017-01-09 13:29:17 -08:00
Girish Ramakrishnan 2311107465 remove misleading comments 2017-01-09 12:35:39 -08:00
Girish Ramakrishnan 35cf9c454a taskmanager: track paused state 2017-01-09 12:26:18 -08:00
Girish Ramakrishnan 4c2a57daf3 0.94.0 changes 2017-01-09 11:26:29 -08:00
Girish Ramakrishnan ed9889af11 Add note about alive and heartbeat job 2017-01-09 11:14:11 -08:00
Girish Ramakrishnan 89dc2ec3f6 Remove configured event 2017-01-09 11:02:33 -08:00
Girish Ramakrishnan 7811359b2f Move cron.initialize to cloudron.js 2017-01-09 11:00:09 -08:00
Girish Ramakrishnan 21c66915a6 Refactor taskmanager resume flow 2017-01-09 10:49:34 -08:00
Girish Ramakrishnan e3e99408d5 say the container was restarted automatically 2017-01-09 10:46:43 -08:00
Girish Ramakrishnan 01f16659ac remove unused requires 2017-01-09 10:33:23 -08:00
Girish Ramakrishnan 9e8f120fdd Make ensureFallbackCertificate error without a domain 2017-01-09 10:28:28 -08:00
Girish Ramakrishnan 3b9b9a1629 ensure fallback cert exists before platform is started 2017-01-09 10:28:28 -08:00
Girish Ramakrishnan 9e2f43c3b1 initialize platform only when domain is available 2017-01-09 10:28:25 -08:00
Girish Ramakrishnan 588bb2df2f Pull docker images in initialize script
This allows us to move platform.initialize to whenever the domain
is setup. Thus allowing box code to startup faster the first time
around.
2017-01-09 09:22:23 -08:00
Girish Ramakrishnan 3c55ba1ea9 doc: clarify httpPort 2017-01-09 09:17:35 -08:00
Johannes Zellner 2a86216a4a Fix race for mailConfig in settings view 2017-01-09 13:58:11 +01:00
Johannes Zellner e3ea2323c5 Defer configure checks to after tutorial
Fixes #154
2017-01-09 13:45:01 +01:00
Johannes Zellner 6b55f3ae11 Highlight the domain for the manual/wildcard DNS setup 2017-01-09 13:37:54 +01:00
Johannes Zellner f3496a421b Remove tooltip for memory requirement 2017-01-09 11:53:18 +01:00
Girish Ramakrishnan a4bba37606 Call mailer.start on configured 2017-01-07 23:40:34 -08:00
Girish Ramakrishnan 56c4908365 restart mail container on configure event 2017-01-07 23:33:20 -08:00
Girish Ramakrishnan 18f6c4f2cd Refactor configure event handling into onConfigured event 2017-01-07 23:31:29 -08:00
Girish Ramakrishnan d0ea1a4cf4 Send bounce alerts to cloudron owner
Fixes #166
2017-01-07 23:24:12 -08:00
Girish Ramakrishnan aa75824cc6 Pass alerts_from and alerts_to to mail container
Part of #166
2017-01-07 22:31:40 -08:00
Girish Ramakrishnan 61d5005c4b Use mail_vars.ini to pass mail container config 2017-01-07 16:42:24 -08:00
Girish Ramakrishnan 72d58f48e4 Remove invalid event 2017-01-07 14:28:33 -08:00
Girish Ramakrishnan 3f3b97dc16 Send oom email to cloudron admins
Part of #166
2017-01-07 13:52:33 -08:00
Girish Ramakrishnan 8a05fdcb10 Fix language 2017-01-07 12:35:26 -08:00
Girish Ramakrishnan 6fd3466db1 Send cert renewal errors to support@cloudron.io as well
Part of #166
2017-01-07 12:29:43 -08:00
Girish Ramakrishnan f354baf685 Inc -> UG 2017-01-07 11:59:13 -08:00
Girish Ramakrishnan d009acf8e0 doc: upgrading from filesystem backend
Fixes #156
2017-01-07 11:57:37 -08:00
Johannes Zellner fd479d04a0 Fix nginx config to make non vhost configs default_server
Nginx does not match on the ip as a vhost. This now basically replaces
the commented out section in the nginx.conf
2017-01-06 22:09:10 +01:00
Girish Ramakrishnan a3dc641be1 Skip sending heartbeat if we have no fqdn 2017-01-06 09:42:56 -08:00
Johannes Zellner a59f179e9d warn the user in manual and wildcard cert case 2017-01-06 18:42:22 +01:00
Johannes Zellner 4128bc437b Ensure text is center in the footer 2017-01-06 18:23:59 +01:00
Johannes Zellner e1b176594a The matching location needs to be my.domain 2017-01-06 18:17:27 +01:00
Johannes Zellner 35b11d7b22 Add footers to the setup views 2017-01-06 17:57:22 +01:00
Johannes Zellner bd65e1f35d Put some redirects in the setup pages to end up in the correct one always 2017-01-06 17:25:24 +01:00
Johannes Zellner a243478fff Create separate ip and my. domain nginx configs 2017-01-06 16:01:49 +01:00
Johannes Zellner f0fdc00e78 Always setup an nginx config for ip as the webadmin config 2017-01-06 12:42:21 +01:00
Johannes Zellner a21210ab29 Fix bug where we check for mail dns records without mail being enabled 2017-01-06 12:20:48 +01:00
Johannes Zellner 684e7df939 At least resolve nameservers for dns settings validator 2017-01-06 11:08:10 +01:00
Johannes Zellner 9be5f5d837 If we already have a domain set, directly wait for dns 2017-01-06 10:54:56 +01:00
Johannes Zellner 6c5fb67b58 Give the actual domain in status if set
This allows the webui served up on ip to redirect correctly
2017-01-06 10:47:42 +01:00
Girish Ramakrishnan 616ec408d6 Remove redundant reboot message 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 5969b4825c dns_ready is not required since it is part of status 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 64c888fbdb Send config state as part of the status 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 8a0fe413ba Visit IP if no domain provided 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 270a1f4b95 Merge gIsConfigured into config state 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 8f4ed47b63 track the config state in cloudron.js 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 09997398b1 Disallow dnsSetup if domain already set 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 0b68d1c9aa Reconfigure admin when domain gets set 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan cc9904c8c7 Move nginx config and cert generation to box code 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 16ab523cb2 Store IP certs as part of nginx cert dir (otherwise, it will get backed up) 2017-01-06 10:23:10 +01:00
Girish Ramakrishnan 20a75b7819 tag -> prefix 2017-01-05 23:20:02 -08:00
Girish Ramakrishnan 49e299b62d Add ubuntu-standard
Fixes #170
2017-01-05 14:05:46 -08:00
Girish Ramakrishnan 98a2090c72 install curl and python before using them 2017-01-05 14:03:30 -08:00
Johannes Zellner 38c542b05a Add route to check dns and cert status 2017-01-05 20:37:26 +01:00
Johannes Zellner fc5fa621f3 Ensure the dkim folder for the domain exists 2017-01-05 17:14:27 +01:00
Johannes Zellner 6ec1a75cbb Ensure Dkim key in the readDkimPublicKeySync() function 2017-01-05 17:04:03 +01:00
Johannes Zellner bbba16cc9a make input fields shorter 2017-01-05 16:35:38 +01:00
Johannes Zellner 564d3d563c Preselect dns provider if possible 2017-01-05 16:32:34 +01:00
Johannes Zellner a858a4b4c1 Let the user know what we are waiting for 2017-01-05 16:31:23 +01:00
Johannes Zellner 2d6d8a7ea8 Create fallback certs only if fqdn is already set 2017-01-05 16:29:10 +01:00
Johannes Zellner 5b5ed9e043 Always create box/mail/dkim folder 2017-01-05 16:15:00 +01:00
Johannes Zellner 801c40420c Create setup nginx config and cert for ip setup 2017-01-05 16:02:03 +01:00
Johannes Zellner c185b3db71 Set correct busy states in setup views 2017-01-05 15:59:07 +01:00
Johannes Zellner 0f70b73e81 Cleanup some of the setup html code 2017-01-05 14:43:18 +01:00
Johannes Zellner d9865f9b0f Allow box to startup without fqdn 2017-01-05 14:02:04 +01:00
Johannes Zellner 59deb8b708 Do not fire configured event if no fqdn is set 2017-01-05 13:05:36 +01:00
Johannes Zellner 617fa98dee Further improve the dns setup ui 2017-01-05 12:31:37 +01:00
Johannes Zellner c9cb1cabc4 Improve dns setup ui 2017-01-05 12:08:52 +01:00
Johannes Zellner 92ab6b5aa4 Cleanup the dns setup code 2017-01-05 11:53:45 +01:00
Johannes Zellner a66f250350 Redirect to setupdns.html for non caas if not activated 2017-01-05 11:53:23 +01:00
Johannes Zellner 39200f4418 Add client.js wrapper for dns setup route 2017-01-05 11:53:05 +01:00
Johannes Zellner 4f1c7742ef Add public route for dns setup
This route is only available until the Cloudron is activated and also
only in self-hosted ones
2017-01-05 11:52:38 +01:00
Johannes Zellner e812cbcbe9 add setupdns to gulpfile 2017-01-05 11:17:39 +01:00
Johannes Zellner 2e0670a5c1 Strip dns setup from normal setup.html 2017-01-05 11:02:52 +01:00
Johannes Zellner 92c92db595 Add separate file for dns setup 2017-01-05 11:02:43 +01:00
Johannes Zellner 1764567e1f Make domain optional in cloudron-setup 2017-01-05 10:49:41 +01:00
Johannes Zellner 7eeb8bcac1 Only mark dns fields red if dirty and invalid 2017-01-05 10:49:41 +01:00
Johannes Zellner c718b4ccdd ngEnter directive is now unused 2017-01-05 10:49:41 +01:00
Johannes Zellner 4f5ffc92a6 Cleanup setup.js 2017-01-05 10:49:41 +01:00
Johannes Zellner 4c485f7bd0 Remove old setup wizard step templates 2017-01-05 10:49:41 +01:00
Johannes Zellner 7076a31821 Also send domain with dns credentials 2017-01-05 10:49:41 +01:00
Johannes Zellner 68965f6da3 Change the location to the new domain at the end of setup 2017-01-05 10:49:41 +01:00
Johannes Zellner b6a545d1f5 Add separate entry for wildcard in dns setup
Fixes #168
2017-01-05 10:49:41 +01:00
Johannes Zellner c0afff4d13 Add view for dns credentials in setup 2017-01-05 10:49:41 +01:00
Johannes Zellner 604faa6669 Skip forward for caas after admin setup 2017-01-05 10:49:41 +01:00
Johannes Zellner d94d1af7f5 Avoid angular flicker in setup 2017-01-05 10:49:41 +01:00
Johannes Zellner 9feb5dedd5 Remove all the wizard step logic from setup 2017-01-05 10:49:41 +01:00
Johannes Zellner 99948c4ed5 Use class nesting for setup 2017-01-05 10:49:41 +01:00
Girish Ramakrishnan 967bab678d Fix listing of app backups
The id can now contain path and not just the filename
2017-01-05 01:03:44 -08:00
Girish Ramakrishnan 135c296ac7 Remove the Z suffix 2017-01-05 00:12:31 -08:00
Girish Ramakrishnan e83ee48ed5 Pass collation tag to backup functions
Fixes #159
2017-01-05 00:10:16 -08:00
Girish Ramakrishnan 1539fe0906 preserve msecs portion in backup file format
this is required because the second precision causes backups to fail
because of duplicate file name. this happens in tests.

part of #159
2017-01-04 21:57:03 -08:00
Girish Ramakrishnan c06bddd19e Fix backup filename prefix in sql query 2017-01-04 21:41:31 -08:00
Girish Ramakrishnan ceb78f21bb remove redundant reuseOldAppBackup 2017-01-04 21:20:36 -08:00
Girish Ramakrishnan 5af201d4ee remove unused require 2017-01-04 19:37:39 -08:00
Girish Ramakrishnan 794efb5ef5 Merge backupDone webhook into caas storage backend 2017-01-04 16:29:25 -08:00
Girish Ramakrishnan 31a9437b2c Add backupDone hook 2017-01-04 16:23:12 -08:00
Girish Ramakrishnan 2b27e554fd Change backup filenames
appbackup_%s_%s-v%s.tar.gz -> app_%s_%s_v%s.tar.gz
    drop 'backup'. rationale: it is known these files are backups
    timestamp has '-'. rationale: colon in filename confuses tools like scp (they think it is a hostname)

backup_%s-v%s.tar.gz -> box_%s_v%s.tar.gz
    drop 'backup' and name it 'box'. this makes it clear it related to the box backup
    timestamp has '-'. rationale: colon in filename confuses tools like scp (they think it is a hostname)

Part of #159
2017-01-04 13:36:25 -08:00
Girish Ramakrishnan 4784b7b00e Fix coding style 2017-01-04 13:36:16 -08:00
Girish Ramakrishnan e547a719f6 remove dead code 2017-01-04 13:35:39 -08:00
Johannes Zellner 24f2d201ed Remove ip cache in sysinfo 2017-01-04 21:40:47 +01:00
Girish Ramakrishnan 792dfc731c Revert "Make virtualbox 20GB vdi work"
This reverts commit 67d840a1b3.

Change the docs for virtualbox for now to create a bigger VDI
2017-01-04 10:14:57 -08:00
Johannes Zellner 6697b39e79 Set password digest explicitly
sha1 used to be the fallback but with node 6.* the fallback is deprecated
2017-01-04 09:59:14 -08:00
Girish Ramakrishnan db1eeff2c3 Add test to check if user can be readded after removal
Fixes #162
2017-01-03 19:12:00 -08:00
Girish Ramakrishnan fc624701bf Use cloudron-setup from CDN
Fixes #165
2017-01-03 15:39:17 -08:00
Girish Ramakrishnan 591cc52944 Run initializeBaseImage script from the release tarball
Part of #165
2017-01-03 14:48:39 -08:00
Girish Ramakrishnan 67d840a1b3 Make virtualbox 20GB vdi work 2017-01-03 14:30:59 -08:00
Girish Ramakrishnan 8ffa951407 Clearly mark message as an error 2017-01-03 14:28:04 -08:00
Girish Ramakrishnan af39c2c7ae Replace cloudron-version with a python script
This will allow us to check version without node installed

Part of #165
2017-01-03 14:23:00 -08:00
Girish Ramakrishnan 5903c7d0bc remove x-bit from logcollector.js 2017-01-03 09:46:53 -08:00
Johannes Zellner dbb79fc9e6 Remove unused customDomain check in setup flow 2017-01-03 14:58:41 +01:00
Johannes Zellner ef1408fddb Remove unsed vars in cloudron-setup 2017-01-03 09:26:08 +01:00
Johannes Zellner 47ecb0e1cf Test minimum requirements before continue in cloudron-setup
Fixes #153
2017-01-02 18:03:28 +01:00
Johannes Zellner 55fad3d57e Convert booleans for the correct object 2017-01-02 14:15:20 +01:00
Johannes Zellner 496a44d412 Also update app dns records in dynamic dns case 2017-01-02 14:00:07 +01:00
Johannes Zellner 05721f73cc Fix typo 2017-01-02 13:51:58 +01:00
Johannes Zellner 424c36ea49 Convert boolean settings values
The db table only stores strings
2017-01-02 13:47:51 +01:00
Johannes Zellner a38097e2f5 Refresh dns if dynamic dns is enabled 2017-01-02 13:14:03 +01:00
Johannes Zellner b26cb4d339 Add dynamic dns settings key 2017-01-02 13:05:48 +01:00
Johannes Zellner 3523974163 Add initial refreshDNS() function 2017-01-02 13:00:30 +01:00
Johannes Zellner a2bdd294a8 update the version tag in the selfhosting docs 2017-01-01 17:17:24 +01:00
Girish Ramakrishnan f85bfdf451 Explain what the MB is 2016-12-31 09:39:17 -08:00
142 changed files with 6526 additions and 2458 deletions
+39
View File
@@ -698,3 +698,42 @@
[0.93.0]
* Smoother upgrades
[0.94.0]
* Cloudron domain can now be set after installation
* Backups are now organized by directory
* Document upgrading from Filesystem backend
* Send certificate renewal errors, OOM errors to cloudron admins
* Email bounce alerts are sent to the Cloudron owner
[0.94.1]
* Suppress upgrade emails
* Enable unattended upgrades
* Standardize on using devicemapper for docker storage backend
* Show detailed backup progress
* Fix DNSBL issue in mail container
* Fix issue where bounce emails were not sent to aliases
* Remove tutorial
* Restart mail container on certificate change
[0.97.0]
* Fix missing app icon issue
* Fix issue where box sends out crash reports incessantly
* (API) Allow memory limit to be set to -1 (unlimited)
* (API) Move developmentMode flag from manifest to apps route
[0.98.0]
* Send stat on whether email is enabled
* Fix bug where heartbeat was sent for self-hosted Cloudrons
* Make Cloudron function even when disk is full
* Fix thunderbird connection issue
* Send more detailed logs for backup failures
* Restart nginx if it crashed automatically
* Support all DNS providers for managed Cloudrons
* Add granular configuration for auto-updates
[0.99.0]
* Fix bug where ports <= 1023 were not reserved
* Cleanup graphs UI
* Polish webadmin UI
* Fix bug where hard disk size was detected incorrectly
* Use overlay2 as docker storage backend for scaleway
BIN
View File
Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.5 KiB

After

Width:  |  Height:  |  Size: 14 KiB

+165
View File
@@ -0,0 +1,165 @@
#!/bin/bash

# Build a Cloudron "box" AMI on EC2:
#   1. launch a fresh Ubuntu 16.04 build instance
#   2. run cloudron-setup on it over ssh
#   3. snapshot the instance as an AMI
#   4. terminate the build instance (unless --no-destroy)
#
# Options: --revision <gitrev>  --name <aminame>  --env <env>  --no-destroy
# Requires AWS_ACCESS_KEY / AWS_ACCESS_SECRET in the environment and the
# caas ssh key (from the secrets repo) at ~/.ssh/id_rsa_yellowtent.

set -eu -o pipefail

# Die with a helpful message if the variable named by $1 is unset/empty
# (uses bash indirect expansion).
assertNotEmpty() {
    : "${!1:? "$1 is not set."}"
}

readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"

# json(1) cli from the repo's node_modules, used to pick fields out of aws cli output
export JSON="${SOURCE_DIR}/node_modules/.bin/json"

IMAGE_ID="ami-5aee2235" # ubuntu 16.04 eu-central-1
INSTANCE_TYPE="t2.micro"
SECURITY_GROUP="sg-19f5a770" # everything open on eu-central-1
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
SSH_KEY_NAME="id_rsa_yellowtent"

revision=$(git rev-parse HEAD)
ami_name=""
server_id=""
server_ip=""
destroy_server="yes"
deploy_env="prod"

args=$(getopt -o "" -l "revision:,name:,no-destroy,env:" -n "$0" -- "$@")
eval set -- "${args}"

while true; do
    case "$1" in
    --env) deploy_env="$2"; shift 2;;
    --revision) revision="$2"; shift 2;;
    --name) ami_name="$2"; shift 2;;
    --no-destroy) destroy_server="no"; shift 2;;
    --) break;;
    *) echo "Unknown option $1"; exit 1;;
    esac
done

export AWS_DEFAULT_REGION="eu-central-1" # we have to use us-east-1 to publish
# TODO fix this
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"

echo "=> Creating AMI"

readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"

if [[ ! -f "${ssh_keys}" ]]; then
    echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
    exit 1
fi

# Resolve a git revision to its short sha1 (used in the generated AMI name).
function get_pretty_revision() {
    local git_rev="$1"
    local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)

    echo "${sha1}"
}

now=$(date "+%Y-%m-%d-%H%M%S")
pretty_revision=$(get_pretty_revision "${revision}")
if [[ -z "${ami_name}" ]]; then
    # if you change this, change the regexp in appstore/janitor.js
    ami_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes
fi

echo "=> Create EC2 instance"
id=$(aws ec2 run-instances --image-id "${IMAGE_ID}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${SECURITY_GROUP}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}"\
    | $JSON Instances \
    | $JSON 0.InstanceId)
[[ -z "$id" ]] && exit 1
echo "Instance created with ID $id"

# run-instances returns before the instance has a public address; poll for it
echo "=> Waiting for instance to get a public IP"
while true; do
    server_ip=$(aws ec2 describe-instances --instance-ids ${id} \
        | $JSON Reservations.0.Instances \
        | $JSON 0.PublicIpAddress)

    if [[ ! -z "${server_ip}" ]]; then
        echo ""
        break
    fi

    echo -n "."
    sleep 1
done
echo "Got public IP ${server_ip}"

echo "=> Waiting for ssh connection"
while true; do
    echo -n "."
    if $SSH ubuntu@${server_ip} echo "hello"; then
        echo ""
        break
    fi
    sleep 5
done

# retry the download; networking on a fresh instance can be flaky
echo "=> Fetching cloudron-setup"
while true; do
    if $SSH ubuntu@${server_ip} wget "https://cloudron.io/cloudron-setup" -O "cloudron-setup"; then
        echo ""
        break
    fi
    echo -n "."
    sleep 5
done

echo "=> Running cloudron-setup"
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ec2"

echo "=> Creating AMI"
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
# BUGFIX: guard on image_id (the value just produced), not id — the instance
# id is always non-empty here, so the original check could never catch a
# failed create-image call
[[ -z "$image_id" ]] && exit 1
echo "Creating AMI with Id ${image_id}"

echo "=> Waiting for AMI to be created"
while true; do
    state=$(aws ec2 describe-images --image-ids ${image_id} \
        | $JSON Images \
        | $JSON 0.State)

    if [[ "${state}" == "available" ]]; then
        echo ""
        break
    fi

    echo -n "."
    sleep 5
done

if [[ "${destroy_server}" == "yes" ]]; then
    echo "=> Deleting EC2 instance"
    while true; do
        state=$(aws ec2 terminate-instances --instance-id "${id}" \
            | $JSON TerminatingInstances \
            | $JSON 0.CurrentState.Name)
        if [[ "${state}" == "shutting-down" ]]; then
            echo ""
            break
        fi
        echo -n "."
        sleep 5
    done
fi

echo ""
echo "Done."
echo ""
echo "New AMI is: ${image_id}"
echo ""
+38 -26
View File
@@ -2,10 +2,11 @@
set -euv -o pipefail
readonly PROVIDER="${1:-generic}"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly arg_provider="${1:-generic}"
readonly arg_infraversionpath="${SOURCE_DIR}/${2:-}"
function die {
echo $1
exit 1
@@ -16,26 +17,12 @@ export DEBIAN_FRONTEND=noninteractive
apt-get -o Dpkg::Options::="--force-confdef" update -y
apt-get -o Dpkg::Options::="--force-confdef" dist-upgrade -y
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
echo "==> Installing Docker"
apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
apt-get -y update
apt-get -y install \
aufs-tools \
linux-image-extra-$(uname -r) \
linux-image-extra-virtual \
docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
echo "==> Enable memory accounting"
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
update-grub
echo "==> Installing required packages"
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
apt-get -y install \
acl \
awscli \
@@ -51,6 +38,7 @@ apt-get -y install \
pwgen \
rcconf \
swaks \
unattended-upgrades \
unbound
echo "==> Installing node.js"
@@ -61,18 +49,42 @@ ln -sf /usr/local/node-6.9.2/bin/npm /usr/bin/npm
apt-get install -y python # Install python which is required for npm rebuild
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
echo "==> Downloading docker images"
if [ -f ${SOURCE_DIR}/infra_version.js ]; then
images=$(node -e "var i = require('${SOURCE_DIR}/infra_version.js'); console.log(i.baseImages.join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
echo "==> Installing Docker"
apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
apt-get -y update
echo "Pulling images: ${images}"
for image in ${images}; do
docker pull "${image}"
done
else
echo "No infra_versions.js found, skipping image download"
# create systemd drop-in file
mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
apt-mark hold docker-engine # do not update docker
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
if [[ "${storage_driver}" != "devicemapper" ]]; then
echo "Docker is using "${storage_driver}" instead of devicemapper"
exit 1
fi
echo "==> Enable memory accounting"
apt-get -y install grub2
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
update-grub
echo "==> Downloading docker images"
if [ ! -f "${arg_infraversionpath}/infra_version.js" ]; then
echo "No infra_versions.js found"
exit 1
fi
images=$(node -e "var i = require('${arg_infraversionpath}/infra_version.js'); console.log(i.baseImages.join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
echo -e "\tPulling docker images: ${images}"
for image in ${images}; do
docker pull "${image}"
done
echo "==> Install collectd"
if ! apt-get install -y collectd collectd-utils; then
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
+2 -3
View File
@@ -5,9 +5,8 @@
require('supererror')({ splatchError: true });
// remove timestamp from debug() based output
require('debug').formatArgs = function formatArgs() {
arguments[0] = this.namespace + ' ' + arguments[0];
return arguments;
require('debug').formatArgs = function formatArgs(args) {
args[0] = this.namespace + ' ' + args[0];
};
var appHealthMonitor = require('./src/apphealthmonitor.js'),
+2 -18
View File
@@ -151,6 +151,8 @@ If `altDomain` is set, the app can be accessed from `https://<altDomain>`.
* `SAMEORIGIN` - allows embedding from the same domain as the app. This is the default.
* `ALLOW-FROM https://example.com/` - allows this app to be embedded from example.com
`memoryLimit` is the maximum memory this app can use (in bytes) including swap. If set to 0, the app uses the `memoryLimit` value set in the manifest. If set to -1, the app gets unlimited memory.
Read more about the options at [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).
Response (200):
@@ -964,24 +966,6 @@ Response (204):
{}
```
### Tutorial
POST `/api/v1/profile/tutorial` <scope>profile</scope>
Toggles display of the tutorial when the token owner logs in.
Request:
```
{
showTutorial: <boolean>
}
```
Response (204):
```
{}
```
## Settings
### Get auto update pattern
-7
View File
@@ -44,13 +44,6 @@ Applications can be broadly categorized based on their user management as follow
* No user
* Such apps have no concept of logged-in user.
* The Cloudron provides a `website visibility` setting that allows a Cloudron admin to optionally
install an OAuth proxy in front of such applications. In such a case, a user visiting the website first
authenticates with the OAuth proxy and once authenticated is allowed into the application.
* When an OAuth proxy is installed, such applications can use the `X-Authenticated-User` header from the
[ICAP Extensions](https://tools.ietf.org/html/draft-stecher-icap-subid-00#section-3.4) de facto standard.
This value can be used for display purposes or creating meta data for a document.
* Single user
* Such apps only have a single user who is usually also the `admin`.
+5 -18
View File
@@ -76,7 +76,7 @@ The `author` field contains the name and email of the app developer (or company)
Example:
```
"author": "Cloudron Inc <girish@cloudron.io>"
"author": "Cloudron UG <girish@cloudron.io>"
```
## changelog
@@ -150,20 +150,6 @@ Example:
"description:": "file://DESCRIPTION.md"
```
## developmentMode
Type: boolean
Required: no
Setting `developmentMode` to true disables readonly rootfs and the default memory limit. In addition,
the application *pauses* on start and can be started manually using `cloudron exec`. Note that you
cannot submit an app to the store with this field turned on.
This mode can be used to identify the files being modified by your application - often required to
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
and use `find / -mmin -30` to find files that have been changed or created in the last 30 minutes.
## healthCheckPath
Type: url path
@@ -186,9 +172,10 @@ Type: positive integer
Required: yes
The `httpPort` field contains the TCP port on which your app is listening for HTTP requests. This port
is exposed to the world via subdomain/location that the user chooses at installation time. While not
required, it is good practice to mark this port as `EXPOSE` in the Dockerfile.
The `httpPort` field contains the TCP port on which your app is listening for HTTP requests. This
is the HTTP port the Cloudron will use to access your app internally.
While not required, it is good practice to mark this port as `EXPOSE` in the Dockerfile.
Cloudron Apps are containerized and thus two applications can listen on the same port. In reality,
they are in different network namespaces and do not conflict with each other.
+79 -90
View File
@@ -1,8 +1,7 @@
# Overview
The Cloudron platform can be installed on public cloud servers from EC2, Digital Ocean, Hetzner,
Linode, OVH, Scaleway, Vultr etc. Running Cloudron on a home server or company intranet is work
in progress.
Linode, OVH, Scaleway, Vultr etc. Cloudron also runs well on a home server or company intranet.
If you run into any trouble following this guide, ask us at our [chat](https://chat.cloudron.io).
@@ -22,30 +21,7 @@ work, the Cloudron requires a way to programmatically configure the DNS entries
Note that the Cloudron will never overwrite _existing_ DNS entries and refuse to install
apps on existing subdomains.
# CLI Tool
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be run on a Laptop or PC</b>
## Linux & OS X
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
```
npm install -g cloudron
```
Depending on your setup, you may need to run this as root.
On OS X, it is known to work with the `openssl` package from homebrew.
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
## Windows
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
# Provider
# Cloud Server
DigitalOcean and EC2 (Amazon Web Services) are frequently tested by us.
@@ -66,20 +42,6 @@ Please let us know if any of them requires tweaks or adjustments.
# Installing
## Choose Domain
A domain name is required when installing the Cloudron. Currently, only Second Level Domains
are supported. For example, `example.com`, `example.co.uk` will work fine. Choosing a domain
name at any other level like `cloudron.example.com` will not work.
The domain name must use one of the following name servers:
* AWS Route 53
* Digital Ocean
* Wildcard - If your domain does not use any of the name servers above, you can manually add
a wildcard (`*`) DNS entry.
You will have to provide the DNS API credentials after you complete the installation.
## Create server
Create an `Ubuntu 16.04 (Xenial)` server with at-least `1gb` RAM. Do not make any changes
@@ -95,30 +57,19 @@ Since Linode does not manage SSH keys, be sure to add the public key to
Use the [boot script](https://github.com/scaleway-community/scaleway-docker/issues/2) to
enable memory accounting.
## Setup `my` subdomain
The Cloudron web interface is installed at the `my` subdomain of your domain.
Add a `A` DNS record for the `my` subdomain with the IP of the server created
above. Doing this will allow the Cloudron to start up with a valid TLS certificate.
## Run setup
SSH into your server and run the following commands:
```
wget https://git.cloudron.io/cloudron/box/raw/v0.92.1/scripts/cloudron-setup
wget https://cloudron.io/cloudron-setup
chmod +x cloudron-setup
./cloudron-setup --domain <domain> --provider <digitalocean|ec2|generic|scaleway>
./cloudron-setup --provider <digitalocean|ec2|generic|scaleway>
```
The setup will take around 10-15 minutes.
`cloudron-setup` takes the following arguments:
* `--domain` is the domain name in which apps are installed. Currently, only Second Level
Domains are supported. For example, `example.com`, `example.co.uk`, `example.rocks` will
work fine. Choosing a domain name at any other level like `cloudron.example.com` will not
work.
**cloudron-setup** takes the following arguments:
* `--provider` is the name of your VPS provider. If the name is not on the list, simply
choose `generic`. In most cases, the `generic` provider mostly will work fine.
@@ -140,23 +91,17 @@ the latest version. You can set this to an older version when restoring a Cloudr
* `--restore-url` is a backup URL to restore from.
## Finish setup
## Domain setup
Once the setup script completes, the server will reboot, then visit `https://my.<domain>` to complete the installation.
Once the setup script completes, the server will reboot, then visit your server by its
IP address (`https://ip`) to complete the installation.
Please note the following:
The setup website will show a certificate warning. Accept the self-signed certificate
and proceed to the domain setup.
1. The website should already have a valid TLS certificate. If you see any certificate warnings, it means your Cloudron was not created correctly.
2. If you see a login screen, instead of a setup screen, it means that someone else got to your Cloudron first and set it up
already! In this unlikely case, simply delete the server and start over.
Once the setup is done, you can access the admin page in the future at `https://my.<domain>`.
## DNS
Cloudron has to be given the API credentials for configuring your domain under `Certs & Domains`
in the web UI.
Currently, only Second Level Domains are supported. For example, `example.com`,
`example.co.uk` will work fine. Choosing a domain name at any other level like
`cloudron.example.com` will not work.
### Route 53
@@ -203,17 +148,21 @@ If your domain *does not* use Route 53 or Digital Ocean, setup a wildcard (`*`)
IP of the server created above. If your DNS provider has an API, please open an
[issue](https://git.cloudron.io/cloudron/box/issues) and we may be able to support it.
## Backups
## Finish Setup
Once the domain setup is done, the Cloudron will configure the DNS and get a SSL certificate. It will automatically redirect to `https://my.<domain>`.
# Backups
The Cloudron creates encrypted backups once a day. Each app is backed up independently and these
backups have the prefix `appbackup_`. The platform state is backed up independently with the
prefix `backup_`.
backups have the prefix `app_`. The platform state is backed up independently with the
prefix `box_`.
By default, backups reside in `/var/backups`. Please note that having backups reside in the same
physical machine as the Cloudron server instance is dangerous and it must be changed to
an external storage location like `S3` as soon as possible.
### Amazon S3
## Amazon S3
Provide S3 backup credentials in the `Settings` page and leave the endpoint field empty.
@@ -245,7 +194,7 @@ for most use-cases.
}
```
### Minio S3
## Minio S3
[Minio](https://minio.io/) is a distributed object storage server, providing the same API as Amazon S3.
Since Cloudron supports S3, any API compatible solution should be supported as well, if this is not the case, let us know.
@@ -291,7 +240,7 @@ reputation should be easy to get back.
* AWS/EC2 - Fill the PTR [request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request).
* Digital Ocean - Digital Ocean sets up a PTR record based on the droplet's name. So, simply rename
your droplet to `my.<domain>`.
your droplet to `my.<domain>`. Note that some new Digital Ocean accounts have [port 25 blocked](https://www.digitalocean.com/community/questions/port-25-smtp-external-access).
* Scaleway - Edit your security group to allow email. You can also set a PTR record on the interface with your
`my.<domain>`.
@@ -299,9 +248,36 @@ reputation should be easy to get back.
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/). In most cases,
you can apply for removal of your IP by filling out a form at the DNSBL manager site.
* When using wildcard or manual DNS backends, you have to setup the DMARC, MX records manually.
* Finally, check your spam score at [mail-tester.com](https://www.mail-tester.com/). The Cloudron
should get 100%, if not please let us know.
# CLI Tool
The [Cloudron tool](https://git.cloudron.io/cloudron/cloudron-cli) is useful for managing
a Cloudron. <b class="text-danger">The Cloudron CLI tool has to be installed & run on a Laptop or PC</b>
Once installed, you can install, configure, list, backup and restore apps from the command line.
## Linux & OS X
Installing the CLI tool requires node.js and npm. The CLI tool can be installed using the following command:
```
npm install -g cloudron
```
Depending on your setup, you may need to run this as root.
On OS X, it is known to work with the `openssl` package from homebrew.
See [#14](https://git.cloudron.io/cloudron/cloudron-cli/issues/14) for more information.
## Windows
The CLI tool does not work on Windows. Please contact us on our [chat](https://chat.cloudron.io) if you want to help with Windows support.
# Updates
Apps installed from the Cloudron Store are automatically updated every night.
@@ -318,24 +294,26 @@ case an update fails, it can be [restored](/references/selfhosting.html#restore)
### Upgrade
An **upgrade** requires a new OS image and thus involves creating the Cloudron from scratch.
This process involves creating a new server with the latest code and restoring it from the
last backup. Currently only Cloudrons using the **S3 backup storage** support upgrades.
Read more about [backup storage](#s3), otherwise contact us in our [chat](https://chat.cloudron.io).
An **upgrade** requires a new OS image. This process involves creating a new server from scratch
with the latest code and restoring it from the last backup.
To upgrade follow these steps closely:
* Create a new backup - `cloudron machine backup create <domain>`
* Create a new backup - `cloudron machine backup create`
* List the latest backup - `cloudron machine backup list <domain>`
* List the latest backup - `cloudron machine backup list`
* Make the latest box backup (files starting with `backup_`) public. This can be done from the AWS S3 console as seen here:
* Make the backup available for the new cloudron instance:
<img src="/docs/img/aws_backup_public.png" class="shadow haze"><br/>
* `S3` - When storing backups in S3, make the latest box backup public - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console as seen here:
* Copy the new public URL of the latest backup for use as the `--restore-url` below.
<img src="/docs/img/aws_backup_public.png" class="shadow haze"><br/>
<img src="/docs/img/aws_backup_link.png" class="shadow haze"><br/>
Copy the new public URL of the latest backup for use as the `--restore-url` below.
<img src="/docs/img/aws_backup_link.png" class="shadow haze"><br/>
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the machine before installation, mounting the external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
When running the setup script, pass in the `--encryption-key` and `--restore-url` flags.
@@ -344,9 +322,9 @@ To upgrade follow these steps closely:
Similar to the initial installation, a Cloudron upgrade looks like:
```
$ ssh root@newserverip
> wget https://git.cloudron.io/cloudron/box/raw/v0.92.1/scripts/cloudron-setup
> wget https://cloudron.io/cloudron-setup
> chmod +x cloudron-setup
> ./cloudron-setup --domain <domain> --provider <digitalocean|ec2|generic|scaleway> --encryption-key <key> --restore-url <publicS3Url>
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --encryption-key <key> --restore-url <publicS3Url>
```
* Finally, once you see the newest version being displayed in your Cloudron webinterface, you can safely delete the old server instance.
@@ -355,13 +333,16 @@ $ ssh root@newserverip
To restore a Cloudron from a specific backup:
* Select the backup - `cloudron machine backup list <domain>`
* Select the backup - `cloudron machine backup list`
* Make the box backup public (this can be done from the S3 console). Also, copy the URL of
the backup for use as the `restore-url` below.
* Make the backup public
* `S3` - Make the box backup publicly readable - files starting with `box_` (from v0.94.0) or `backup_`. This can be done from the AWS S3 console. Once the box has restored, you can make it private again.
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the new machine before Cloudron installation OR mounting an external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
When running the setup script, pass in the `version`, `restore-key` and `restore-url` flags.
When running the setup script, pass in the `version`, `encryption-key` and `restore-url` flags.
The `version` field is the version of the Cloudron that the backup corresponds to (it is embedded
in the backup file name).
@@ -375,6 +356,14 @@ You can SSH into your Cloudron and collect logs:
* `docker ps` will give you the list of containers. The addon containers are named as `mail`, `postgresql`,
`mysql` etc. If you want to get a specific container's log output, `journalctl -a CONTAINER_ID=<container_id>`.
# Alerts
The Cloudron will notify the Cloudron administrator via email if apps go down, run out of memory, have updates
available etc.
You will have to setup a 3rd party service like [Cloud Watch](https://aws.amazon.com/cloudwatch/) or [UptimeRobot](http://uptimerobot.com/) to monitor the Cloudron itself. You can use `https://my.<domain>/api/v1/cloudron/status`
as the health check URL.
# Help
If you run into any problems, join us at our [chat](https://chat.cloudron.io) or [email us](mailto:support@cloudron.io).
+14 -11
View File
@@ -83,7 +83,7 @@ FROM cloudron/base:0.9.0
ADD server.js /app/code/server.js
CMD [ "/usr/local/node-4.2.1/bin/node", "/app/code/server.js" ]
CMD [ "/usr/local/node-4.4.7/bin/node", "/app/code/server.js" ]
```
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
@@ -94,12 +94,12 @@ The `ADD` command copies the source code of the app into the directory `/app/cod
about the `/app/code` directory and it is merely a convention we use to store the application code.
The `CMD` command specifies how to run the server. The base image already contains many different versions of
node.js. We use Node 4.2.1 here.
node.js. We use Node 4.4.7 here.
This Dockerfile can be built and run locally as:
```
docker build -t tutorial .
docker run -p 8000:8000 -ti tutorial
docker run -p 8000:8000 -t tutorial
```
## Manifest
@@ -271,14 +271,18 @@ You can also execute arbitrary commands:
$ cloudron exec env # display the env variables that your app is running with
```
### DevelopmentMode
### Debugging
When debugging complex startup scripts, one can specify `"developmentMode": true,` in the CloudronManifest.json.
This will ignore the `RUN` command, specified in the Dockerfile and allows the developer to interactively test
the startup scripts using `cloudron exec`.
An app can be placed in `debug` mode by passing `--debug` to `cloudron install` or `cloudron configure`.
Doing so, runs the app in a non-readonly rootfs and unlimited memory. By default, this will also ignore
the `RUN` command specified in the Dockerfile. The developer can then interactively test the app and
startup scripts using `cloudron exec`.
**Note:** that an app running in this mode has full read/write access to the filesystem and all memory limits are lifted.
This mode can be used to identify the files being modified by your application - often required to
debug situations where your app does not run on a readonly rootfs. Run your app using `cloudron exec`
and use `find / -mmin -30` to find files that have been changed or created in the last 30 minutes.
You can turn off debugging mode using `cloudron configure --no-debug`.
# Addons
@@ -429,9 +433,8 @@ other Cloudron users. This can be done using:
cloudron upload
```
The app should now be visible in the Store view of your cloudron under
the 'Testing' section. You can check if the icon, description and other details
appear correctly.
You should now be able to visit `/#/appstore/<appid>?version=<appversion>` on your
Cloudron to check if the icon, description and other details appear correctly.
Other Cloudron users can install your app on their Cloudron's using
`cloudron install --appstore-id <appid@version>`.
+30 -4
View File
@@ -40,12 +40,21 @@ gulp.task('3rdparty', function () {
// JavaScript
// --------------
gulp.task('js', ['js-index', 'js-setup', 'js-update'], function () {});
if (argv.help || argv.h) {
console.log('Supported arguments for "gulp develop":');
console.log(' --client-id <clientId>');
console.log(' --client-secret <clientSecret>');
console.log(' --api-origin <cloudron api uri>');
process.exit(1);
}
gulp.task('js', ['js-index', 'js-setup', 'js-setupdns', 'js-update'], function () {});
var oauth = {
clientId: argv.clientId || process.env.CLOUDRON_CLIENT_ID || 'cid-webadmin',
clientSecret: argv.clientSecret || process.env.CLOUDRON_CLIENT_SECRET || 'unused',
apiOrigin: argv.apiOrigin || process.env.CLOUDRON_API_ORIGIN || ''
clientId: argv.clientId || 'cid-webadmin',
clientSecret: argv.clientSecret || 'unused',
apiOrigin: argv.apiOrigin || ''
};
console.log();
@@ -94,6 +103,22 @@ gulp.task('js-setup', function () {
.pipe(gulp.dest('webadmin/dist/js'));
});
// Build task for the DNS-setup page: bundles setupdns.js together with the
// shared API client into a single minified webadmin/dist/js/setupdns.js
// (EJS pass injects the `oauth` settings object defined above; inline
// source maps are appended).
gulp.task('js-setupdns', function () {
    // needs special treatment for error handling
    var uglifyer = uglify();
    uglifyer.on('error', function (error) {
        // log instead of throwing so a minify error does not kill the watch loop
        console.error(error);
    });

    gulp.src(['webadmin/src/js/setupdns.js', 'webadmin/src/js/client.js'])
        .pipe(ejs({ oauth: oauth }, { ext: '.js' }))
        .pipe(sourcemaps.init())
        .pipe(concat('setupdns.js', { newLine: ';' }))   // ';' guards against missing terminators between files
        .pipe(uglifyer)
        .pipe(sourcemaps.write())
        .pipe(gulp.dest('webadmin/dist/js'));
});
gulp.task('js-update', function () {
// needs special treatment for error handling
var uglifyer = uglify();
@@ -162,6 +187,7 @@ gulp.task('watch', ['default'], function () {
gulp.watch(['webadmin/src/templates/*.html'], ['html-templates']);
gulp.watch(['webadmin/src/js/update.js'], ['js-update']);
gulp.watch(['webadmin/src/js/setup.js', 'webadmin/src/js/client.js'], ['js-setup']);
gulp.watch(['webadmin/src/js/setupdns.js', 'webadmin/src/js/client.js'], ['js-setupdns']);
gulp.watch(['webadmin/src/js/index.js', 'webadmin/src/js/client.js', 'webadmin/src/js/appstore.js', 'webadmin/src/js/main.js', 'webadmin/src/views/*.js'], ['js-index']);
gulp.watch(['webadmin/src/3rdparty/**/*'], ['3rdparty']);
});
@@ -0,0 +1,16 @@
var dbm = global.dbm || require('db-migrate');
var type = dbm.dataType;
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users DROP COLUMN showTutorial', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,15 @@
dbm = dbm || require('db-migrate');
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN debugModeJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN debugModeJson ', function (error) {
if (error) console.error(error);
callback(error);
});
};
+1 -1
View File
@@ -19,7 +19,6 @@ CREATE TABLE IF NOT EXISTS users(
modifiedAt VARCHAR(512) NOT NULL,
admin INTEGER NOT NULL,
displayName VARCHAR(512) DEFAULT '',
showTutorial BOOLEAN DEFAULT 0,
PRIMARY KEY(id));
CREATE TABLE IF NOT EXISTS groups(
@@ -68,6 +67,7 @@ CREATE TABLE IF NOT EXISTS apps(
altDomain VARCHAR(256),
xFrameOptions VARCHAR(512),
sso BOOLEAN DEFAULT 1, // whether user chose to enable SSO
debugModeJson TEXT, // options for development mode
lastBackupId VARCHAR(128), // tracks last valid backup, can be removed
+3215 -18
View File
File diff suppressed because it is too large Load Diff
+3 -2
View File
@@ -17,7 +17,7 @@
"aws-sdk": "^2.1.46",
"body-parser": "^1.13.1",
"checksum": "^0.1.1",
"cloudron-manifestformat": "^2.5.1",
"cloudron-manifestformat": "^2.6.0",
"connect-ensure-login": "^0.1.1",
"connect-lastmile": "^0.1.0",
"connect-timeout": "^1.5.0",
@@ -31,6 +31,7 @@
"ejs": "^2.2.4",
"ejs-cli": "^1.2.0",
"express": "^4.12.4",
"express-rate-limit": "^2.6.0",
"express-session": "^1.11.3",
"gulp-sass": "^3.0.0",
"hat": "0.0.3",
@@ -58,7 +59,7 @@
"proxy-middleware": "^0.13.0",
"safetydance": "^0.1.1",
"semver": "^4.3.6",
"showdown": "^1.4.4",
"showdown": "^1.6.0",
"split": "^1.0.0",
"superagent": "^1.8.3",
"supererror": "^0.7.1",
+82 -43
View File
@@ -7,9 +7,32 @@ if [[ ${EUID} -ne 0 ]]; then
exit 1
fi
if [[ $(lsb_release -rs) != "16.04" ]]; then
echo "Cloudron requires Ubuntu 16.04" > /dev/stderr
exit 1
fi
# change this to a hash when we make a upgrade release
readonly INIT_BASESYSTEM_SCRIPT_URL="https://git.cloudron.io/cloudron/box/raw/master/baseimage/initializeBaseUbuntuImage.sh"
readonly LOG_FILE="/var/log/cloudron-setup.log"
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
readonly MINIMUM_MEMORY="990" # this is mostly reported for 1GB main memory (DO 992, EC2 990)
# copied from cloudron-resize-fs.sh
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
readonly disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
readonly disk_size_bytes=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }')
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024/1024))
# verify the system has minimum requirements met
if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
echo "Error: Cloudron requires atleast 1GB physical memory"
exit 1
fi
if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on ${disk_device} is ${disk_size_gb}GB)"
exit 1
fi
initBaseImage="true"
# provisioning data
@@ -21,10 +44,11 @@ dnsProvider="manual"
tlsProvider="le-prod"
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
requestedVersion="latest"
apiServer="https://api.cloudron.io"
apiServerOrigin="https://api.cloudron.io"
dataJson=""
prerelease=false
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:" -n "$0" -- "$@")
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
@@ -37,10 +61,24 @@ while true; do
--tls-provider) tlsProvider="$2"; shift 2;;
--dns-provider) dnsProvider="$2"; shift 2;;
--version) requestedVersion="$2"; shift 2;;
--env)
if [[ "$2" == "dev" ]]; then
apiServerOrigin="https://api.dev.cloudron.io"
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
tlsProvider="le-staging"
prerelease="true"
elif [[ "$2" == "staging" ]]; then
apiServerOrigin="https://api.staging.cloudron.io"
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
tlsProvider="le-staging"
prerelease="true"
fi
shift 2;;
--versions-url) versionsUrl="$2"; shift 2;;
--api-server) apiServer="$2"; shift 2;;
--api-server) apiServerOrigin="$2"; shift 2;;
--skip-baseimage-init) initBaseImage="false"; shift;;
--data) dataJson="$2"; shift 2;;
--prerelease) prerelease="true"; shift;;
--) break;;
*) echo "Unknown option $1"; exit 1;;
esac
@@ -48,11 +86,6 @@ done
# validate arguments in the absence of data
if [[ -z "${dataJson}" ]]; then
if [[ -z "${domain}" ]]; then
echo "--domain is required"
exit 1
fi
if [[ -z "${provider}" ]]; then
echo "--provider is required (generic, scaleway, ec2, digitalocean)"
exit 1
@@ -66,6 +99,11 @@ if [[ -z "${dataJson}" ]]; then
exit 1
fi
if [[ "${tlsProvider}" != "fallback" && "${tlsProvider}" != "le-prod" && "${tlsProvider}" != "le-staging" ]]; then
echo "--tls-provider must be one of: le-prod, le-staging, fallback"
exit 1
fi
if [[ -z "${dnsProvider}" ]]; then
echo "--dns-provider is required (noop, manual)"
exit 1
@@ -80,8 +118,6 @@ echo "##############################################"
echo " Cloudron Setup (${requestedVersion}) "
echo "##############################################"
echo ""
echo "The server will reboot at the end to complete the setup."
echo ""
echo " Follow setup logs in a second terminal with:"
echo " $ tail -f ${LOG_FILE}"
echo ""
@@ -89,43 +125,27 @@ echo " Join us at https://chat.cloudron.io for any questions."
echo ""
if [[ "${initBaseImage}" == "true" ]]; then
echo "=> Update package repositories ..."
echo "=> Updating apt and installing script dependancies"
if ! apt-get update &>> "${LOG_FILE}"; then
echo "Could not update package repositories"
exit 1
fi
echo "=> Installing setup dependencies ..."
if ! apt-get install curl -y &>> "${LOG_FILE}"; then
if ! apt-get install curl python3 ubuntu-standard -y &>> "${LOG_FILE}"; then
echo "Could not install setup dependencies (curl)"
exit 1
fi
echo "=> Downloading initialization script"
if ! curl -s "${INIT_BASESYSTEM_SCRIPT_URL}" > /tmp/initializeBaseUbuntuImage.sh; then
echo "Could not download initialization script"
exit 1
fi
echo "=> Installing base dependencies (this takes some time) ..."
if ! /bin/bash /tmp/initializeBaseUbuntuImage.sh "${provider}" &>> "${LOG_FILE}"; then
echo "Init script failed. See ${LOG_FILE} for details"
exit 1
fi
rm /tmp/initializeBaseUbuntuImage.sh
fi
echo "=> Checking version"
if ! npm install -g cloudron-version@0.1.1 &>> "${LOG_FILE}"; then
echo "Failed to install cloudron-version npm package"
exit 1
releaseJson=$(curl -s "${versionsUrl}")
if [[ "$requestedVersion" == "latest" ]]; then
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
else
version="${requestedVersion}"
fi
NPM_BIN=$(npm bin -g 2>/dev/null)
if ! version=$(${NPM_BIN}/cloudron-version --out version --versions-url "${versionsUrl}" --version "${requestedVersion}"); then
echo "No such version ${requestedVersion}"
exit 1
fi
if ! sourceTarballUrl=$(${NPM_BIN}/cloudron-version --out tarballUrl --versions-url "${versionsUrl}" --version "${requestedVersion}"); then
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
echo "No source code for version ${requestedVersion}"
exit 1
fi
@@ -138,7 +158,7 @@ if [[ -z "${dataJson}" ]]; then
"boxVersionsUrl": "${versionsUrl}",
"fqdn": "${domain}",
"provider": "${provider}",
"apiServerOrigin": "${apiServer}",
"apiServerOrigin": "${apiServerOrigin}",
"tlsConfig": {
"provider": "${tlsProvider}"
},
@@ -150,6 +170,9 @@ if [[ -z "${dataJson}" ]]; then
"backupFolder": "/var/backups",
"key": "${encryptionKey}"
},
"updateConfig": {
"prerelease": ${prerelease}
},
"version": "${version}"
}
EOF
@@ -160,7 +183,7 @@ EOF
"boxVersionsUrl": "${versionsUrl}",
"fqdn": "${domain}",
"provider": "${provider}",
"apiServerOrigin": "${apiServer}",
"apiServerOrigin": "${apiServerOrigin}",
"restore": {
"url": "${restoreUrl}",
"key": "${encryptionKey}"
@@ -174,13 +197,24 @@ else
data="${dataJson}"
fi
echo "=> Downloading and running installer for version ${version} (this takes some time) ..."
echo "=> Downloading version ${version} ..."
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
if ! curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
echo "Could not download source tarball. See ${LOG_FILE} for details"
exit 1
fi
if [[ "${initBaseImage}" == "true" ]]; then
echo -n "=> Installing base dependencies and downloading docker images (this takes some time) ..."
if ! /bin/bash "${box_src_tmp_dir}/baseimage/initializeBaseUbuntuImage.sh" "${provider}" "../src" &>> "${LOG_FILE}"; then
echo "Init script failed. See ${LOG_FILE} for details"
exit 1
fi
echo ""
fi
echo "=> Installing version ${version} (this takes some time) ..."
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data "${data}" &>> "${LOG_FILE}"; then
echo "Failed to install cloudron. See ${LOG_FILE} for details"
exit 1
@@ -189,16 +223,21 @@ fi
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
while true; do
echo -n "."
if journalctl -u box -a | grep "platformReady: " >/dev/null; then
break
if status=$(curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
[[ -z "$domain" ]] && break # with no domain, we are up and running
[[ "$status" == *"\"tls\": true"* ]] && break # with a domain, wait for the cert
fi
sleep 10
done
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
echo -e "Visit https://my.${domain} to finish setup once the server has rebooted.\n"
if [[ -n "${domain}" ]]; then
echo -e "Visit https://my.${domain} to finish setup once the server has rebooted.\n"
else
echo -e "Visit https://<IP> to finish setup once the server has rebooted.\n"
fi
if [[ "${initBaseImage}" == "true" ]]; then
systemctl reboot
fi
-8
View File
@@ -16,10 +16,6 @@ readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
readonly is_update=$([[ -f "${CLOUDRON_CONF}" ]] && echo "yes" || echo "no")
# create a provision file for testing. %q escapes args. %q is reused as much as necessary to satisfy $@
(echo -e "#!/bin/bash\n"; printf "%q " "${script_dir}/installer.sh" "$@") > /root/provision.sh
chmod +x /root/provision.sh
arg_data=""
args=$(getopt -o "" -l "data:,data-file:" -n "$0" -- "$@")
@@ -68,9 +64,5 @@ rm -rf "${BOX_SRC_DIR}"
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
chown -R "${USER}:${USER}" "${BOX_SRC_DIR}"
# create a start file for testing. %q escapes args
(echo -e "#!/bin/bash\n"; printf "%q " "${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}") > /home/yellowtent/setup_start.sh
chmod +x /home/yellowtent/setup_start.sh
echo "==> installer: calling box setup script"
"${BOX_SRC_DIR}/setup/start.sh" --data "${arg_data}"
+76 -79
View File
@@ -6,11 +6,12 @@ echo "==> Cloudron Start"
readonly USER="yellowtent"
readonly DATA_FILE="/root/user_data.img"
readonly BOX_SRC_DIR="/home/${USER}/box"
readonly DATA_DIR="/home/${USER}/data"
readonly CONFIG_DIR="/home/${USER}/configs"
readonly SETUP_PROGRESS_JSON="/home/yellowtent/setup/website/progress.json"
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
readonly HOME_DIR="/home/${USER}"
readonly BOX_SRC_DIR="${HOME_DIR}/box"
readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
readonly CONFIG_DIR="${HOME_DIR}/configs"
readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
@@ -18,12 +19,6 @@ readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${script_dir}/argparser.sh" "$@" # this injects the arg_* variables used below
# keep this is sync with config.js appFqdn()
readonly admin_fqdn=$([[ "${arg_is_custom_domain}" == "true" ]] && echo "${ADMIN_LOCATION}.${arg_fqdn}" || echo "${ADMIN_LOCATION}-${arg_fqdn}")
readonly admin_origin="https://${admin_fqdn}"
readonly is_update=$([[ -f "${CONFIG_DIR}/cloudron.conf" ]] && echo "true" || echo "false")
set_progress() {
local percent="$1"
local message="$2"
@@ -32,7 +27,7 @@ set_progress() {
(echo "{ \"update\": { \"percent\": \"${percent}\", \"message\": \"${message}\" }, \"backup\": {} }" > "${SETUP_PROGRESS_JSON}") 2> /dev/null || true # as this will fail in non-update mode
}
set_progress "10" "Configuring host"
set_progress "20" "Configuring host"
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
timedatectl set-ntp 1
timedatectl set-timezone UTC
@@ -70,12 +65,23 @@ mkdir -p /etc/iptables && iptables-save > /etc/iptables/rules.v4
echo "==> Configuring docker"
cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
systemctl enable apparmor
systemctl restart apparmor
usermod yellowtent -a -G docker
sed -e 's,^ExecStart=.*$,ExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs,' -i /lib/systemd/system/docker.service
usermod ${USER} -a -G docker
temp_file=$(mktemp)
# create systemd drop-in. some apps do not work with aufs
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
systemctl enable docker
systemctl restart docker
# restart docker if options changed
if [[ ! -f /etc/systemd/system/docker.service.d/cloudron.conf ]] || ! diff -q /etc/systemd/system/docker.service.d/cloudron.conf "${temp_file}" >/dev/null; then
mkdir -p /etc/systemd/system/docker.service.d
mv "${temp_file}" /etc/systemd/system/docker.service.d/cloudron.conf
systemctl daemon-reload
systemctl restart docker
fi
docker network create --subnet=172.18.0.0/16 cloudron || true
# caas has ssh on port 202 and we disable password login
if [[ "${arg_provider}" == "caas" ]]; then
@@ -107,20 +113,34 @@ fi
# keep these in sync with paths.js
echo "==> Ensuring directories"
[[ "${is_update}" == "false" ]] && btrfs subvolume create "${DATA_DIR}/box"
mkdir -p "${DATA_DIR}/box/appicons"
mkdir -p "${DATA_DIR}/box/certs"
mkdir -p "${DATA_DIR}/box/mail/dkim/${arg_fqdn}"
mkdir -p "${DATA_DIR}/box/acme" # acme keys
if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
# Migrate mail data to new format
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory
btrfs subvolume create "${DATA_DIR}/mail"
[[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
rm -rf "${DATA_DIR}/box/mail"
fi
mkdir -p "${DATA_DIR}/graphite"
mkdir -p "${DATA_DIR}/mail/dkim"
mkdir -p "${DATA_DIR}/mysql"
mkdir -p "${DATA_DIR}/postgresql"
mkdir -p "${DATA_DIR}/mongodb"
mkdir -p "${DATA_DIR}/snapshots"
mkdir -p "${DATA_DIR}/addons"
mkdir -p "${DATA_DIR}/addons/mail"
mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
mkdir -p "${DATA_DIR}/acme" # acme challenges
mkdir -p "${DATA_DIR}/acme"
mkdir -p "${BOX_DATA_DIR}"
if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
# Migrate box data out of data volume
mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}"
btrfs subvolume delete "${DATA_DIR}/box"
fi
mkdir -p "${BOX_DATA_DIR}/appicons"
mkdir -p "${BOX_DATA_DIR}/certs"
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
echo "==> Configuring journald"
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
@@ -133,32 +153,38 @@ sed -e "s/^WatchdogSec=.*$/WatchdogSec=3min/" \
-i /lib/systemd/system/systemd-journald.service
# Give user access to system logs
usermod -a -G systemd-journal yellowtent
usermod -a -G systemd-journal ${USER}
mkdir -p /var/log/journal # in some images, this directory is not created making system log to /run/systemd instead
chown root:systemd-journal /var/log/journal
systemctl daemon-reload
systemctl restart systemd-journald
setfacl -n -m u:yellowtent:r /var/log/journal/*/system.journal
setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal
echo "==> Creating config directory"
rm -rf "${CONFIG_DIR}" && mkdir "${CONFIG_DIR}"
chown yellowtent:yellowtent "${CONFIG_DIR}"
echo "==> Setting up unbound"
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow" > /etc/unbound/unbound.conf.d/cloudron-network.conf
echo "==> Adding systemd services"
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
systemctl daemon-reload
systemctl enable unbound
systemctl enable cloudron.target
systemctl enable iptables-restore
# For logrotate
systemctl enable --now cron
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
systemctl enable --now unbound
# ensure unbound runs
systemctl restart unbound
echo "==> Configuring sudoers"
rm -f /etc/sudoers.d/yellowtent
cp "${script_dir}/start/sudoers" /etc/sudoers.d/yellowtent
rm -f /etc/sudoers.d/${USER}
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
echo "==> Configuring collectd"
rm -rf /etc/collectd
@@ -171,35 +197,18 @@ echo "==> Configuring nginx"
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
ln -s "${DATA_DIR}/nginx" /etc/nginx
mkdir -p "${DATA_DIR}/nginx/applications"
mkdir -p "${DATA_DIR}/nginx/cert"
cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
# generate these for update code paths as well to overwrite splash
admin_cert_file="${DATA_DIR}/nginx/cert/host.cert"
admin_key_file="${DATA_DIR}/nginx/cert/host.key"
if [[ -f "${DATA_DIR}/box/certs/${admin_fqdn}.cert" && -f "${DATA_DIR}/box/certs/${admin_fqdn}.key" ]]; then
admin_cert_file="${DATA_DIR}/box/certs/${admin_fqdn}.cert"
admin_key_file="${DATA_DIR}/box/certs/${admin_fqdn}.key"
fi
${BOX_SRC_DIR}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"admin\", \"sourceDir\": \"${BOX_SRC_DIR}\", \"certFilePath\": \"${admin_cert_file}\", \"keyFilePath\": \"${admin_key_file}\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
mkdir -p "${DATA_DIR}/nginx/cert"
if [[ -f "${DATA_DIR}/box/certs/host.cert" && -f "${DATA_DIR}/box/certs/host.key" ]]; then
cp "${DATA_DIR}/box/certs/host.cert" "${DATA_DIR}/nginx/cert/host.cert"
cp "${DATA_DIR}/box/certs/host.key" "${DATA_DIR}/nginx/cert/host.key"
else
if [[ -z "${arg_tls_cert}" || -z "${arg_tls_key}" ]]; then
echo "==> Creating fallback certs"
openssl req -x509 -newkey rsa:2048 -keyout "${DATA_DIR}/nginx/cert/host.key" -out "${DATA_DIR}/nginx/cert/host.cert" -days 3650 -subj "/CN=${arg_fqdn}" -nodes
else
echo "${arg_tls_cert}" > "${DATA_DIR}/nginx/cert/host.cert"
echo "${arg_tls_key}" > "${DATA_DIR}/nginx/cert/host.key"
fi
if ! grep "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then
# default nginx service file does not restart on crash
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
systemctl daemon-reload
fi
systemctl start nginx
# bookkeep the version as part of data
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${DATA_DIR}/box/version"
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
# remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
# which currently fails because these are readonly fs
@@ -226,14 +235,15 @@ if [[ -n "${arg_restore_url}" ]]; then
echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
while true; do
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" | tar -zxf - -C "${DATA_DIR}/box"; then break; fi
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
echo "Failed to download data, trying again"
done
set_progress "35" "Setting up MySQL"
if [[ -f "${DATA_DIR}/box/box.mysqldump" ]]; then
if [[ -f "${BOX_DATA_DIR}/box.mysqldump" ]]; then
echo "==> Importing existing database into MySQL"
mysql -u root -p${mysql_root_password} box < "${DATA_DIR}/box/box.mysqldump"
mysql -u root -p${mysql_root_password} box < "${BOX_DATA_DIR}/box.mysqldump"
fi
fi
@@ -266,6 +276,11 @@ cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
"appBundle": ${arg_app_bundle}
}
CONF_END
# pass these out-of-band because they have new lines which interfere with json
if [[ -n "${arg_tls_cert}" && -n "${arg_tls_key}" ]]; then
echo "${arg_tls_cert}" > "${CONFIG_DIR}/host.cert"
echo "${arg_tls_key}" > "${CONFIG_DIR}/host.key"
fi
echo "==> Creating config.json for webadmin"
cat > "${BOX_SRC_DIR}/webadmin/dist/config.json" <<CONF_END
@@ -275,12 +290,10 @@ cat > "${BOX_SRC_DIR}/webadmin/dist/config.json" <<CONF_END
CONF_END
echo "==> Changing ownership"
chown "${USER}:${USER}" "${CONFIG_DIR}/cloudron.conf"
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
# during updates, do not trample mail ownership behind the the mail container's back
find "${DATA_DIR}/box" -mindepth 1 -maxdepth 1 -not -path "${DATA_DIR}/box/mail" -print0 | xargs -0 chown -R "${USER}:${USER}"
chown "${USER}:${USER}" "${DATA_DIR}/box"
chown "${USER}:${USER}" -R "${DATA_DIR}/box/mail/dkim" # this is owned by box currently since it generates the keys
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
chown "${USER}:${USER}" "${DATA_DIR}"
@@ -305,25 +318,9 @@ if [[ ! -z "${arg_tls_config}" ]]; then
-e "REPLACE INTO settings (name, value) VALUES (\"tls_config\", '$arg_tls_config')" box
fi
echo "==> Adding default clients"
# The domain might have changed, therefor we have to update the record
# !!! This needs to be in sync with the webadmin, specifically login_callback.js
readonly ADMIN_SCOPES="cloudron,developer,profile,users,apps,settings"
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO clients (id, appId, type, clientSecret, redirectURI, scope) VALUES (\"cid-webadmin\", \"Settings\", \"built-in\", \"secret-webadmin\", \"${admin_origin}\", \"${ADMIN_SCOPES}\")" box
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO clients (id, appId, type, clientSecret, redirectURI, scope) VALUES (\"cid-sdk\", \"SDK\", \"built-in\", \"secret-sdk\", \"${admin_origin}\", \"*,roleSdk\")" box
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO clients (id, appId, type, clientSecret, redirectURI, scope) VALUES (\"cid-cli\", \"Cloudron Tool\", \"built-in\", \"secret-cli\", \"${admin_origin}\", \"*,roleSdk\")" box
set_progress "60" "Starting Cloudron"
systemctl start cloudron.target
sleep 2 # give systemd sometime to start the processes
set_progress "80" "Reloading nginx"
nginx -s reload
set_progress "100" "Done"
set_progress "90" "Done"
+3 -3
View File
@@ -16,9 +16,9 @@ existing_swap=$(cat /proc/meminfo | grep SwapTotal | awk '{ printf "%.0f", $2/10
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change this, fix enoughResourcesAvailable() in client.js
readonly app_count=$((${physical_memory} / 200)) # estimated app count
readonly disk_size_gb=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf "%.0f", $3 }')
readonly disk_size=$((disk_size_gb * 1024))
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code and tmp
readonly disk_size_bytes=$(fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }') # can't rely on fdisk human readable units, using bytes instead
readonly disk_size=$((${disk_size_bytes}/1024/1024))
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
echo "Disk device: ${disk_device}"
+4
View File
@@ -5,8 +5,12 @@ map $http_upgrade $connection_upgrade {
}
server {
<% if (vhost) { %>
listen 443;
server_name <%= vhost %>;
<% } else { %>
listen 443 default_server;
<% } %>
ssl on;
# paths are relative to prefix and not to this file
-29
View File
@@ -57,35 +57,6 @@ http {
}
}
# This server handles the naked domain for custom domains.
# It can also be used for wildcard subdomain 404. This feature is not used by the Cloudron itself
# because box always sets up DNS records for app subdomains.
server {
listen 443 default_server;
ssl on;
ssl_certificate cert/host.cert;
ssl_certificate_key cert/host.key;
error_page 404 = @fallback;
location @fallback {
internal;
root /home/yellowtent/box/webadmin/dist;
rewrite ^/$ /nakeddomain.html break;
}
location / {
internal;
root /home/yellowtent/box/webadmin/dist;
rewrite ^/$ /nakeddomain.html break;
}
# required for /api/v1/cloudron/avatar
location /api/ {
proxy_pass http://127.0.0.1:3000;
client_max_body_size 1m;
}
}
include applications/*.conf;
}
+1 -1
View File
@@ -4,7 +4,7 @@ OnFailure=crashnotifier@%n.service
StopWhenUnneeded=true
; journald crashes result in a EPIPE in node. Cannot ignore it as it results in loss of logs.
BindsTo=systemd-journald.service
After=mysql.service
After=mysql.service nginx.service
; As cloudron-resize-fs is a one-shot, the Wants= automatically ensures that the service *finishes*
Wants=cloudron-resize-fs.service
+14
View File
@@ -0,0 +1,14 @@
# The default ubuntu unbound service uses SysV fallback mode, we want a proper unit file so unbound gets restarted correctly
[Unit]
Description=Unbound DNS Resolver
After=network.target
[Service]
PIDFile=/run/unbound.pid
ExecStart=/usr/sbin/unbound -d
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
[Install]
WantedBy=multi-user.target
+11 -5
View File
@@ -1,5 +1,3 @@
/* jslint node:true */
'use strict';
exports = module.exports = {
@@ -60,7 +58,7 @@ var assert = require('assert'),
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
'apps.xFrameOptions', 'apps.sso' ].join(',');
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'environmentVariable', 'appId' ].join(',');
@@ -98,6 +96,10 @@ function postProcess(result) {
result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';
result.sso = !!result.sso; // make it bool
assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
result.debugMode = safe.JSON.parse(result.debugModeJson);
delete result.debugModeJson;
}
function get(id, callback) {
@@ -185,11 +187,12 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
var installationState = data.installationState || exports.ISTATE_PENDING_INSTALL;
var lastBackupId = data.lastBackupId || null; // used when cloning
var sso = 'sso' in data ? data.sso : null;
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
var queries = [ ];
queries.push({
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso ]
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson ]
});
Object.keys(portBindings).forEach(function (env) {
@@ -299,6 +302,9 @@ function updateWithConstraints(id, app, constraints, callback) {
} else if (p === 'accessRestriction') {
fields.push('accessRestrictionJson = ?');
values.push(JSON.stringify(app[p]));
} else if (p === 'debugMode') {
fields.push('debugModeJson = ?');
values.push(JSON.stringify(app[p]));
} else if (p !== 'portBindings') {
fields.push(p + ' = ?');
values.push(app[p]);
+8 -8
View File
@@ -1,9 +1,9 @@
'use strict';
var appdb = require('./appdb.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
config = require('./config.js'),
DatabaseError = require('./databaseerror.js'),
debug = require('debug')('box:apphealthmonitor'),
docker = require('./docker.js').connection,
@@ -50,7 +50,7 @@ function setHealth(app, health, callback) {
debugApp(app, 'marking as unhealthy since not seen for more than %s minutes', UNHEALTHY_THRESHOLD/(60 * 1000));
if (app.appStoreId !== '') mailer.appDied(app); // do not send mails for dev apps
if (app.debugMode) mailer.appDied(app); // do not send mails for dev apps
gHealthInfo[app.id].emailSent = true;
} else {
debugApp(app, 'waiting for sometime to update the app health');
@@ -93,7 +93,7 @@ function checkAppHealth(app, callback) {
var healthCheckUrl = 'http://127.0.0.1:' + app.httpPort + manifest.healthCheckPath;
superagent
.get(healthCheckUrl)
.set('Host', config.appFqdn(app.location)) // required for some apache configs with rewrite rules
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
.redirects(0)
.timeout(HEALTHCHECK_INTERVAL)
.end(function (error, res) {
@@ -111,13 +111,13 @@ function checkAppHealth(app, callback) {
}
function processApps(callback) {
appdb.getAll(function (error, apps) {
apps.getAll(function (error, result) {
if (error) return callback(error);
async.each(apps, checkAppHealth, function (error) {
async.each(result, checkAppHealth, function (error) {
if (error) console.error(error);
var alive = apps
var alive = result
.filter(function (a) { return a.installationState === appdb.ISTATE_INSTALLED && a.runState === appdb.RSTATE_RUNNING && a.health === appdb.HEALTH_HEALTHY; })
.map(function (a) { return (a.location || 'naked_domain') + '|' + a.manifest.id; }).join(', ');
@@ -166,8 +166,8 @@ function processDockerEvents() {
debug('OOM Context: %s', context);
// do not send mails for dev apps
if ((!app || app.appStoreId !== '') && (now - lastOomMailTime > OOM_MAIL_LIMIT)) {
mailer.unexpectedExit(program, context); // app can be null if it's an addon crash
if ((!app || !app.debugMode) && (now - lastOomMailTime > OOM_MAIL_LIMIT)) {
mailer.oomEvent(program, context); // app can be null if it's an addon crash
lastOomMailTime = now;
}
});
+45 -14
View File
@@ -129,18 +129,21 @@ function validateHostname(location, fqdn) {
// validate the port bindings
function validatePortBindings(portBindings, tcpPorts) {
assert.strictEqual(typeof portBindings, 'object');
// keep the public ports in sync with firewall rules in scripts/initializeBaseUbuntuImage.sh
// these ports are reserved even if we listen only on 127.0.0.1 because we setup HostIp to be 127.0.0.1
// for custom tcp ports
var RESERVED_PORTS = [
22, /* ssh */
25, /* smtp */
53, /* dns */
80, /* http */
143, /* imap */
202, /* caas ssh */
443, /* https */
465, /* smtps */
587, /* submission */
919, /* ssh */
993, /* imaps */
2003, /* graphite (lo) */
2004, /* graphite (lo) */
@@ -162,9 +165,9 @@ function validatePortBindings(portBindings, tcpPorts) {
if (!/^[a-zA-Z0-9_]+$/.test(env)) return new AppsError(AppsError.BAD_FIELD, env + ' is not valid environment variable');
if (!Number.isInteger(portBindings[env])) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is not an integer');
if (portBindings[env] <= 0 || portBindings[env] > 65535) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is out of range');
if (RESERVED_PORTS.indexOf(portBindings[env]) !== -1) return new AppsError(AppsError.PORT_RESERVED, String(portBindings[env]));
if (portBindings[env] <= 1023 || portBindings[env] > 65535) return new AppsError(AppsError.BAD_FIELD, portBindings[env] + ' is not in permitted range');
}
// it is OK if there is no 1-1 mapping between values in manifest.tcpPorts and portBindings. missing values implies
@@ -207,6 +210,9 @@ function validateMemoryLimit(manifest, memoryLimit) {
// this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used
if (memoryLimit === 0) return null;
// a special value that indicates unlimited memory
if (memoryLimit === -1) return null;
if (memoryLimit < min) return new AppsError(AppsError.BAD_FIELD, 'memoryLimit too small');
if (memoryLimit > max) return new AppsError(AppsError.BAD_FIELD, 'memoryLimit too large');
@@ -227,6 +233,16 @@ function validateXFrameOptions(xFrameOptions) {
return (uri.protocol === 'http:' || uri.protocol === 'https:') ? null : new AppsError(AppsError.BAD_FIELD, 'xFrameOptions ALLOW-FROM uri must be a valid http[s] uri' );
}
function validateDebugMode(debugMode) {
assert.strictEqual(typeof debugMode, 'object');
if (debugMode === null) return null;
if ('cmd' in debugMode && debugMode.cmd !== null && !Array.isArray(debugMode.cmd)) return new AppsError(AppsError.BAD_FIELD, 'debugMode.cmd must be an array or null' );
if ('readonlyRootfs' in debugMode && typeof debugMode.readonlyRootfs !== 'boolean') return new AppsError(AppsError.BAD_FIELD, 'debugMode.readonlyRootfs must be a boolean' );
return null;
}
function getDuplicateErrorDetails(location, portBindings, error) {
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof portBindings, 'object');
@@ -262,7 +278,7 @@ function getAppConfig(app) {
}
function getIconUrlSync(app) {
var iconPath = paths.APPICONS_DIR + '/' + app.id + '.png';
var iconPath = paths.APP_ICONS_DIR + '/' + app.id + '.png';
return fs.existsSync(iconPath) ? '/api/v1/apps/' + app.id + '/icon' : null;
}
@@ -370,7 +386,7 @@ function purchase(appId, appstoreId, callback) {
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
if (result.statusCode === 404) return callback(new AppsError(AppsError.NOT_FOUND));
if (result.statusCode === 403) return callback(new AppsError(AppsError.BILLING_REQUIRED));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
callback(null);
@@ -476,7 +492,8 @@ function install(data, auditSource, callback) {
memoryLimit = data.memoryLimit || 0,
altDomain = data.altDomain || null,
xFrameOptions = data.xFrameOptions || 'SAMEORIGIN',
sso = 'sso' in data ? data.sso : null;
sso = 'sso' in data ? data.sso : null,
debugMode = data.debugMode || null;
assert(data.appStoreId || data.manifest); // atleast one of them is required
@@ -504,6 +521,9 @@ function install(data, auditSource, callback) {
error = validateXFrameOptions(xFrameOptions);
if (error) return callback(error);
error = validateDebugMode(debugMode);
if (error) return callback(error);
if ('sso' in data && !('optionalSso' in manifest)) return callback(new AppsError(AppsError.BAD_FIELD, 'sso can only be specified for apps with optionalSso'));
// if sso was unspecified, enable it by default if possible
if (sso === null) sso = !!manifest.addons['simpleauth'] || !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
@@ -515,7 +535,7 @@ function install(data, auditSource, callback) {
if (icon) {
if (!validator.isBase64(icon)) return callback(new AppsError(AppsError.BAD_FIELD, 'icon is not base64'));
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, appId + '.png'), new Buffer(icon, 'base64'))) {
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, appId + '.png'), new Buffer(icon, 'base64'))) {
return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving icon:' + safe.error.message));
}
}
@@ -533,7 +553,8 @@ function install(data, auditSource, callback) {
memoryLimit: memoryLimit,
altDomain: altDomain,
xFrameOptions: xFrameOptions,
sso: sso
sso: sso,
debugMode: debugMode
};
var from = (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
@@ -545,7 +566,7 @@ function install(data, auditSource, callback) {
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
// save cert to data/box/certs
// save cert to boxdata/certs
if (cert && key) {
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
@@ -612,7 +633,13 @@ function configure(appId, data, auditSource, callback) {
if (error) return callback(error);
}
// save cert to data/box/certs. TODO: move this to apptask when we have a real task queue
if ('debugMode' in data) {
values.debugMode = data.debugMode;
error = validateDebugMode(values.debugMode);
if (error) return callback(error);
}
// save cert to boxdata/certs. TODO: move this to apptask when we have a real task queue
if ('cert' in data && 'key' in data) {
if (data.cert && data.key) {
error = certificates.validateCertificate(data.cert, data.key, config.appFqdn(location));
@@ -683,11 +710,11 @@ function update(appId, data, auditSource, callback) {
if (data.icon) {
if (!validator.isBase64(data.icon)) return callback(new AppsError(AppsError.BAD_FIELD, 'icon is not base64'));
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, appId + '.png'), new Buffer(data.icon, 'base64'))) {
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, appId + '.png'), new Buffer(data.icon, 'base64'))) {
return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving icon:' + safe.error.message));
}
} else {
safe.fs.unlinkSync(path.join(paths.APPICONS_DIR, appId + '.png'));
safe.fs.unlinkSync(path.join(paths.APP_ICONS_DIR, appId + '.png'));
}
}
@@ -699,12 +726,16 @@ function update(appId, data, auditSource, callback) {
// this allows cloudron install -f --app <appid> for an app installed from the appStore
if (app.manifest.id !== values.manifest.id) {
if (!data.force) return callback(new AppsError(AppsError.BAD_FIELD, 'manifest id does not match. force to override'));
// clear appStoreId so that this app does not get updates anymore. this will mark it as a dev app
// clear appStoreId so that this app does not get updates anymore
values.appStoreId = '';
}
// do not update apps in debug mode
if (app.debugMode && !data.force) return callback(new AppsError(AppsError.BAD_STATE, 'debug mode enabled. force to override'));
// Ensure we update the memory limit in case the new app requires more memory as a minimum
if (values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
// 0 and -1 are special values for memory limit indicating unset and unlimited
if (app.memoryLimit > 0 && values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
values.memoryLimit = values.manifest.memoryLimit;
}
+9 -12
View File
@@ -22,9 +22,8 @@ exports = module.exports = {
require('supererror')({ splatchError: true });
// remove timestamp from debug() based output
require('debug').formatArgs = function formatArgs() {
arguments[0] = this.namespace + ' ' + arguments[0];
return arguments;
require('debug').formatArgs = function formatArgs(args) {
args[0] = this.namespace + ' ' + args[0];
};
var addons = require('./addons.js'),
@@ -34,8 +33,6 @@ var addons = require('./addons.js'),
async = require('async'),
backups = require('./backups.js'),
certificates = require('./certificates.js'),
clients = require('./clients.js'),
ClientsError = clients.ClientsError,
config = require('./config.js'),
database = require('./database.js'),
debug = require('debug')('box:apptask'),
@@ -193,6 +190,9 @@ function downloadIcon(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
// nothing to download if we dont have an appStoreId
if (!app.appStoreId) return callback(null);
debugApp(app, 'Downloading icon of %s@%s', app.appStoreId, app.manifest.version);
var iconUrl = config.apiServerOrigin() + '/api/v1/apps/' + app.appStoreId + '/versions/' + app.manifest.version + '/icon';
@@ -206,7 +206,7 @@ function downloadIcon(app, callback) {
if (error && !error.response) return retryCallback(new Error('Network error downloading icon:' + error.message));
if (res.statusCode !== 200) return retryCallback(null); // ignore error. this can also happen for apps installed with cloudron-cli
if (!safe.fs.writeFileSync(path.join(paths.APPICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
retryCallback(null);
});
@@ -281,7 +281,7 @@ function removeIcon(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
fs.unlink(path.join(paths.APPICONS_DIR, app.id + '.png'), function (error) {
fs.unlink(path.join(paths.APP_ICONS_DIR, app.id + '.png'), function (error) {
if (error && error.code !== 'ENOENT') debugApp(app, 'cannot remove icon : %s', error);
callback(null);
});
@@ -356,7 +356,6 @@ function install(app, callback) {
addons.teardownAddons.bind(null, app, app.manifest.addons),
deleteVolume.bind(null, app),
unregisterSubdomain.bind(null, app, app.location),
// removeIcon.bind(null, app), // do not remove icon for non-appstore installs
reserveHttpPort.bind(null, app),
@@ -412,7 +411,7 @@ function backup(app, callback) {
async.series([
updateApp.bind(null, app, { installationProgress: '10, Backing up' }),
backups.backupApp.bind(null, app, app.manifest),
backups.backupApp.bind(null, app, app.manifest, 'appbackups' /* tag */),
// done!
function (callback) {
@@ -456,7 +455,6 @@ function restore(app, callback) {
docker.deleteImage(app.oldConfig.manifest, done);
},
removeIcon.bind(null, app),
reserveHttpPort.bind(null, app),
@@ -595,14 +593,13 @@ function update(app, callback) {
docker.deleteImage(app.oldConfig.manifest, done);
},
// removeIcon.bind(null, app), // do not remove icon, otherwise the UI breaks for a short time...
function (next) {
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
async.series([
updateApp.bind(null, app, { installationProgress: '30, Backing up app' }),
backups.backupApp.bind(null, app, app.oldConfig.manifest)
backups.backupApp.bind(null, app, app.oldConfig.manifest, 'appbackups' /* tag */)
], next);
},
+2 -1
View File
@@ -49,8 +49,9 @@ function getByAppIdPaged(page, perPage, appId, callback) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof callback, 'function');
// box versions (0.93.x and below) used to use appbackup_ prefix
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? AND id LIKE ? ORDER BY creationTime DESC LIMIT ?,?',
[ exports.BACKUP_TYPE_APP, exports.BACKUP_STATE_NORMAL, 'appbackup\\_' + appId + '\\_%', (page-1)*perPage, perPage ], function (error, results) {
[ exports.BACKUP_TYPE_APP, exports.BACKUP_STATE_NORMAL, '%app%\\_' + appId + '\\_%', (page-1)*perPage, perPage ], function (error, results) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results.forEach(function (result) { postProcess(result); });
+34 -51
View File
@@ -46,8 +46,7 @@ var addons = require('./addons.js'),
shell = require('./shell.js'),
settings = require('./settings.js'),
SettingsError = require('./settings.js').SettingsError,
util = require('util'),
webhooks = require('./webhooks.js');
util = require('util');
var BACKUP_BOX_CMD = path.join(__dirname, 'scripts/backupbox.sh'),
BACKUP_APP_CMD = path.join(__dirname, 'scripts/backupapp.sh'),
@@ -132,7 +131,6 @@ function getByAppIdPaged(page, perPage, appId, callback) {
});
}
// backupId is the filename. appbackup_%s_%s-v%s.tar.gz
function getRestoreConfig(backupId, callback) {
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof callback, 'function');
@@ -149,7 +147,6 @@ function getRestoreConfig(backupId, callback) {
});
}
// backupId is the filename. appbackup_%s_%s-v%s.tar.gz
function getRestoreUrl(backupId, callback) {
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof callback, 'function');
@@ -174,15 +171,16 @@ function getRestoreUrl(backupId, callback) {
});
}
function copyLastBackup(app, manifest, callback) {
function copyLastBackup(app, manifest, prefix, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof app.lastBackupId, 'string');
assert(manifest && typeof manifest === 'object');
assert.strictEqual(typeof prefix, 'string');
assert.strictEqual(typeof callback, 'function');
var now = new Date();
var toFilenameArchive = util.format('appbackup_%s_%s-v%s.tar.gz', app.id, now.toISOString(), manifest.version);
var toFilenameConfig = util.format('appbackup_%s_%s-v%s.json', app.id, now.toISOString(), manifest.version);
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
var toFilenameArchive = util.format('%s/app_%s_%s_v%s.tar.gz', prefix, app.id, timestamp, manifest.version);
var toFilenameConfig = util.format('%s/app_%s_%s_v%s.json', prefix, app.id, timestamp, manifest.version);
settings.getBackupConfig(function (error, backupConfig) {
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
@@ -206,11 +204,12 @@ function copyLastBackup(app, manifest, callback) {
});
}
function backupBoxWithAppBackupIds(appBackupIds, callback) {
function backupBoxWithAppBackupIds(appBackupIds, prefix, callback) {
assert(util.isArray(appBackupIds));
assert.strictEqual(typeof prefix, 'string');
var now = new Date();
var filebase = util.format('backup_%s-v%s', now.toISOString(), config.version());
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
var filebase = util.format('%s/box_%s_v%s', prefix, timestamp, config.version());
var filename = filebase + '.tar.gz';
settings.getBackupConfig(function (error, backupConfig) {
@@ -229,7 +228,7 @@ function backupBoxWithAppBackupIds(appBackupIds, callback) {
backupdb.add({ id: filename, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds }, function (error) {
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
webhooks.backupDone(filename, null /* app */, appBackupIds, function (error) {
api(backupConfig.provider).backupDone(filename, null /* app */, appBackupIds, function (error) {
if (error) return callback(error);
callback(null, filename);
});
@@ -239,18 +238,6 @@ function backupBoxWithAppBackupIds(appBackupIds, callback) {
});
}
// this function expects you to have a lock
// function backupBox(callback) {
// apps.getAll(function (error, allApps) {
// if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
//
// var appBackupIds = allApps.map(function (app) { return app.lastBackupId; });
// appBackupIds = appBackupIds.filter(function (id) { return id !== null; }); // remove apps that were never backed up
//
// backupBoxWithAppBackupIds(appBackupIds, callback);
// });
// }
function canBackupApp(app) {
// only backup apps that are installed or pending configure or called from apptask. Rest of them are in some
// state not good for consistent backup (i.e addons may not have been setup completely)
@@ -260,29 +247,14 @@ function canBackupApp(app) {
app.installationState === appdb.ISTATE_PENDING_UPDATE; // called from apptask
}
// set the 'creation' date of lastBackup so that the backup persists across time based archival rules
// s3 does not allow changing creation time, so copying the last backup is easy way out for now
function reuseOldAppBackup(app, manifest, callback) {
assert.strictEqual(typeof app.lastBackupId, 'string');
assert(manifest && typeof manifest === 'object');
assert.strictEqual(typeof callback, 'function');
copyLastBackup(app, manifest, function (error, newBackupId) {
if (error) return callback(error);
debugApp(app, 'reuseOldAppBackup: reused old backup %s as %s', app.lastBackupId, newBackupId);
callback(null, newBackupId);
});
}
function createNewAppBackup(app, manifest, callback) {
function createNewAppBackup(app, manifest, prefix, callback) {
assert.strictEqual(typeof app, 'object');
assert(manifest && typeof manifest === 'object');
assert.strictEqual(typeof prefix, 'string');
assert.strictEqual(typeof callback, 'function');
var now = new Date();
var filebase = util.format('appbackup_%s_%s-v%s', app.id, now.toISOString(), manifest.version);
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
var filebase = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
var configFilename = filebase + '.json', dataFilename = filebase + '.tar.gz';
settings.getBackupConfig(function (error, backupConfig) {
@@ -324,9 +296,10 @@ function setRestorePoint(appId, lastBackupId, callback) {
});
}
function backupApp(app, manifest, callback) {
function backupApp(app, manifest, prefix, callback) {
assert.strictEqual(typeof app, 'object');
assert(manifest && typeof manifest === 'object');
assert.strictEqual(typeof prefix, 'string');
assert.strictEqual(typeof callback, 'function');
var backupFunction;
@@ -337,11 +310,13 @@ function backupApp(app, manifest, callback) {
return callback(new BackupsError(BackupsError.BAD_STATE, 'App not healthy and never backed up previously'));
}
backupFunction = reuseOldAppBackup.bind(null, app, manifest);
// set the 'creation' date of lastBackup so that the backup persists across time based archival rules
// s3 does not allow changing creation time, so copying the last backup is easy way out for now
backupFunction = copyLastBackup.bind(null, app, manifest, prefix);
} else {
var appConfig = apps.getAppConfig(app);
appConfig.manifest = manifest;
backupFunction = createNewAppBackup.bind(null, app, manifest);
backupFunction = createNewAppBackup.bind(null, app, manifest, prefix);
if (!safe.fs.writeFileSync(path.join(paths.DATA_DIR, app.id + '/config.json'), JSON.stringify(appConfig), 'utf8')) {
return callback(safe.error);
@@ -367,6 +342,8 @@ function backupBoxAndApps(auditSource, callback) {
callback = callback || NOOP_CALLBACK;
var prefix = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { });
apps.getAll(function (error, allApps) {
@@ -375,18 +352,20 @@ function backupBoxAndApps(auditSource, callback) {
var processed = 0;
var step = 100/(allApps.length+1);
progress.set(progress.BACKUP, processed, '');
progress.set(progress.BACKUP, step * processed, '');
async.mapSeries(allApps, function iterator(app, iteratorCallback) {
progress.set(progress.BACKUP, step * processed, 'Backing up ' + (app.altDomain || config.appFqdn(app.location)));
++processed;
backupApp(app, app.manifest, function (error, backupId) {
backupApp(app, app.manifest, prefix, function (error, backupId) {
if (error && error.reason !== BackupsError.BAD_STATE) {
debugApp(app, 'Unable to backup', error);
return iteratorCallback(error);
}
progress.set(progress.BACKUP, step * processed, 'Backed up app at ' + app.location);
progress.set(progress.BACKUP, step * processed, 'Backed up ' + (app.altDomain || config.appFqdn(app.location)));
iteratorCallback(null, backupId || null); // clear backupId if is in BAD_STATE and never backed up
});
@@ -398,7 +377,9 @@ function backupBoxAndApps(auditSource, callback) {
backupIds = backupIds.filter(function (id) { return id !== null; }); // remove apps in bad state that were never backed up
backupBoxWithAppBackupIds(backupIds, function (error, filename) {
progress.set(progress.BACKUP, step * processed, 'Backing up system data');
backupBoxWithAppBackupIds(backupIds, prefix, function (error, filename) {
progress.set(progress.BACKUP, 100, error ? error.message : '');
eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { errorMessage: error ? error.message : null, filename: filename });
@@ -421,7 +402,7 @@ function backup(auditSource, callback) {
backupBoxAndApps(auditSource, function (error) { // start the backup operation in the background
if (error) {
debug('backup failed.', error);
mailer.backupFailed(JSON.stringify(error));
mailer.backupFailed(error);
}
locker.unlock(locker.OP_FULL_BACKUP);
@@ -433,6 +414,8 @@ function backup(auditSource, callback) {
function ensureBackup(auditSource, callback) {
assert.strictEqual(typeof auditSource, 'object');
debug('ensureBackup: %j', auditSource);
getPaged(1, 1, function (error, backups) {
if (error) {
debug('Unable to list backups', error);
+84 -33
View File
@@ -1,14 +1,21 @@
'use strict';
exports = module.exports = {
installAdminCertificate: installAdminCertificate,
renewAll: renewAll,
setFallbackCertificate: setFallbackCertificate,
setAdminCertificate: setAdminCertificate,
CertificatesError: CertificatesError,
ensureFallbackCertificate: ensureFallbackCertificate,
setFallbackCertificate: setFallbackCertificate,
validateCertificate: validateCertificate,
ensureCertificate: ensureCertificate,
getAdminCertificatePath: getAdminCertificatePath,
setAdminCertificate: setAdminCertificate,
getAdminCertificate: getAdminCertificate,
renewAll: renewAll,
events: new (require('events').EventEmitter)(),
EVENT_CERT_CHANGED: 'cert_changed',
// exported for testing
_getApi: getApi
@@ -31,8 +38,6 @@ var acme = require('./cert/acme.js'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
subdomains = require('./subdomains.js'),
sysinfo = require('./sysinfo.js'),
user = require('./user.js'),
util = require('util'),
x509 = require('x509');
@@ -84,34 +89,53 @@ function getApi(app, callback) {
// we simply update the account with the latest email we have each time when getting letsencrypt certs
// https://github.com/ietf-wg-acme/acme/issues/30
user.getOwner(function (error, owner) {
options.email = error ? 'support@cloudron.io' : owner.email; // can error if not activated yet
options.email = error ? 'support@cloudron.io' : (owner.alternateEmail || owner.email); // can error if not activated yet
callback(null, api, options);
});
});
}
function installAdminCertificate(callback) {
if (process.env.BOX_ENV === 'test') return callback();
function ensureFallbackCertificate(callback) {
// ensure a fallback certificate that much of our code requires
var certFilePath = path.join(paths.APP_CERTS_DIR, 'host.cert');
var keyFilePath = path.join(paths.APP_CERTS_DIR, 'host.key');
debug('installAdminCertificate');
var fallbackCertPath = path.join(paths.NGINX_CERT_DIR, 'host.cert');
var fallbackKeyPath = path.join(paths.NGINX_CERT_DIR, 'host.key');
sysinfo.getIp(function (error, ip) {
if (error) return callback(error);
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) { // existing custom fallback certs (when restarting, restoring, updating)
debug('ensureFallbackCertificate: using fallback certs provided by user');
if (!safe.child_process.execSync('cp ' + certFilePath + ' ' + fallbackCertPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
if (!safe.child_process.execSync('cp ' + keyFilePath + ' ' + fallbackKeyPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
if (error) return callback(error);
return callback();
}
ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
if (error) { // currently, this can never happen
debug('Error obtaining certificate. Proceed anyway', error);
return callback();
}
if (config.tlsCert() && config.tlsKey()) {
// cert from CaaS or cloudron-setup. these files should _not_ be part of the backup
debug('ensureFallbackCertificate: using CaaS/cloudron-setup fallback certs');
if (!safe.fs.writeFileSync(fallbackCertPath, config.tlsCert())) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
if (!safe.fs.writeFileSync(fallbackKeyPath, config.tlsKey())) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
nginx.configureAdmin(certFilePath, keyFilePath, callback);
});
});
});
return callback();
}
// generate a self-signed cert. it's in backup dir so that we don't create a new cert across restarts
// FIXME: this cert does not cover the naked domain. needs SAN
if (config.fqdn()) {
debug('ensureFallbackCertificate: generating self-signed certificate');
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=*.%s -nodes', keyFilePath, certFilePath, config.fqdn());
safe.child_process.execSync(certCommand);
if (!safe.child_process.execSync('cp ' + certFilePath + ' ' + fallbackCertPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
if (!safe.child_process.execSync('cp ' + keyFilePath + ' ' + fallbackKeyPath)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
return callback();
} else {
debug('ensureFallbackCertificate: cannot generate fallback certificate without domain');
return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, 'No domain set'));
}
}
function isExpiringSync(certFilePath, hours) {
@@ -199,12 +223,14 @@ function renewAll(auditSource, callback) {
// reconfigure and reload nginx. this is required for the case where we got a renewed cert after fallback
var configureFunc = app.location === constants.ADMIN_LOCATION ?
nginx.configureAdmin.bind(null, certFilePath, keyFilePath)
nginx.configureAdmin.bind(null, certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn())
: nginx.configureApp.bind(null, app, certFilePath, keyFilePath);
configureFunc(function (ignoredError) {
if (ignoredError) debug('fallbackExpiredCertificates: error reconfiguring app', ignoredError);
exports.events.emit(exports.EVENT_CERT_CHANGED, domain);
iteratorCallback(); // move to next app
});
});
@@ -269,6 +295,8 @@ function setFallbackCertificate(cert, key, callback) {
if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, 'host.cert'), cert)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
if (!safe.fs.writeFileSync(path.join(paths.NGINX_CERT_DIR, 'host.key'), key)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
exports.events.emit(exports.EVENT_CERT_CHANGED, '*.' + config.fqdn());
nginx.reload(function (error) {
if (error) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, error));
@@ -283,15 +311,14 @@ function getFallbackCertificatePath(callback) {
callback(null, path.join(paths.NGINX_CERT_DIR, 'host.cert'), path.join(paths.NGINX_CERT_DIR, 'host.key'));
}
// FIXME: setting admin cert needs to restart the mail container because it uses admin cert
function setAdminCertificate(cert, key, callback) {
assert.strictEqual(typeof cert, 'string');
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof callback, 'function');
var vhost = config.adminFqdn();
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.cert');
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.key');
var error = validateCertificate(cert, key, vhost);
if (error) return callback(new CertificatesError(CertificatesError.INVALID_CERT, error.message));
@@ -300,21 +327,44 @@ function setAdminCertificate(cert, key, callback) {
if (!safe.fs.writeFileSync(certFilePath, cert)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
if (!safe.fs.writeFileSync(keyFilePath, key)) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error.message));
nginx.configureAdmin(certFilePath, keyFilePath, callback);
exports.events.emit(exports.EVENT_CERT_CHANGED, vhost);
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
}
function getAdminCertificatePath(callback) {
assert.strictEqual(typeof callback, 'function');
var vhost = config.adminFqdn();
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
var certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.cert');
var keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.user.key');
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) return callback(null, certFilePath, keyFilePath);
certFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.cert');
keyFilePath = path.join(paths.APP_CERTS_DIR, vhost + '.key');
if (fs.existsSync(certFilePath) && fs.existsSync(keyFilePath)) return callback(null, certFilePath, keyFilePath);
getFallbackCertificatePath(callback);
}
function getAdminCertificate(callback) {
assert.strictEqual(typeof callback, 'function');
getAdminCertificatePath(function (error, certFilePath, keyFilePath) {
if (error) return callback(error);
var cert = safe.fs.readFileSync(certFilePath);
if (!cert) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error));
var key = safe.fs.readFileSync(keyFilePath);
if (!cert) return callback(new CertificatesError(CertificatesError.INTERNAL_ERROR, safe.error));
return callback(null, cert, key);
});
}
function ensureCertificate(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
@@ -336,10 +386,11 @@ function ensureCertificate(app, callback) {
debug('ensureCertificate: %s. certificate already exists at %s', domain, keyFilePath);
if (!isExpiringSync(certFilePath, 24 * 1)) return callback(null, certFilePath, keyFilePath);
debug('ensureCertificate: %s cert require renewal', domain);
} else {
debug('ensureCertificate: %s cert does not exist', domain);
}
debug('ensureCertificate: %s cert require renewal', domain);
getApi(app, function (error, api, apiOptions) {
if (error) return callback(error);
+21
View File
@@ -10,6 +10,8 @@ exports = module.exports = {
getByAppId: getByAppId,
getByAppIdAndType: getByAppIdAndType,
upsert: upsert,
delByAppId: delByAppId,
delByAppIdAndType: delByAppIdAndType,
@@ -112,6 +114,25 @@ function add(id, appId, type, clientSecret, redirectURI, scope, callback) {
});
}
function upsert(id, appId, type, clientSecret, redirectURI, scope, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof clientSecret, 'string');
assert.strictEqual(typeof redirectURI, 'string');
assert.strictEqual(typeof scope, 'string');
assert.strictEqual(typeof callback, 'function');
var data = [ id, appId, type, clientSecret, redirectURI, scope ];
database.query('REPLACE INTO clients (id, appId, type, clientSecret, redirectURI, scope) VALUES (?, ?, ?, ?, ?, ?)', data, function (error, result) {
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
if (error || result.affectedRows === 0) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
callback(null);
});
}
function del(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
+27 -6
View File
@@ -14,6 +14,8 @@ exports = module.exports = {
addClientTokenByUserId: addClientTokenByUserId,
delToken: delToken,
addDefaultClients: addDefaultClients,
// keep this in sync with start.sh ADMIN_SCOPES that generates the cid-webadmin
SCOPE_APPS: 'apps',
SCOPE_DEVELOPER: 'developer',
@@ -34,14 +36,16 @@ exports = module.exports = {
TYPE_PROXY: 'addon-proxy'
};
var assert = require('assert'),
util = require('util'),
hat = require('hat'),
appdb = require('./appdb.js'),
tokendb = require('./tokendb.js'),
var appdb = require('./appdb.js'),
assert = require('assert'),
async = require('async'),
clientdb = require('./clientdb.js'),
config = require('./config.js'),
DatabaseError = require('./databaseerror.js'),
debug = require('debug')('box:clients'),
hat = require('hat'),
tokendb = require('./tokendb.js'),
util = require('util'),
uuid = require('node-uuid');
function ClientsError(reason, errorOrMessage) {
@@ -304,7 +308,7 @@ function delToken(clientId, tokenId, callback) {
assert.strictEqual(typeof tokenId, 'string');
assert.strictEqual(typeof callback, 'function');
get(clientId, function (error, result) {
get(clientId, function (error) {
if (error) return callback(error);
tokendb.del(tokenId, function (error) {
@@ -315,3 +319,20 @@ function delToken(clientId, tokenId, callback) {
});
});
}
function addDefaultClients(callback) {
assert.strictEqual(typeof callback, 'function');
debug('Adding default clients');
// The domain might have changed, therefor we have to update the record
// !!! This needs to be in sync with the webadmin, specifically login_callback.js
const ADMIN_SCOPES="cloudron,developer,profile,users,apps,settings";
// id, appId, type, clientSecret, redirectURI, scope
async.series([
clientdb.upsert.bind(null, 'cid-webadmin', 'Settings', 'built-in', 'secret-webadmin', config.adminOrigin(), ADMIN_SCOPES),
clientdb.upsert.bind(null, 'cid-sdk', 'SDK', 'built-in', 'secret-sdk', config.adminOrigin(), '*,roleSdk'),
clientdb.upsert.bind(null, 'cid-cli', 'Cloudron Tool', 'built-in', 'secret-cli', config.adminOrigin(), '*, roleSdk')
], callback);
}
+257 -87
View File
@@ -8,6 +8,7 @@ exports = module.exports = {
activate: activate,
getConfig: getConfig,
getStatus: getStatus,
dnsSetup: dnsSetup,
sendHeartbeat: sendHeartbeat,
sendAliveStatus: sendAliveStatus,
@@ -17,34 +18,38 @@ exports = module.exports = {
retire: retire,
migrate: migrate,
isConfiguredSync: isConfiguredSync,
getConfigStateSync: getConfigStateSync,
checkDiskSpace: checkDiskSpace,
readDkimPublicKeySync: readDkimPublicKeySync,
refreshDNS: refreshDNS,
events: new (require('events').EventEmitter)(),
EVENT_CONFIGURED: 'configured'
EVENT_ACTIVATED: 'activated'
};
var apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
backups = require('./backups.js'),
certificates = require('./certificates.js'),
child_process = require('child_process'),
clients = require('./clients.js'),
config = require('./config.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
debug = require('debug')('box:cloudron'),
df = require('node-df'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
locker = require('./locker.js'),
mailer = require('./mailer.js'),
nginx = require('./nginx.js'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
platform = require('./platform.js'),
progress = require('./progress.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
@@ -53,6 +58,7 @@ var apps = require('./apps.js'),
subdomains = require('./subdomains.js'),
superagent = require('superagent'),
sysinfo = require('./sysinfo.js'),
taskmanager = require('./taskmanager.js'),
tokendb = require('./tokendb.js'),
updateChecker = require('./updatechecker.js'),
user = require('./user.js'),
@@ -82,7 +88,7 @@ const BOX_AND_USER_TEMPLATE = {
var gUpdatingDns = false, // flag for dns update reentrancy
gBoxAndUserDetails = null, // cached cloudron details like region,size...
gIsConfigured = null; // cached configured state so that return value is synchronous. null means we are not initialized yet
gConfigState = { dns: false, tls: false, configured: false };
function CloudronError(reason, errorOrMessage) {
assert.strictEqual(typeof reason, 'string');
@@ -107,6 +113,7 @@ CloudronError.BAD_FIELD = 'Field error';
CloudronError.INTERNAL_ERROR = 'Internal Error';
CloudronError.EXTERNAL_ERROR = 'External Error';
CloudronError.ALREADY_PROVISIONED = 'Already Provisioned';
CloudronError.ALREADY_SETUP = 'Already Setup';
CloudronError.BAD_STATE = 'Bad state';
CloudronError.ALREADY_UPTODATE = 'No Update Available';
CloudronError.NOT_FOUND = 'Not found';
@@ -115,67 +122,151 @@ CloudronError.SELF_UPGRADE_NOT_SUPPORTED = 'Self upgrade not supported';
function initialize(callback) {
assert.strictEqual(typeof callback, 'function');
ensureDkimKeySync();
exports.events.on(exports.EVENT_CONFIGURED, addDnsRecords);
if (!fs.existsSync(paths.FIRST_RUN_FILE)) {
debug('initialize: installing app bundle on first run');
process.nextTick(installAppBundle);
fs.writeFileSync(paths.FIRST_RUN_FILE, 'been there, done that', 'utf8');
}
syncConfigState(callback);
async.series([
installAppBundle,
checkConfigState,
configureDefaultServer
], callback);
}
function uninitialize(callback) {
assert.strictEqual(typeof callback, 'function');
exports.events.removeListener(exports.EVENT_CONFIGURED, addDnsRecords);
exports.events.removeListener(exports.EVENT_FIRST_RUN, installAppBundle);
platform.events.removeListener(platform.EVENT_READY, onPlatformReady);
callback(null);
async.series([
cron.uninitialize,
taskmanager.pauseTasks,
mailer.stop,
platform.uninitialize
], callback);
}
function isConfiguredSync() {
return gIsConfigured === true;
function onConfigured(callback) {
callback = callback || NOOP_CALLBACK;
// if we hit here, the domain has to be set, this is a logic issue if it isn't
assert(config.fqdn());
debug('onConfigured: current state: %j', gConfigState);
if (gConfigState.configured) return callback(); // re-entracy flag
gConfigState.configured = true;
platform.events.on(platform.EVENT_READY, onPlatformReady);
async.series([
clients.addDefaultClients,
cron.initialize,
certificates.ensureFallbackCertificate,
platform.initialize, // requires fallback certs for mail container
addDnsRecords,
configureAdmin,
mailer.start
], callback);
}
function isConfigured(callback) {
// set of rules to see if we have the configs required for cloudron to function
// note this checks for missing configs and not invalid configs
function onPlatformReady(callback) {
callback = callback || NOOP_CALLBACK;
settings.getDnsConfig(function (error, dnsConfig) {
if (error) return callback(error);
debug('onPlatformReady');
if (!dnsConfig) return callback(null, false);
async.series([
taskmanager.resumeTasks
], callback);
}
var isConfigured = (config.isCustomDomain() && (dnsConfig.provider === 'route53' || dnsConfig.provider === 'digitalocean' || dnsConfig.provider === 'noop' || dnsConfig.provider === 'manual')) ||
(!config.isCustomDomain() && dnsConfig.provider === 'caas');
function getConfigStateSync() {
return gConfigState;
}
callback(null, isConfigured);
function checkConfigState(callback) {
callback = callback || NOOP_CALLBACK;
if (!config.fqdn()) {
settings.events.once(settings.DNS_CONFIG_KEY, function () { checkConfigState(); }); // check again later
return callback(null);
}
debug('checkConfigState: configured');
onConfigured(callback);
}
// Persist the DNS provider configuration and the Cloudron's domain, then kick
// off the one-time configuration sequence (non-blocking).
// dnsConfig: provider specific settings/credentials object
// domain: the base domain for this Cloudron
// callback(error): ALREADY_SETUP if a domain is already set, BAD_FIELD for an
//   invalid dns config, INTERNAL_ERROR otherwise
function dnsSetup(dnsConfig, domain, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof callback, 'function');

    if (config.fqdn()) return callback(new CloudronError(CloudronError.ALREADY_SETUP));

    settings.setDnsConfig(dnsConfig, domain, function (error) {
        if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
        if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));

        config.set('fqdn', domain); // set fqdn only after dns config is valid, otherwise cannot re-setup if we failed

        onConfigured(); // do not block

        callback();
    });
}
function syncConfigState(callback) {
assert(!gIsConfigured);
function configureDefaultServer(callback) {
callback = callback || NOOP_CALLBACK;
isConfigured(function (error, configured) {
debug('configureDefaultServer: domain %s', config.fqdn());
if (process.env.BOX_ENV === 'test') return callback();
var certFilePath = path.join(paths.NGINX_CERT_DIR, 'default.cert');
var keyFilePath = path.join(paths.NGINX_CERT_DIR, 'default.key');
if (!fs.existsSync(certFilePath) || !fs.existsSync(keyFilePath)) {
debug('configureDefaultServer: create new cert');
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=%s -nodes', keyFilePath, certFilePath, 'localhost');
safe.child_process.execSync(certCommand);
}
safe.fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR,'ip_based_setup.conf'));
nginx.configureAdmin(certFilePath, keyFilePath, 'default.conf', '', function (error) {
if (error) return callback(error);
debug('syncConfigState: configured = %s', configured);
debug('configureDefaultServer: done');
if (configured) {
exports.events.emit(exports.EVENT_CONFIGURED);
} else {
settings.events.once(settings.DNS_CONFIG_KEY, function () { syncConfigState(); }); // check again later
}
callback(null);
});
}
gIsConfigured = configured;
function configureAdmin(callback) {
callback = callback || NOOP_CALLBACK;
callback();
if (process.env.BOX_ENV === 'test') return callback();
debug('configureAdmin');
sysinfo.getIp(function (error, ip) {
if (error) return callback(error);
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
if (error) return callback(error);
gConfigState.dns = true;
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
if (error) { // currently, this can never happen
debug('Error obtaining certificate. Proceed anyway', error);
return callback();
}
gConfigState.tls = true;
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
});
});
});
}
@@ -238,6 +329,8 @@ function activate(username, password, email, displayName, ip, auditSource, callb
eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, { });
exports.events.emit(exports.EVENT_ACTIVATED);
callback(null, { token: token, expires: expires });
});
});
@@ -259,7 +352,9 @@ function getStatus(callback) {
boxVersionsUrl: config.get('boxVersionsUrl'),
apiServerOrigin: config.apiServerOrigin(), // used by CaaS tool
provider: config.provider(),
cloudronName: cloudronName
cloudronName: cloudronName,
adminFqdn: config.fqdn() ? config.adminFqdn() : null,
configState: gConfigState
});
});
});
@@ -327,7 +422,7 @@ function getConfig(callback) {
}
function sendHeartbeat() {
if (!config.token()) return;
if (config.provider() !== 'caas') return;
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/heartbeat';
superagent.post(url).query({ token: config.token(), version: config.version() }).timeout(30 * 1000).end(function (error, result) {
@@ -345,7 +440,8 @@ function sendAliveStatus(callback) {
};
}
function sendAliveStatusWithAppstoreConfig(appstoreConfig) {
function sendAliveStatusWithAppstoreConfig(backendSettings, appstoreConfig) {
assert.strictEqual(typeof backendSettings, 'object');
assert.strictEqual(typeof appstoreConfig.userId, 'string');
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
assert.strictEqual(typeof appstoreConfig.token, 'string');
@@ -354,7 +450,8 @@ function sendAliveStatus(callback) {
var data = {
domain: config.fqdn(),
version: config.version(),
provider: config.provider()
provider: config.provider(),
backendSettings: backendSettings
};
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
@@ -366,44 +463,74 @@ function sendAliveStatus(callback) {
});
}
// Caas Cloudrons do not store appstore credentials in their local database
if (config.provider() === 'caas') {
if (!config.token()) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, 'no token set'));
settings.getAll(function (error, result) {
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
var backendSettings = {
dnsConfig: {
provider: result[settings.DNS_CONFIG_KEY].provider,
wildcard: result[settings.DNS_CONFIG_KEY].provider === 'manual' ? result[settings.DNS_CONFIG_KEY].wildcard : undefined
},
tlsConfig: {
provider: result[settings.TLS_CONFIG_KEY].provider
},
backupConfig: {
provider: result[settings.BACKUP_CONFIG_KEY].provider
},
mailConfig: {
enabled: result[settings.MAIL_CONFIG_KEY].enabled
},
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY]
};
sendAliveStatusWithAppstoreConfig(result.body);
});
} else {
settings.getAppstoreConfig(function (error, result) {
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
if (!result.token) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, 'not registered yet'));
// Caas Cloudrons do not store appstore credentials in their local database
if (config.provider() === 'caas') {
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
sendAliveStatusWithAppstoreConfig(result);
});
}
}
sendAliveStatusWithAppstoreConfig(backendSettings, result.body);
});
} else {
settings.getAppstoreConfig(function (error, result) {
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
function ensureDkimKeySync() {
var dkimPrivateKeyFile = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn() + '/private');
var dkimPublicKeyFile = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn() + '/public');
if (!result.token) {
debug('sendAliveStatus: Cloudron not yet registered');
return callback(null);
}
if (fs.existsSync(dkimPrivateKeyFile) && fs.existsSync(dkimPublicKeyFile)) {
debug('DKIM keys already present');
return;
}
debug('Generating new DKIM keys');
child_process.execSync('openssl genrsa -out ' + dkimPrivateKeyFile + ' 1024');
child_process.execSync('openssl rsa -in ' + dkimPrivateKeyFile + ' -out ' + dkimPublicKeyFile + ' -pubout -outform PEM');
sendAliveStatusWithAppstoreConfig(backendSettings, result);
});
}
});
}
function readDkimPublicKeySync() {
var dkimPublicKeyFile = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn() + '/public');
if (!config.fqdn()) {
debug('Cannot read dkim public key without a domain.', safe.error);
return null;
}
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
var dkimPrivateKeyFile = path.join(dkimPath, 'private');
var dkimPublicKeyFile = path.join(dkimPath, 'public');
if (!fs.existsSync(dkimPrivateKeyFile) || !fs.existsSync(dkimPublicKeyFile)) {
debug('Generating new DKIM keys');
if (!safe.fs.mkdirSync(dkimPath) && safe.error.code !== 'EEXIST') {
debug('Error creating dkim.', safe.error);
return null;
}
child_process.execSync('openssl genrsa -out ' + dkimPrivateKeyFile + ' 1024');
child_process.execSync('openssl rsa -in ' + dkimPrivateKeyFile + ' -out ' + dkimPublicKeyFile + ' -pubout -outform PEM');
} else {
debug('DKIM keys already present');
}
var publicKey = safe.fs.readFileSync(dkimPublicKeyFile, 'utf8');
if (publicKey === null) {
@@ -447,8 +574,8 @@ function txtRecordsWithSpf(callback) {
});
}
function addDnsRecords() {
var callback = NOOP_CALLBACK;
function addDnsRecords(callback) {
callback = callback || NOOP_CALLBACK;
if (process.env.BOX_ENV === 'test') return callback();
@@ -473,7 +600,7 @@ function addDnsRecords() {
records.push(webadminRecord);
records.push(dkimRecord);
} else {
// for non-custom domains, we show a nakeddomain.html page
// for non-custom domains, we show a noapp.html page
var nakedDomainRecord = { subdomain: '', type: 'A', values: [ ip ] };
records.push(nakedDomainRecord);
@@ -633,8 +760,8 @@ function doUpdate(boxUpdateInfo, callback) {
apiServerOrigin: config.apiServerOrigin(),
webServerOrigin: config.webServerOrigin(),
fqdn: config.fqdn(),
tlsCert: fs.readFileSync(path.join(paths.NGINX_CERT_DIR, 'host.cert'), 'utf8'),
tlsKey: fs.readFileSync(path.join(paths.NGINX_CERT_DIR, 'host.key'), 'utf8'),
tlsCert: config.tlsCert(),
tlsKey: config.tlsKey(),
isCustomDomain: config.isCustomDomain(),
isDemo: config.isDemo(),
@@ -654,6 +781,8 @@ function doUpdate(boxUpdateInfo, callback) {
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, data);
progress.set(progress.UPDATE, 5, 'Downloading and extracting new version');
shell.sudo('update', [ UPDATE_CMD, boxUpdateInfo.sourceTarballUrl, JSON.stringify(data) ], function (error) {
if (error) return updateError(error);
@@ -663,14 +792,14 @@ function doUpdate(boxUpdateInfo, callback) {
}
function installAppBundle(callback) {
callback = callback || NOOP_CALLBACK;
assert.strictEqual(typeof callback, 'function');
if (fs.existsSync(paths.FIRST_RUN_FILE)) return callback();
var bundle = config.get('appBundle');
debug('initialize: installing app bundle on first run: %j', bundle);
if (!bundle || bundle.length === 0) {
debug('installAppBundle: no bundle set');
return callback();
}
if (!bundle || bundle.length === 0) return callback();
async.eachSeries(bundle, function (appInfo, iteratorCallback) {
debug('autoInstall: installing %s at %s', appInfo.appstoreId, appInfo.location);
@@ -686,6 +815,8 @@ function installAppBundle(callback) {
}, function (error) {
if (error) debug('autoInstallApps: ', error);
fs.writeFileSync(paths.FIRST_RUN_FILE, 'been there, done that', 'utf8');
callback();
});
}
@@ -779,12 +910,51 @@ function migrate(options, callback) {
if (!options.domain) return doMigrate(options, callback);
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint');
var dnsConfig = _.pick(options, 'domain', 'provider', 'accessKeyId', 'secretAccessKey', 'region', 'endpoint', 'token');
settings.setDnsConfig(dnsConfig, function (error) {
settings.setDnsConfig(dnsConfig, options.domain, function (error) {
if (error && error.reason === SettingsError.BAD_FIELD) return callback(new CloudronError(CloudronError.BAD_FIELD, error.message));
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
// TODO: should probably rollback dns config if migrate fails
doMigrate(options, callback);
});
}
// Re-publish the A records for the system subdomains and all installed apps
// using the server's current public IP (dynamic DNS support; run from cron).
// callback is optional.
function refreshDNS(callback) {
    callback = callback || NOOP_CALLBACK;

    sysinfo.getIp(function (error, ip) {
        if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));

        debug('refreshDNS: current ip %s', ip);

        addDnsRecords(function (error) {
            if (error) return callback(error);

            debug('refreshDNS: done for system records');

            apps.getAll(function (error, result) {
                if (error) return callback(error);

                async.each(result, function (app, callback) { // NOTE: iterator callback shadows the outer callback
                    // get the current record before updating it
                    subdomains.get(app.location, 'A', function (error, values) {
                        if (error) return callback(error);

                        // refuse to update any existing DNS record for custom domains that we did not create
                        // NOTE(review): the Error is passed as the 2nd argument, which async.each ignores,
                        // so this app is skipped rather than failing the whole refresh — presumably
                        // intentional, but confirm it should not surface as an error
                        if (values.length !== 0 && !app.dnsRecordId) return callback(null, new Error('DNS Record already exists'));

                        subdomains.upsert(app.location, 'A', [ ip ], callback);
                    });
                }, function (error) {
                    if (error) return callback(error);

                    debug('refreshDNS: done for apps');

                    callback();
                });
            });
        });
    });
}
+13
View File
@@ -35,6 +35,9 @@ exports = module.exports = {
isDev: isDev,
isDemo: isDemo,
tlsCert: tlsCert,
tlsKey: tlsKey,
// for testing resets to defaults
_reset: _reset
};
@@ -216,3 +219,13 @@ function isDemo() {
function provider() {
return get('provider');
}
// Read the host TLS certificate (PEM) from the configs directory.
// Returns the file contents as a UTF-8 string, or null if it cannot be read.
function tlsCert() {
    return safe.fs.readFileSync(path.join(baseDir(), 'configs/host.cert'), 'utf8');
}
// Read the host TLS private key (PEM) from the configs directory.
// Returns the file contents as a UTF-8 string, or null if it cannot be read.
function tlsKey() {
    return safe.fs.readFileSync(path.join(baseDir(), 'configs/host.key'), 'utf8');
}
+5 -1
View File
@@ -26,6 +26,8 @@ exports = module.exports = {
ADMIN_GROUP_ID: 'admin',
NGINX_ADMIN_CONFIG_FILE_NAME: 'admin.conf',
GHOST_USER_FILE: '/tmp/cloudron_ghost.json',
DEFAULT_TOKEN_EXPIRATION: 7 * 24 * 60 * 60 * 1000, // 1 week
@@ -34,6 +36,8 @@ exports = module.exports = {
DEMO_USERNAME: 'cloudron',
DKIM_SELECTOR: 'cloudron'
DKIM_SELECTOR: 'cloudron',
AUTOUPDATE_PATTERN_NEVER: 'never'
};
+30 -4
View File
@@ -11,6 +11,7 @@ var apps = require('./apps.js'),
certificates = require('./certificates.js'),
cloudron = require('./cloudron.js'),
config = require('./config.js'),
constants = require('./constants.js'),
CronJob = require('cron').CronJob,
debug = require('debug')('box:cron'),
eventlog = require('./eventlog.js'),
@@ -22,8 +23,8 @@ var apps = require('./apps.js'),
var gAutoupdaterJob = null,
gBoxUpdateCheckerJob = null,
gAppUpdateCheckerJob = null,
gHeartbeatJob = null,
gAliveJob = null,
gHeartbeatJob = null, // for CaaS health check
gAliveJob = null, // send periodic stats
gBackupJob = null,
gCleanupTokensJob = null,
gCleanupBackupsJob = null,
@@ -31,7 +32,8 @@ var gAutoupdaterJob = null,
gSchedulerSyncJob = null,
gCertificateRenewJob = null,
gCheckDiskSpaceJob = null,
gCleanupEventlogJob = null;
gCleanupEventlogJob = null,
gDynamicDNSJob = null;
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
var AUDIT_SOURCE = { userId: null, username: 'cron' };
@@ -63,12 +65,14 @@ function initialize(callback) {
settings.events.on(settings.TIME_ZONE_KEY, recreateJobs);
settings.events.on(settings.AUTOUPDATE_PATTERN_KEY, autoupdatePatternChanged);
settings.events.on(settings.DYNAMIC_DNS_KEY, dynamicDNSChanged);
settings.getAll(function (error, allSettings) {
if (error) return callback(error);
recreateJobs(allSettings[settings.TIME_ZONE_KEY]);
autoupdatePatternChanged(allSettings[settings.AUTOUPDATE_PATTERN_KEY]);
dynamicDNSChanged(allSettings[settings.DYNAMIC_DNS_KEY]);
callback();
});
@@ -172,7 +176,7 @@ function autoupdatePatternChanged(pattern) {
if (gAutoupdaterJob) gAutoupdaterJob.stop();
if (pattern === 'never') return;
if (pattern === constants.AUTOUPDATE_PATTERN_NEVER) return;
gAutoupdaterJob = new CronJob({
cronTime: pattern,
@@ -193,6 +197,25 @@ function autoupdatePatternChanged(pattern) {
});
}
// React to the dynamic DNS setting: start a 10-minute cron job that refreshes
// DNS records when enabled, stop and clear it when disabled.
function dynamicDNSChanged(enabled) {
    assert.strictEqual(typeof enabled, 'boolean');
    assert(gBoxUpdateCheckerJob);

    debug('Dynamic DNS setting changed to %s', enabled);

    if (!enabled) {
        if (gDynamicDNSJob) gDynamicDNSJob.stop();
        gDynamicDNSJob = null;
        return;
    }

    gDynamicDNSJob = new CronJob({
        cronTime: '00 */10 * * * *',
        onTick: cloudron.refreshDNS,
        start: true,
        timeZone: gBoxUpdateCheckerJob.cronTime.zone // hack
    });
}
function uninitialize(callback) {
assert.strictEqual(typeof callback, 'function');
@@ -235,5 +258,8 @@ function uninitialize(callback) {
if (gCertificateRenewJob) gCertificateRenewJob.stop();
gCertificateRenewJob = null;
if (gDynamicDNSJob) gDynamicDNSJob.stop();
gDynamicDNSJob = null;
callback();
}
+14 -1
View File
@@ -4,7 +4,8 @@ exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: require('./waitfordns.js')
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
@@ -111,3 +112,15 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
});
}
// This backend needs no domain validation; echo back a credentials object
// containing only the provider name, to be persisted as the dns config.
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    return callback(null, { provider: dnsConfig.provider });
}
+29 -1
View File
@@ -4,12 +4,14 @@ exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: require('./waitfordns.js')
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
async = require('async'),
debug = require('debug')('box:dns/digitalocean'),
dns = require('native-dns'),
SubdomainError = require('../subdomains.js').SubdomainError,
superagent = require('superagent'),
util = require('util');
@@ -171,3 +173,29 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
});
}
// Validate the DigitalOcean DNS configuration: check the domain has resolvable
// nameservers, then prove the token works by upserting the admin ('my') A record.
// Calls back with the credentials object ({ provider, token }) on success.
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    var credentials = {
        provider: dnsConfig.provider,
        token: dnsConfig.token
    };

    if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here

    dns.resolveNs(domain, function (error, nameservers) {
        if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
        if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));

        // a successful upsert confirms the token has write access to the zone
        upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
            if (error) return callback(error);

            debug('verifyDnsConfig: A record added with change id %s', changeId);

            callback(null, credentials);
        });
    });
}
+12 -1
View File
@@ -10,7 +10,8 @@ exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: require('./waitfordns.js')
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
@@ -55,3 +56,13 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
callback(new Error('not implemented'));
}
// DNS config verification is not supported by this backend yet.
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: dnsConfig object
    var notImplemented = new Error('not implemented');
    callback(notImplemented);
}
+73 -2
View File
@@ -4,13 +4,15 @@ exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: require('./waitfordns.js')
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
async = require('async'),
debug = require('debug')('box:dns/noop'),
dns = require('native-dns'),
SubdomainError = require('../subdomains.js').SubdomainError,
sysinfo = require('../sysinfo.js'),
util = require('util');
function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
@@ -47,3 +49,72 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
return callback();
}
// Verify a manual/wildcard DNS setup: query every authoritative nameserver of
// the domain directly and check that 'my.<domain>' resolves to this server's IP.
// Succeeds only if all reachable nameservers agree (timeouts are tolerated).
// Calls back with { provider, wildcard } on success, BAD_FIELD otherwise.
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    var adminDomain = 'my.' + domain;

    dns.resolveNs(domain, function (error, nameservers) {
        if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));

        // async.every only reports bools
        var stashedError = null;

        async.every(nameservers, function (nameserver, callback) {
            // ns records cannot have cname
            dns.resolve4(nameserver, function (error, nsIps) {
                if (error || !nsIps || nsIps.length === 0) {
                    stashedError = new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
                    return callback(false);
                }

                // every resolved address of this nameserver must return the expected A record
                async.every(nsIps, function (nsIp, callback) {
                    var req = dns.Request({
                        question: dns.Question({ name: adminDomain, type: 'A' }),
                        server: { address: nsIp },
                        timeout: 5000
                    });

                    req.on('timeout', function () {
                        debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
                        return callback(true); // should be ok if dns server is down
                    });

                    req.on('message', function (error, message) {
                        if (error) {
                            debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, adminDomain, error);
                            return callback(false);
                        }

                        var answer = message.answer;

                        if (!answer || answer.length === 0) {
                            debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', message);
                            return callback(false);
                        }

                        debug('verifyDnsConfig: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, adminDomain, answer, ip);

                        // any answer record matching our IP is enough
                        var match = answer.some(function (a) {
                            return a.address === ip;
                        });

                        if (match) return callback(true); // done!

                        callback(false);
                    });

                    req.send();
                }, callback);
            });
        }, function (success) {
            if (stashedError) return callback(stashedError);
            if (!success) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'The domain ' + adminDomain + ' does not resolve to the server\'s IP ' + ip));

            callback(null, { provider: dnsConfig.provider, wildcard: !!dnsConfig.wildcard });
        });
    });
}
+14 -3
View File
@@ -4,13 +4,12 @@ exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: waitForDns
waitForDns: waitForDns,
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
debug = require('debug')('box:dns/noop'),
SubdomainError = require('../subdomains.js').SubdomainError,
sysinfo = require('../sysinfo.js'),
util = require('util');
function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
@@ -57,3 +56,15 @@ function waitForDns(domain, value, type, options, callback) {
callback();
}
// The noop backend performs no validation; hand back a minimal credentials
// object holding only the provider name.
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    var result = { provider: dnsConfig.provider };
    return callback(null, result);
}
+42 -1
View File
@@ -5,6 +5,7 @@ exports = module.exports = {
get: get,
del: del,
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig,
// not part of "dns" interface
getHostedZone: getHostedZone
@@ -13,8 +14,10 @@ exports = module.exports = {
var assert = require('assert'),
AWS = require('aws-sdk'),
debug = require('debug')('box:dns/route53'),
dns = require('native-dns'),
SubdomainError = require('../subdomains.js').SubdomainError,
util = require('util');
util = require('util'),
_ = require('underscore');
function getDnsCredentials(dnsConfig) {
assert.strictEqual(typeof dnsConfig, 'object');
@@ -209,3 +212,41 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
});
}
// Validate the Route53 DNS configuration: the domain's public nameservers must
// exactly match the hosted zone's delegation set, and the credentials must be
// able to upsert the admin ('my') A record. Calls back with the normalized
// credentials ({ provider, accessKeyId, secretAccessKey, region, endpoint }).
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    var credentials = {
        provider: dnsConfig.provider,
        accessKeyId: dnsConfig.accessKeyId,
        secretAccessKey: dnsConfig.secretAccessKey,
        region: dnsConfig.region || 'us-east-1',
        endpoint: dnsConfig.endpoint || null
    };

    if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here

    dns.resolveNs(domain, function (error, nameservers) {
        if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
        if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));

        getHostedZone(credentials, domain, function (error, zone) {
            if (error) return callback(error);

            // compare sorted lists so ordering differences do not matter
            if (!_.isEqual(zone.DelegationSet.NameServers.sort(), nameservers.sort())) {
                debug('verifyDnsConfig: %j and %j do not match', nameservers, zone.DelegationSet.NameServers);
                return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Route53'));
            }

            // a successful upsert confirms the credentials have write access to the zone
            upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
                if (error) return callback(new SubdomainError(SubdomainError.INTERNAL_ERROR, error));

                debug('verifyDnsConfig: A record added with change id %s', changeId);

                callback(null, credentials);
            });
        });
    });
}
+3 -13
View File
@@ -54,14 +54,6 @@ function debugApp(app, args) {
debug(prefix + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)));
}
function targetBoxVersion(manifest) {
if ('targetBoxVersion' in manifest) return manifest.targetBoxVersion;
if ('minBoxVersion' in manifest) return manifest.minBoxVersion;
return '99999.99999.99999'; // compatible with the latest version
}
function pullImage(manifest, callback) {
var docker = exports.connection;
@@ -135,7 +127,6 @@ function createSubcontainer(app, name, cmd, options, callback) {
isAppContainer = !cmd; // non app-containers are like scheduler containers
var manifest = app.manifest;
var developmentMode = !!manifest.developmentMode;
var exposedPorts = {}, dockerPortBindings = { };
var domain = app.altDomain || config.appFqdn(app.location);
var stdEnv = [
@@ -165,8 +156,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
// first check db record, then manifest
var memoryLimit = app.memoryLimit || manifest.memoryLimit || 0;
if (developmentMode) {
// developerMode does not restrict memory usage
if (memoryLimit === -1) { // unrestricted
memoryLimit = 0;
} else if (memoryLimit === 0 || memoryLimit < constants.DEFAULT_MEMORY_LIMIT) { // ensure we never go below minimum (in case we change the default)
memoryLimit = constants.DEFAULT_MEMORY_LIMIT;
@@ -187,7 +177,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
name: name, // used for filtering logs
Tty: isAppContainer,
Image: app.manifest.dockerImage,
Cmd: (isAppContainer && developmentMode) ? [ '/bin/bash', '-c', 'echo "Development mode. Use cloudron exec to debug. Sleeping" && sleep infinity' ] : cmd,
Cmd: (isAppContainer && app.debugMode && app.debugMode.cmd) ? app.debugMode.cmd : cmd,
Env: stdEnv.concat(addonEnv).concat(portEnv),
ExposedPorts: isAppContainer ? exposedPorts : { },
Volumes: { // see also ReadonlyRootfs
@@ -205,7 +195,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
MemorySwap: memoryLimit, // Memory + Swap
PortBindings: isAppContainer ? dockerPortBindings : { },
PublishAllPorts: false,
ReadonlyRootfs: !developmentMode, // see also Volumes in startContainer
ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
RestartPolicy: {
"Name": isAppContainer ? "always" : "no",
"MaximumRetryCount": 0
+2 -2
View File
@@ -6,7 +6,7 @@
exports = module.exports = {
// a version bump means that all containers (apps and addons) are recreated
'version': 43,
'version': 45,
'baseImages': [ 'cloudron/base:0.9.0' ],
@@ -17,7 +17,7 @@ exports = module.exports = {
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.15.0' },
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.11.0' },
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.10.0' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.24.0' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.29.0' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.10.0' }
}
};
+2 -1
View File
@@ -152,7 +152,8 @@ function mailboxSearch(req, res, next) {
objectcategory: 'mailbox',
cn: mailbox.name,
uid: mailbox.name,
mail: mailbox.name + '@' + config.fqdn()
mail: mailbox.name + '@' + config.fqdn(),
ownerType: mailbox.ownerType
}
};
Executable → Regular
+42 -3
View File
@@ -12,26 +12,65 @@ var assert = require('assert'),
var COLLECT_LOGS_CMD = path.join(__dirname, 'scripts/collectlogs.sh');
var CRASH_LOG_TIMESTAMP_OFFSET = 1000 * 60 * 60; // 60 min
var CRASH_LOG_TIMESTAMP_FILE = '/tmp/crashlog.timestamp';
var CRASH_LOG_STASH_FILE = '/tmp/crashlog';
var CRASH_LOG_FILE_LIMIT = 2 * 1024 * 1024; // 2mb
// Gather logs for a systemd unit via the privileged collectlogs.sh script and
// pass them to callback with a trailing separator banner appended.
function collectLogs(unitName, callback) {
    assert.strictEqual(typeof unitName, 'string');
    assert.strictEqual(typeof callback, 'function');

    var output = safe.child_process.execSync('sudo ' + COLLECT_LOGS_CMD + ' ' + unitName, { encoding: 'utf8' });
    callback(null, output + '\n\n=====================================\n\n');
}
// Append crash logs to the stash file, dropping them once the file has grown
// past CRASH_LOG_FILE_LIMIT to keep it bounded.
function stashLogs(logs) {
    var fileInfo = safe.fs.statSync(CRASH_LOG_STASH_FILE);
    var tooBig = !!fileInfo && fileInfo.size > CRASH_LOG_FILE_LIMIT;

    if (tooBig) {
        console.error('Dropping logs since crash file has become too big');
        return;
    }

    safe.fs.writeFileSync(CRASH_LOG_STASH_FILE, logs, { flag: 'a' }); // append
}
function sendFailureLogs(processName, options) {
assert.strictEqual(typeof processName, 'string');
assert.strictEqual(typeof options, 'object');
collectLogs(options.unit || processName, function (error, result) {
collectLogs(options.unit || processName, function (error, newLogs) {
if (error) {
console.error('Failed to collect logs.', error);
result = util.format('Failed to collect logs.', error);
newLogs = util.format('Failed to collect logs.', error);
}
console.log('Sending failure logs for', processName);
mailer.unexpectedExit(processName, result);
var timestamp = safe.fs.readFileSync(CRASH_LOG_TIMESTAMP_FILE, 'utf8');
// check if we already sent a mail in the last CRASH_LOG_TIME_OFFSET window
if (timestamp && (parseInt(timestamp) + CRASH_LOG_TIMESTAMP_OFFSET) > Date.now()) {
console.log('Crash log already sent within window. Stashing logs.');
return stashLogs(newLogs);
}
var stashedLogs = safe.fs.readFileSync(CRASH_LOG_STASH_FILE, 'utf8');
var compiledLogs = stashedLogs ? (stashedLogs + newLogs) : newLogs;
var mailSubject = processName + (stashedLogs ? ' and others' : '');
mailer.unexpectedExit(mailSubject, compiledLogs, function (error) {
if (error) {
console.log('Error sending crashlog. Stashing logs.');
return stashLogs(newLogs);
}
// write the new timestamp file and delete stash file
safe.fs.writeFileSync(CRASH_LOG_TIMESTAMP_FILE, String(Date.now()));
safe.fs.unlinkSync(CRASH_LOG_STASH_FILE);
});
});
}
+10 -7
View File
@@ -1,18 +1,21 @@
<%if (format === 'text') { %>
Dear Admin,
Dear Cloudron Admin,
The application titled '<%= title %>' that you installed at <%= appFqdn %>
is not responding.
The application '<%= title %>' installed at <%= appFqdn %> is not responding.
This is most likely a problem in the application.
You are receiving this email because you are an Admin of the Cloudron at <%= fqdn %>.
To resolve this, you can try the following:
* Restart the app in the app configuration dialog
* Restore the app to the latest backup
* Contact us via support@cloudron.io or https://chat.cloudron.io
Thank you,
Application WatchDog
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+6 -5
View File
@@ -1,18 +1,19 @@
<%if (format === 'text') { %>
Dear Admin,
Dear Cloudron Admin,
A new version <%= updateInfo.manifest.version %> of the app '<%= app.manifest.title %>' installed at <%= app.fqdn %> is available!
a new version <%= updateInfo.manifest.version %> of the app '<%= app.manifest.title %>' installed at <%= app.fqdn %> is available!
The app will update automatically tonight. Alternately, update immediately at <%= webadminUrl %>.
Changes:
<%= updateInfo.manifest.changelog %>
Thank you,
your Cloudron
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+9 -6
View File
@@ -1,17 +1,20 @@
<%if (format === 'text') { %>
Dear Cloudron Team,
Dear Cloudron Admin,
Backup of <%= fqdn %> failed.
Thank you,
Your Cloudron
creating a backup of <%= fqdn %> has failed.
-------------------------------------
<%- message %>
-------------------------------------
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
@@ -1,13 +1,20 @@
<%if (format === 'text') { %>
Dear Cloudron Team,
<%= domain %> was not renewed.
Dear Cloudron Admin,
The certificate for <%= domain %> could not be renewed.
-------------------------------------
<%- message %>
Thank you,
Your Cloudron
-------------------------------------
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
-1
View File
@@ -12,4 +12,3 @@ Subject: <%= subject %>
<% } else { %>
<% } %>
+25
View File
@@ -0,0 +1,25 @@
<%if (format === 'text') { %>
Dear Cloudron Admin,
The <%= program %> on <%= fqdn %> exited unexpectedly!
The program has been restarted but should this message appear repeatedly,
you should give the program more memory.
Please see some excerpt of the logs below.
-------------------------------------
<%- context %>
-------------------------------------
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+8 -5
View File
@@ -1,19 +1,22 @@
<%if (format === 'text') { %>
Dear Cloudron Team,
Dear Cloudron Admin,
<%= fqdn %> is running out of disk space.
Disk space logs are attached.
Thank you,
Your Cloudron
-------------------------------------
<%- message %>
-------------------------------------
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+4 -12
View File
@@ -2,18 +2,15 @@
Hi <%= user.displayName || user.username || user.alternateEmail || user.email %>,
Someone, hopefully you, has requested your <%= cloudronName %> account's password
Someone, hopefully you, has requested your account's password
be reset. If you did not request this reset, please ignore this message.
To reset your password, please visit the following page:
<%= resetLink %>
When you visit the above page, you will be prompted to enter a new password.
After you have submitted the form, you can login using the new password.
Powered by Cloudron.io
Powered by https://cloudron.io
<% } else { %>
@@ -24,7 +21,7 @@ Powered by Cloudron.io
<h3>Hi <%= user.displayName || user.username || user.alternateEmail || user.email %>,</h3>
<p>
Someone, hopefully you, has requested your <%= cloudronName %> account's password be reset.<br/>
Someone, hopefully you, has requested your account's password be reset.<br/>
If you did not request this reset, please ignore this message.
</p>
@@ -32,16 +29,11 @@ Powered by Cloudron.io
<a href="<%= resetLink %>">Click to reset your password</a>
</p>
<p>
When you visit the above page, you will be prompted to enter a new password.<br/>
After you have submitted the form, you can login using the new password.
</p>
<br/>
<br/>
<div style="font-size: 10px; color: #333333; background: #ffffff;">
Powered by <a href="https://cloudron.io">Cloudron</a>.
Powered by <a href="https://cloudron.io">Cloudron</a>
</div>
</center>
+9 -6
View File
@@ -1,19 +1,22 @@
<%if (format === 'text') { %>
Dear Cloudron Team,
Dear Cloudron Admin,
Unfortunately <%= program %> on <%= fqdn %> exited unexpectedly!
Please see some excerpt of the logs below.
Thank you,
Your Cloudron
Please see some excerpt of the logs below:
-------------------------------------
<%- context %>
-------------------------------------
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+33 -2
View File
@@ -1,8 +1,8 @@
<%if (format === 'text') { %>
Hi <%= cloudronName %> Admin,
Dear <%= cloudronName %> Admin,
User with email <%= user.alternateEmail || user.email %> was added to <%= cloudronName %>.
A new user with email <%= user.alternateEmail || user.email %> was added to <%= cloudronName %>.
<% if (inviteLink) { %>
As requested, this user has not been sent an invitation email.
@@ -12,7 +12,38 @@ To set a password and perform any configuration on behalf of the user, please us
<% } %>
Powered by https://cloudron.io
<% } else { %>
<center>
<img src="<%= cloudronAvatarUrl %>" width="128px" height="128px"/>
<h3>Dear <%= cloudronName %> Admin,</h3>
<p>
A new user with email <%= user.alternateEmail || user.email %> was added to <%= cloudronName %>.
</p>
<% if (inviteLink) { %>
<p>
As requested, this user has not been sent an invitation email.<br/>
<br/>
<a href="<%= inviteLink %>">Set a password and perform any configuration on behalf of the user</a>
</p>
<% } %>
<br/>
<br/>
<div style="font-size: 10px; color: #333333; background: #ffffff;">
Powered by <a href="https://cloudron.io">Cloudron</a>.
</div>
</center>
<img src="https://analytics.cloudron.io/piwik.php?idsite=2&rec=1&e_c=CloudronEmail&e_a=userAdded" style="border:0" alt="" />
<% } %>
+5 -6
View File
@@ -1,15 +1,14 @@
<%if (format === 'text') { %>
Dear Admin,
Dear Cloudron Admin,
User <%= user.username || user.alternateEmail || user.email %> <%= event %> in the Cloudron at <%= fqdn %>.
User <%= user.username || user.alternateEmail || user.email %> <%= event %>.
You are receiving this email because you are an Admin of the Cloudron at <%= fqdn %>.
Thank you,
User Manager
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+8 -8
View File
@@ -1,10 +1,10 @@
<%if (format === 'text') { %>
Hi <%= user.displayName || user.username || user.alternateEmail || user.email %>,
Dear <%= user.displayName || user.username || user.alternateEmail || user.email %>,
Welcome to <%= cloudronName %>!
To get started, activate your account:
Follow the link to get started.
<%= setupLink %>
<% if (invitor && invitor.email) { %>
@@ -12,7 +12,7 @@ You are receiving this email because you were invited by <%= invitor.email %>.
<% } %>
Powered by Cloudron.io
Powered by https://cloudron.io
<% } else { %>
@@ -25,20 +25,20 @@ Powered by Cloudron.io
<h2>Welcome to <%= cloudronName %>!</h2>
<p>
To get started, <a href="<%= setupLink %>">activate your account</a>.
<a href="<%= setupLink %>">Get started</a>.
</p>
<br/>
<br/>
<div style="font-size: 10px; color: #333333; background: #ffffff;">
Powered by <a href="https://cloudron.io">Cloudron</a>.
<br/>
<% if (invitor && invitor.email) { %>
You are receiving this email because you were invited by <%= invitor.email %>.
<% } %>
<br/>
Powered by <a href="https://cloudron.io">Cloudron</a>
</div>
</center>
+69 -31
View File
@@ -1,8 +1,8 @@
'use strict';
exports = module.exports = {
initialize: initialize,
uninitialize: uninitialize,
start: start,
stop: stop,
userAdded: userAdded,
userRemoved: userRemoved,
@@ -15,6 +15,7 @@ exports = module.exports = {
unexpectedExit: unexpectedExit,
appDied: appDied,
oomEvent: oomEvent,
outOfDiskSpace: outOfDiskSpace,
backupFailed: backupFailed,
@@ -34,7 +35,6 @@ exports = module.exports = {
var assert = require('assert'),
async = require('async'),
cloudron = require('./cloudron.js'),
config = require('./config.js'),
debug = require('debug')('box:mailer'),
dns = require('native-dns'),
@@ -50,29 +50,36 @@ var assert = require('assert'),
util = require('util'),
_ = require('underscore');
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
var MAIL_TEMPLATES_DIR = path.join(__dirname, 'mail_templates');
var gMailQueue = [ ],
gDnsReady = false,
gCheckDnsTimerId = null;
function initialize(callback) {
function splatchError(error) {
var result = { };
Object.getOwnPropertyNames(error).forEach(function (key) {
var value = this[key];
if (value instanceof Error) value = splatchError(value);
result[key] = value;
}, error /* thisArg */);
return util.inspect(result, { depth: null, showHidden: true });
}
function start(callback) {
assert.strictEqual(typeof callback, 'function');
if (cloudron.isConfiguredSync()) {
checkDns();
} else {
cloudron.events.on(cloudron.EVENT_CONFIGURED, checkDns);
}
checkDns();
callback(null);
}
function uninitialize(callback) {
function stop(callback) {
assert.strictEqual(typeof callback, 'function');
cloudron.events.removeListener(cloudron.EVENT_CONFIGURED, checkDns);
// TODO: interrupt processQueue as well
clearTimeout(gCheckDnsTimerId);
gCheckDnsTimerId = null;
@@ -157,14 +164,15 @@ function processQueue() {
// note : this function should NOT access the database. it is called by the crashnotifier
// which does not initialize mailer or the databse
function sendMails(queue) {
function sendMails(queue, callback) {
assert(util.isArray(queue));
callback = callback || NOOP_CALLBACK;
docker.getContainer('mail').inspect(function (error, data) {
if (error) return console.error(error);
if (error) return callback(error);
var mailServerIp = safe.query(data, 'NetworkSettings.Networks.cloudron.IPAddress');
if (!mailServerIp) return debug('Error querying mail server IP');
if (!mailServerIp) return callback('Error querying mail server IP');
var transport = nodemailer.createTransport(smtpTransport({
host: mailServerIp,
@@ -181,6 +189,8 @@ function sendMails(queue) {
callback(null);
}, function done() {
debug('Done processing mail queue');
callback(null);
});
});
}
@@ -229,7 +239,7 @@ function mailUserEventToAdmins(user, event) {
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('%s %s in Cloudron %s', user.username || user.alternateEmail || user.email, event, config.fqdn()),
subject: util.format('[%s] %s %s', config.fqdn(), user.username || user.alternateEmail || user.email, event),
text: render('user_event.ejs', { fqdn: config.fqdn(), user: user, event: event, format: 'text' }),
};
@@ -298,15 +308,22 @@ function userAdded(user, inviteSent) {
fqdn: config.fqdn(),
user: user,
inviteLink: inviteSent ? null : config.adminOrigin() + '/api/v1/session/account/setup.html?reset_token=' + user.resetToken,
format: 'text',
cloudronName: cloudronName
cloudronName: cloudronName,
cloudronAvatarUrl: config.adminOrigin() + '/api/v1/cloudron/avatar'
};
var templateDataText = JSON.parse(JSON.stringify(templateData));
templateDataText.format = 'text';
var templateDataHTML = JSON.parse(JSON.stringify(templateData));
templateDataHTML.format = 'html';
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('%s: user %s added', cloudronName, user.alternateEmail || user.email),
text: render('user_added.ejs', templateData)
subject: util.format('[%s] User %s added', config.fqdn(), user.alternateEmail || user.email),
text: render('user_added.ejs', templateDataText),
html: render('user_added.ejs', templateDataHTML)
};
enqueue(mailOptions);
@@ -359,7 +376,7 @@ function passwordReset(user) {
var mailOptions = {
from: mailConfig().from,
to: user.alternateEmail || user.email,
subject: cloudronName + ': Password Reset',
subject: util.format('[%s] Password Reset', config.fqdn()),
text: render('password_reset.ejs', templateDataText),
html: render('password_reset.ejs', templateDataHTML)
};
@@ -371,7 +388,7 @@ function passwordReset(user) {
function appDied(app) {
assert.strictEqual(typeof app, 'object');
debug('Sending mail for app %s @ %s died', app.id, app.location);
debug('Sending mail for app %s @ %s died', app.id, app.fqdn);
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
@@ -379,8 +396,8 @@ function appDied(app) {
var mailOptions = {
from: mailConfig().from,
to: config.provider() === 'caas' ? 'support@cloudron.io' : adminEmails.concat('support@cloudron.io').join(', '),
subject: util.format('App %s is down', app.location),
text: render('app_down.ejs', { fqdn: config.fqdn(), title: app.manifest.title, appFqdn: config.appFqdn(app.location), format: 'text' })
subject: util.format('[%s] App %s is down', config.fqdn(), app.fqdn),
text: render('app_down.ejs', { fqdn: config.fqdn(), title: app.manifest.title, appFqdn: app.fqdn, format: 'text' })
};
enqueue(mailOptions);
@@ -441,7 +458,7 @@ function appUpdateAvailable(app, updateInfo) {
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('%s has a new update available', app.fqdn),
subject: util.format('[%s] Update available for %s', config.fqdn(), app.fqdn),
text: render('app_update_available.ejs', { fqdn: config.fqdn(), webadminUrl: config.adminOrigin(), app: app, updateInfo: updateInfo, format: 'text' })
};
@@ -466,7 +483,9 @@ function outOfDiskSpace(message) {
});
}
function backupFailed(message) {
function backupFailed(error) {
var message = splatchError(error);
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
@@ -490,7 +509,7 @@ function certificateRenewalError(domain, message) {
var mailOptions = {
from: mailConfig().from,
to: config.provider() === 'caas' ? 'support@cloudron.io' : adminEmails.join(', '),
to: config.provider() === 'caas' ? 'support@cloudron.io' : adminEmails.concat('support@cloudron.io').join(', '),
subject: util.format('[%s] Certificate renewal error', domain),
text: render('certificate_renewal_error.ejs', { domain: domain, message: message, format: 'text' })
};
@@ -499,12 +518,31 @@ function certificateRenewalError(domain, message) {
});
}
// this function bypasses the queue intentionally. it is also expected to work without the mailer module initialized
// NOTE: crashnotifier should be able to send mail when there is no db
function unexpectedExit(program, context) {
function oomEvent(program, context) {
assert.strictEqual(typeof program, 'string');
assert.strictEqual(typeof context, 'string');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
to: config.provider() === 'caas' ? 'support@cloudron.io' : adminEmails.concat('support@cloudron.io').join(', '),
subject: util.format('[%s] %s exited unexpectedly', config.fqdn(), program),
text: render('oom_event.ejs', { fqdn: config.fqdn(), program: program, context: context, format: 'text' })
};
sendMails([ mailOptions ]);
});
}
// this function bypasses the queue intentionally. it is also expected to work without the mailer module initialized
// NOTE: crashnotifier should be able to send mail when there is no db
function unexpectedExit(program, context, callback) {
assert.strictEqual(typeof program, 'string');
assert.strictEqual(typeof context, 'string');
assert.strictEqual(typeof callback, 'function');
var mailOptions = {
from: mailConfig().from,
to: 'support@cloudron.io',
@@ -512,7 +550,7 @@ function unexpectedExit(program, context) {
text: render('unexpected_exit.ejs', { fqdn: config.fqdn(), program: program, context: context, format: 'text' })
};
sendMails([ mailOptions ]);
sendMails([ mailOptions ], callback);
}
function sendFeedback(user, type, subject, description) {
+13 -4
View File
@@ -14,28 +14,31 @@ exports = module.exports = {
configureAdmin: configureAdmin,
configureApp: configureApp,
unconfigureApp: unconfigureApp,
reload: reload
reload: reload,
removeAppConfigs: removeAppConfigs
};
var NGINX_APPCONFIG_EJS = fs.readFileSync(__dirname + '/../setup/start/nginx/appconfig.ejs', { encoding: 'utf8' }),
RELOAD_NGINX_CMD = path.join(__dirname, 'scripts/reloadnginx.sh');
function configureAdmin(certFilePath, keyFilePath, callback) {
function configureAdmin(certFilePath, keyFilePath, configFileName, vhost, callback) {
assert.strictEqual(typeof certFilePath, 'string');
assert.strictEqual(typeof keyFilePath, 'string');
assert.strictEqual(typeof configFileName, 'string');
assert.strictEqual(typeof vhost, 'string');
assert.strictEqual(typeof callback, 'function');
var data = {
sourceDir: path.resolve(__dirname, '..'),
adminOrigin: config.adminOrigin(),
vhost: config.adminFqdn(),
vhost: vhost, // if vhost is empty it will become the default_server
endpoint: 'admin',
certFilePath: certFilePath,
keyFilePath: keyFilePath,
xFrameOptions: 'SAMEORIGIN'
};
var nginxConf = ejs.render(NGINX_APPCONFIG_EJS, data);
var nginxConfigFilename = path.join(paths.NGINX_APPCONFIG_DIR, 'admin.conf');
var nginxConfigFilename = path.join(paths.NGINX_APPCONFIG_DIR, configFileName);
if (!safe.fs.writeFileSync(nginxConfigFilename, nginxConf)) return callback(safe.error);
@@ -93,3 +96,9 @@ function unconfigureApp(app, callback) {
function reload(callback) {
shell.sudo('reload', [ RELOAD_NGINX_CMD ], callback);
}
function removeAppConfigs() {
for (var appConfigFile of fs.readdirSync(paths.NGINX_APPCONFIG_DIR)) {
fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR, appConfigFile));
}
}
+6
View File
@@ -1,3 +1,9 @@
<footer class="text-center">
<span class="text-muted">&copy; 2017 <a href="https://cloudron.io" target="_blank">Cloudron</a></span>
<span class="text-muted"><a href="https://twitter.com/cloudron_io" target="_blank">Twitter <i class="fa fa-twitter"></i></a></span>
<span class="text-muted"><a href="https://chat.cloudron.io" target="_blank">Chat <i class="fa fa-comments"></i></a></span>
</footer>
</body>
</html>
+3 -3
View File
@@ -6,7 +6,7 @@
<title> <%= title %> </title>
<link href="/api/v1/cloudron/avatar" rel="icon" type="image/png">
<link href="/api/v1/cloudron/avatar?<%= Math.random() %>" rel="icon" type="image/png">
<!-- Theme CSS -->
<link href="<%= adminOrigin %>/theme.css" rel="stylesheet">
@@ -32,8 +32,8 @@
<nav class="navbar navbar-default navbar-static-top shadow" role="navigation" style="margin-bottom: 0">
<div class="container-fluid">
<div class="navbar-header">
<span class="navbar-brand navbar-brand-icon"><img src="/api/v1/cloudron/avatar" width="40" height="40"/></span>
<span class="navbar-brand"><%= cloudronName %></span>
<a href="/" class="navbar-brand navbar-brand-icon"><img src="/api/v1/cloudron/avatar?<%= Math.random() %>" width="40" height="40"/></a>
<a href="/" class="navbar-brand"><%= cloudronName %></a>
</div>
</div>
</nav>
+1 -1
View File
@@ -8,7 +8,7 @@
<div class="card">
<div class="row">
<div class="col-md-12" style="text-align: center;">
<img width="128" height="128" src="<%= applicationLogo %>"/>
<img width="128" height="128" src="<%= applicationLogo %>?<%= Math.random() %>"/>
<h1><small>Login to</small> <%= applicationName %></h1>
<br/>
</div>
+16 -20
View File
@@ -5,29 +5,25 @@ var config = require('./config.js'),
// keep these values in sync with start.sh
exports = module.exports = {
CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'),
INFRA_VERSION_FILE: path.join(config.baseDir(), 'data/INFRA_VERSION'),
DATA_DIR: path.join(config.baseDir(), 'data'),
BOX_DATA_DIR: path.join(config.baseDir(), 'boxdata'),
ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'data/acme'),
ADDON_CONFIG_DIR: path.join(config.baseDir(), 'data/addons'),
COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'data/collectd/collectd.conf.d'),
MAIL_DATA_DIR: path.join(config.baseDir(), 'data/mail'),
NGINX_CONFIG_DIR: path.join(config.baseDir(), 'data/nginx'),
NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'data/nginx/applications'),
NGINX_CERT_DIR: path.join(config.baseDir(), 'data/nginx/cert'),
ADDON_CONFIG_DIR: path.join(config.baseDir(), 'data/addons'),
COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'data/collectd/collectd.conf.d'),
DATA_DIR: path.join(config.baseDir(), 'data'),
BOX_DATA_DIR: path.join(config.baseDir(), 'data/box'),
// this is not part of appdata because an icon may be set before install
APPICONS_DIR: path.join(config.baseDir(), 'data/box/appicons'),
APP_CERTS_DIR: path.join(config.baseDir(), 'data/box/certs'),
MAIL_DATA_DIR: path.join(config.baseDir(), 'data/box/mail'),
CLOUDRON_AVATAR_FILE: path.join(config.baseDir(), 'data/box/avatar.png'),
CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'),
FIRST_RUN_FILE: path.join(config.baseDir(), 'data/box/first_run'),
UPDATE_CHECKER_FILE: path.join(config.baseDir(), 'data/box/updatechecker.json'),
ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'data/acme'),
ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'data/box/acme/acme.key'),
INFRA_VERSION_FILE: path.join(config.baseDir(), 'data/INFRA_VERSION')
ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'),
APP_ICONS_DIR: path.join(config.baseDir(), 'boxdata/appicons'),
APP_CERTS_DIR: path.join(config.baseDir(), 'boxdata/certs'),
CLOUDRON_AVATAR_FILE: path.join(config.baseDir(), 'boxdata/avatar.png'),
FIRST_RUN_FILE: path.join(config.baseDir(), 'boxdata/first_run'),
UPDATE_CHECKER_FILE: path.join(config.baseDir(), 'boxdata/updatechecker.json')
};
+63 -43
View File
@@ -5,26 +5,27 @@ exports = module.exports = {
uninitialize: uninitialize,
events: new (require('events').EventEmitter)(),
EVENT_READY: 'ready',
isReadySync: isReadySync
EVENT_READY: 'ready'
};
var apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
cloudron = require('./cloudron.js'),
config = require('./config.js'),
certificates = require('./certificates.js'),
debug = require('debug')('box:platform'),
fs = require('fs'),
hat = require('hat'),
infra = require('./infra_version.js'),
nginx = require('./nginx.js'),
os = require('os'),
paths = require('./paths.js'),
safe = require('safetydance'),
settings = require('./settings.js'),
shell = require('./shell.js'),
subdomains = require('./subdomains.js'),
user = require('./user.js'),
util = require('util'),
_ = require('underscore');
@@ -39,6 +40,12 @@ function initialize(callback) {
settings.events.on(settings.MAIL_CONFIG_KEY, function () { startMail(NOOP_CALLBACK); });
certificates.events.on(certificates.EVENT_CERT_CHANGED, function (domain) {
if (domain === '*.' + config.fqdn() || domain === config.adminFqdn()) startMail(NOOP_CALLBACK);
});
cloudron.events.on(cloudron.EVENT_ACTIVATED, function () { createMailConfig(NOOP_CALLBACK); });
var existingInfra = { version: 'none' };
if (fs.existsSync(paths.INFRA_VERSION_FILE)) {
existingInfra = safe.JSON.parse(fs.readFileSync(paths.INFRA_VERSION_FILE, 'utf8'));
@@ -56,7 +63,6 @@ function initialize(callback) {
async.series([
stopContainers.bind(null, existingInfra),
createDockerNetwork,
startAddons.bind(null, existingInfra),
removeOldImages,
startApps.bind(null, existingInfra),
@@ -80,12 +86,10 @@ function initialize(callback) {
function uninitialize(callback) {
clearTimeout(gPlatformReadyTimer);
gPlatformReadyTimer = null;
settings.events.removeListener(settings.MAIL_CONFIG_KEY, startMail);
callback();
}
function isReadySync() {
return gPlatformReadyTimer === null;
// TODO: unregister event listeners
callback();
}
function removeOldImages(callback) {
@@ -122,10 +126,6 @@ function stopContainers(existingInfra, callback) {
callback();
}
function createDockerNetwork(callback) {
shell.execSync('createDockerNetwork', 'docker network create --subnet=172.18.0.0/16 cloudron || true', callback);
}
function startGraphite(callback) {
const tag = infra.images.graphite.tag;
const dataDir = paths.DATA_DIR;
@@ -219,6 +219,25 @@ function startMongodb(callback) {
callback();
}
function createMailConfig(callback) {
assert.strictEqual(typeof callback, 'function');
const fqdn = config.fqdn();
const mailFqdn = config.adminFqdn();
const alertsFrom = 'no-reply@' + config.fqdn();
user.getOwner(function (error, owner) {
var alertsTo = [ 'webmaster@cloudron.io' ].concat(error ? [] : owner.email).join(',');
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/mail.ini',
`mail_domain=${fqdn}\nmail_server_name=${mailFqdn}\nalerts_from=${alertsFrom}\nalerts_to=${alertsTo}`, 'utf8')) {
return callback(new Error('Could not create mail var file:' + safe.error.message));
}
callback();
});
}
function startMail(callback) {
// mail (note: 2525 is hardcoded in mail container and app use this port)
// MAIL_SERVER_NAME is the hostname of the mailserver i.e server uses these certs
@@ -227,50 +246,50 @@ function startMail(callback) {
const tag = infra.images.mail.tag;
const dataDir = paths.DATA_DIR;
const fqdn = config.fqdn();
const mailFqdn = config.adminFqdn();
const memoryLimit = Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256);
// TODO: watch for a signal here should the certificate path change. Note that haraka reloads
// config automatically if the contents of the certificate changes (eg, renawal).
certificates.getAdminCertificatePath(function (error, certFilePath, keyFilePath) {
// admin and mail share the same certificate
certificates.getAdminCertificate(function (error, cert, key) {
if (error) return callback(error);
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message));
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + safe.error.message));
settings.getMailConfig(function (error, mailConfig) {
if (error) return callback(error);
shell.execSync('startMail', 'docker rm -f mail || true');
var ports = mailConfig.enabled ? '-p 587:2525 -p 993:9993 -p 4190:4190 -p 25:2525' : '';
createMailConfig(function (error) {
if (error) return callback(error);
const cmd = `docker run --restart=always -d --name="mail" \
--net cloudron \
--net-alias mail \
-m ${memoryLimit}m \
--memory-swap ${memoryLimit * 2}m \
-e "MAIL_DOMAIN=${fqdn}" \
-e "MAIL_SERVER_NAME=${mailFqdn}" \
-v "${dataDir}/box/mail:/app/data" \
-v "${dataDir}/mail:/run" \
-v "${certFilePath}:/etc/tls_cert.pem:ro" \
-v "${keyFilePath}:/etc/tls_key.pem:ro" \
${ports} \
--read-only -v /tmp ${tag}`;
var ports = mailConfig.enabled ? '-p 587:2525 -p 993:9993 -p 4190:4190 -p 25:2525' : '';
shell.execSync('startMail', cmd);
const cmd = `docker run --restart=always -d --name="mail" \
--net cloudron \
--net-alias mail \
-m ${memoryLimit}m \
--memory-swap ${memoryLimit * 2}m \
-v "${dataDir}/mail:/app/data" \
-v "${dataDir}/addons/mail:/etc/mail" \
${ports} \
--read-only -v /run -v /tmp ${tag}`;
if (!mailConfig.enabled || process.env.BOX_ENV === 'test') return callback();
shell.execSync('startMail', cmd);
// Add MX and DMARC record. Note that DMARC policy depends on DKIM signing and thus works
// only if we use our internal mail server.
var records = [
{ subdomain: '_dmarc', type: 'TXT', values: [ '"v=DMARC1; p=reject; pct=100"' ] },
{ subdomain: '', type: 'MX', values: [ '10 ' + config.mailFqdn() + '.' ] }
];
if (!mailConfig.enabled || process.env.BOX_ENV === 'test') return callback();
async.mapSeries(records, function (record, iteratorCallback) {
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
}, callback);
// Add MX and DMARC record. Note that DMARC policy depends on DKIM signing and thus works
// only if we use our internal mail server.
var records = [
{ subdomain: '_dmarc', type: 'TXT', values: [ '"v=DMARC1; p=reject; pct=100"' ] },
{ subdomain: '', type: 'MX', values: [ '10 ' + config.mailFqdn() + '.' ] }
];
async.mapSeries(records, function (record, iteratorCallback) {
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
}, callback);
});
});
});
}
@@ -305,6 +324,7 @@ function startApps(existingInfra, callback) {
apps.restoreInstalledApps(callback);
} else {
debug('startApps: reconfiguring installed apps');
nginx.removeAppConfigs(); // should we change the cert location, nginx will not start
apps.configureInstalledApps(callback);
}
}
+7 -2
View File
@@ -55,7 +55,8 @@ function removeInternalAppFields(app) {
memoryLimit: app.memoryLimit,
altDomain: app.altDomain,
xFrameOptions: app.xFrameOptions,
sso: app.sso
sso: app.sso,
debugMode: app.debugMode
};
}
@@ -86,7 +87,7 @@ function getApps(req, res, next) {
function getAppIcon(req, res, next) {
assert.strictEqual(typeof req.params.id, 'string');
var iconPath = paths.APPICONS_DIR + '/' + req.params.id + '.png';
var iconPath = paths.APP_ICONS_DIR + '/' + req.params.id + '.png';
fs.exists(iconPath, function (exists) {
if (!exists) return next(new HttpError(404, 'No such icon'));
res.sendFile(iconPath);
@@ -126,6 +127,8 @@ function installApp(req, res, next) {
if ('sso' in data && typeof data.sso !== 'boolean') return next(new HttpError(400, 'sso must be a boolean'));
if (('debugMode' in data) && typeof data.debugMode !== 'object') return next(new HttpError(400, 'debugMode must be an object'));
debug('Installing app :%j', data);
apps.install(data, auditSource(req), function (error, app) {
@@ -162,6 +165,8 @@ function configureApp(req, res, next) {
if (data.altDomain && typeof data.altDomain !== 'string') return next(new HttpError(400, 'altDomain must be a string'));
if (data.xFrameOptions && typeof data.xFrameOptions !== 'string') return next(new HttpError(400, 'xFrameOptions must be a string'));
if (('debugMode' in data) && typeof data.debugMode !== 'object') return next(new HttpError(400, 'debugMode must be an object'));
debug('Configuring app id:%s data:%j', req.params.id, data);
apps.configure(req.params.id, data, auditSource(req), function (error) {
+18
View File
@@ -2,6 +2,7 @@
exports = module.exports = {
activate: activate,
dnsSetup: dnsSetup,
setupTokenAuth: setupTokenAuth,
getStatus: getStatus,
reboot: reboot,
@@ -23,6 +24,8 @@ var assert = require('assert'),
HttpSuccess = require('connect-lastmile').HttpSuccess,
progress = require('../progress.js'),
mailer = require('../mailer.js'),
settings = require('../settings.js'),
SettingsError = settings.SettingsError,
superagent = require('superagent'),
updateChecker = require('../updatechecker.js'),
_ = require('underscore');
@@ -80,6 +83,21 @@ function activate(req, res, next) {
});
}
function dnsSetup(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
if (typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
if (typeof req.body.domain !== 'string' || !req.body.domain) return next(new HttpError(400, 'domain is required'));
cloudron.dnsSetup(req.body, req.body.domain, function (error) {
if (error && error.reason === CloudronError.ALREADY_SETUP) return next(new HttpError(409, error.message));
if (error && error.reason === CloudronError.BAD_FIELD) return next(new HttpError(400, error.message));
if (error) return next(new HttpError(500, error));
next(new HttpSuccess(200));
});
}
function setupTokenAuth(req, res, next) {
assert.strictEqual(typeof req.query, 'object');
+2 -18
View File
@@ -3,8 +3,7 @@
exports = module.exports = {
get: get,
update: update,
changePassword: changePassword,
setShowTutorial: setShowTutorial
changePassword: changePassword
};
var assert = require('assert'),
@@ -28,8 +27,7 @@ function get(req, res, next) {
email: req.user.email,
alternateEmail: req.user.alternateEmail,
admin: req.user.admin,
displayName: req.user.displayName,
showTutorial: req.user.showTutorial
displayName: req.user.displayName
}));
}
@@ -66,17 +64,3 @@ function changePassword(req, res, next) {
next(new HttpSuccess(204));
});
}
function setShowTutorial(req, res, next) {
assert.strictEqual(typeof req.user, 'object');
assert.strictEqual(typeof req.body, 'object');
if (typeof req.body.showTutorial !== 'boolean') return next(new HttpError(400, 'showTutorial must be a boolean.'));
user.setShowTutorial(req.user.id, req.body.showTutorial, function (error) {
if (error && error.reason === UserError.NOT_FOUND) return next(new HttpError(403, 'Wrong password'));
if (error) return next(new HttpError(500, error));
next(new HttpSuccess(204));
});
}
+4 -3
View File
@@ -27,13 +27,14 @@ exports = module.exports = {
getAppstoreConfig: getAppstoreConfig,
setAppstoreConfig: setAppstoreConfig,
setCertificate: setCertificate,
setFallbackCertificate: setFallbackCertificate,
setAdminCertificate: setAdminCertificate
};
var assert = require('assert'),
certificates = require('../certificates.js'),
CertificatesError = require('../certificates.js').CertificatesError,
config = require('../config.js'),
HttpError = require('connect-lastmile').HttpError,
HttpSuccess = require('connect-lastmile').HttpSuccess,
safe = require('safetydance'),
@@ -170,7 +171,7 @@ function setDnsConfig(req, res, next) {
if (typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
settings.setDnsConfig(req.body, function (error) {
settings.setDnsConfig(req.body, config.fqdn(), function (error) {
if (error && error.reason === SettingsError.BAD_FIELD) return next(new HttpError(400, error.message));
if (error) return next(new HttpError(500, error));
@@ -234,7 +235,7 @@ function setAppstoreConfig(req, res, next) {
}
// default fallback cert
function setCertificate(req, res, next) {
function setFallbackCertificate(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
if (!req.body.cert || typeof req.body.cert !== 'string') return next(new HttpError(400, 'cert must be a string'));
+3 -3
View File
@@ -42,7 +42,7 @@ var SERVER_URL = 'http://localhost:' + config.get('port');
// Test image information
var TEST_IMAGE_REPO = 'cloudron/test';
var TEST_IMAGE_TAG = '17.0.0';
var TEST_IMAGE_TAG = '18.0.0';
var TEST_IMAGE = TEST_IMAGE_REPO + ':' + TEST_IMAGE_TAG;
// var TEST_IMAGE_ID = child_process.execSync('docker inspect --format={{.Id}} ' + TEST_IMAGE).toString('utf8').trim();
@@ -229,7 +229,7 @@ describe('Apps', function () {
}, callback);
},
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }),
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }, config.fqdn()),
settings.setTlsConfig.bind(null, { provider: 'caas' }),
settings.setBackupConfig.bind(null, { provider: 'caas', token: 'BACKUP_TOKEN', bucket: 'Bucket', prefix: 'Prefix' })
], function (error) {
@@ -1036,7 +1036,7 @@ describe('Apps', function () {
apiHockServer = http.createServer(apiHockInstance.handler).listen(port, callback);
},
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }),
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }, config.fqdn()),
settings.setTlsConfig.bind(null, { provider: 'caas' }),
+2 -4
View File
@@ -146,8 +146,7 @@ describe('OAuth2', function () {
createdAt: (new Date()).toUTCString(),
modifiedAt: (new Date()).toUTCString(),
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
var APP_0 = {
@@ -1303,8 +1302,7 @@ describe('Password', function () {
createdAt: (new Date()).toUTCString(),
modifiedAt: (new Date()).toUTCString(),
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
// make csrf always succeed for testing
-43
View File
@@ -100,7 +100,6 @@ describe('Profile API', function () {
expect(result.body.username).to.equal(USERNAME_0.toLowerCase());
expect(result.body.email).to.equal(EMAIL_0.toLowerCase());
expect(result.body.admin).to.be.ok();
expect(result.body.showTutorial).to.be.ok();
expect(result.body.displayName).to.be.a('string');
expect(result.body.password).to.not.be.ok();
expect(result.body.salt).to.not.be.ok();
@@ -139,7 +138,6 @@ describe('Profile API', function () {
expect(result.body.username).to.equal(USERNAME_0.toLowerCase());
expect(result.body.email).to.equal(EMAIL_0.toLowerCase());
expect(result.body.admin).to.be.ok();
expect(result.body.showTutorial).to.be.ok();
expect(result.body.displayName).to.be.a('string');
expect(result.body.password).to.not.be.ok();
expect(result.body.salt).to.not.be.ok();
@@ -278,45 +276,4 @@ describe('Profile API', function () {
});
});
});
describe('showTutorial change', function () {
before(setup);
after(cleanup);
it('fails due to missing showTutorial', function (done) {
superagent.post(SERVER_URL + '/api/v1/profile/tutorial')
.query({ access_token: token_0 })
.send({})
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
});
});
it('fails due to wrong showTutorial type', function (done) {
superagent.post(SERVER_URL + '/api/v1/profile/tutorial')
.query({ access_token: token_0 })
.send({ showTutorial: 'true' })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
});
});
it('succeeds', function (done) {
superagent.post(SERVER_URL + '/api/v1/profile/tutorial')
.query({ access_token: token_0 })
.send({ showTutorial: false })
.end(function (err, res) {
expect(res.statusCode).to.equal(204);
superagent.get(SERVER_URL + '/api/v1/profile/').query({ access_token: token_0 }).end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.showTutorial).to.not.be.ok();
done();
});
});
});
});
});
+3 -2
View File
@@ -10,6 +10,7 @@ var appdb = require('../../appdb.js'),
child_process = require('child_process'),
cloudron = require('../../cloudron.js'),
config = require('../../config.js'),
constants = require('../../constants.js'),
database = require('../../database.js'),
expect = require('expect.js'),
path = require('path'),
@@ -119,10 +120,10 @@ describe('Settings API', function () {
superagent.post(SERVER_URL + '/api/v1/settings/autoupdate_pattern')
.query({ access_token: token })
.send({ pattern: 'never' })
.send({ pattern: constants.AUTOUPDATE_PATTERN_NEVER })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);
expect(eventPattern).to.eql('never');
expect(eventPattern).to.eql(constants.AUTOUPDATE_PATTERN_NEVER);
done();
});
});
+4 -8
View File
@@ -42,9 +42,7 @@ if [[ "$1" == "s3" ]]; then
if [ $# -gt 9 ]; then
export AWS_SESSION_TOKEN="${10}"
fi
fi
if [[ "$1" == "filesystem" ]]; then
elif [[ "$1" == "filesystem" ]]; then
readonly backup_folder="$3"
readonly backup_config_fileName="$4"
readonly backup_data_fileName="$5"
@@ -52,7 +50,7 @@ if [[ "$1" == "filesystem" ]]; then
fi
# perform backup
readonly now=$(date "+%Y-%m-%dT%H:%M:%S")
readonly now=$(date "+%Y-%m-%d-%H%M%S")
readonly app_data_dir="${DATA_DIR}/${app_id}"
readonly app_data_snapshot="${DATA_DIR}/snapshots/${app_id}-${now}"
@@ -99,10 +97,8 @@ if [[ "$1" == "s3" ]]; then
fi
cat "${error_log}" && rm "${error_log}"
done
fi
if [[ "$1" == "filesystem" ]]; then
mkdir -p "${backup_folder}"
elif [[ "$1" == "filesystem" ]]; then
mkdir -p $(dirname "${backup_folder}/${backup_config_fileName}")
echo "Storing backup config to ${backup_folder}/${backup_config_fileName}"
cat "${app_data_snapshot}/config.json" > "${backup_folder}/${backup_config_fileName}"
+13 -15
View File
@@ -37,24 +37,23 @@ if [[ "$1" == "s3" ]]; then
if [ $# -gt 7 ]; then
export AWS_SESSION_TOKEN="$8"
fi
fi
if [[ "$1" == "filesystem" ]]; then
elif [[ "$1" == "filesystem" ]]; then
readonly backup_folder="$2"
readonly backup_fileName="$3"
readonly password="$4"
fi
# perform backup
now=$(date "+%Y-%m-%dT%H:%M:%S")
BOX_DATA_DIR="${HOME}/data/box"
box_snapshot_dir="${HOME}/data/snapshots/box-${now}"
BOX_DATA_DIR="${HOME}/boxdata"
MAIL_DATA_DIR="${HOME}/data/mail"
mail_snapshot_dir="${HOME}/data/snapshots/mail"
echo "Creating MySQL dump"
mysqldump -u root -ppassword --single-transaction --routines --triggers box > "${BOX_DATA_DIR}/box.mysqldump"
echo "Snapshoting backup as backup-${now}"
btrfs subvolume snapshot -r "${BOX_DATA_DIR}" "${box_snapshot_dir}"
echo "Snapshotting mail"
btrfs subvolume delete "${mail_snapshot_dir}" &> /dev/null || true
btrfs subvolume snapshot -r "${MAIL_DATA_DIR}" "${mail_snapshot_dir}"
# will be checked at the end
try=0
@@ -72,25 +71,24 @@ if [[ "$1" == "s3" ]]; then
# use aws instead of curl because curl will always read entire stream memory to set Content-Length
# aws will do multipart upload
if tar -czf - -C "${box_snapshot_dir}" . \
if tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
| openssl aes-256-cbc -e -pass "pass:${password}" \
| aws ${optional_args} s3 cp - "${s3_url}" 2>"${error_log}"; then
break
fi
cat "${error_log}" && rm "${error_log}"
done
fi
if [[ "$1" == "filesystem" ]]; then
elif [[ "$1" == "filesystem" ]]; then
echo "Storing backup to ${backup_folder}/${backup_fileName}"
mkdir -p "${backup_folder}"
mkdir -p $(dirname "${backup_folder}/${backup_fileName}")
tar -czf - -C "${box_snapshot_dir}" . | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
| openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
fi
echo "Deleting backup snapshot"
btrfs subvolume delete "${box_snapshot_dir}"
btrfs subvolume delete "${mail_snapshot_dir}"
if [[ ${try} -eq 5 ]]; then
echo "Backup failed"
+3
View File
@@ -31,6 +31,9 @@ echo
echo
echo "docker"
echo "------"
docker info
echo
echo
journalctl --all --no-pager -u docker -n 50
echo
echo
+1 -1
View File
@@ -8,7 +8,7 @@ if [[ ${EUID} -ne 0 ]]; then
fi
readonly UPDATER_SERVICE="cloudron-updater"
readonly DATA_FILE="/tmp/cloudron-update-data.json"
readonly DATA_FILE="/root/cloudron-update-data.json"
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 300"
if [[ $# == 1 && "$1" == "--check" ]]; then
+18 -21
View File
@@ -8,22 +8,18 @@ exports = module.exports = {
var assert = require('assert'),
async = require('async'),
auth = require('./auth.js'),
certificates = require('./certificates.js'),
clients = require('./clients.js'),
cloudron = require('./cloudron.js'),
cron = require('./cron.js'),
config = require('./config.js'),
database = require('./database.js'),
eventlog = require('./eventlog.js'),
express = require('express'),
http = require('http'),
mailer = require('./mailer.js'),
middleware = require('./middleware'),
passport = require('passport'),
path = require('path'),
platform = require('./platform.js'),
routes = require('./routes/index.js'),
taskmanager = require('./taskmanager.js');
RateLimit = require('express-rate-limit'),
routes = require('./routes/index.js');
var gHttpServer = null;
var gSysadminHttpServer = null;
@@ -45,12 +41,22 @@ function initializeExpressSync() {
app.set('view engine', 'ejs');
app.set('json spaces', 2); // pretty json
// for rate limiting
app.enable('trust proxy');
var limiter = new RateLimit({
windowMs: 60*1000, // 1 minute
max: 200, // limit each IP to 200 requests per windowMs
delayMs: 0 // disable delaying - full speed until the max limit is reached
});
if (process.env.BOX_ENV !== 'test') app.use(middleware.morgan('Box :method :url :status :response-time ms - :res[content-length]', { immediate: false }));
var router = new express.Router();
router.del = router.delete; // amend router.del for readability further on
app
.use(limiter)
.use(middleware.timeout(REQUEST_TIMEOUT))
.use(json)
.use(urlencoded)
@@ -81,6 +87,7 @@ function initializeExpressSync() {
// public routes
router.post('/api/v1/cloudron/activate', routes.cloudron.setupTokenAuth, routes.cloudron.activate);
router.post('/api/v1/cloudron/dns_setup', routes.cloudron.dnsSetup); // only available until no-domain
router.get ('/api/v1/cloudron/progress', routes.cloudron.getProgress);
router.get ('/api/v1/cloudron/status', routes.cloudron.getStatus);
router.get ('/api/v1/cloudron/avatar', routes.settings.getCloudronAvatar); // this is a public alias for /api/v1/settings/cloudron_avatar
@@ -106,7 +113,6 @@ function initializeExpressSync() {
router.get ('/api/v1/profile', profileScope, routes.profile.get);
router.post('/api/v1/profile', profileScope, routes.profile.update);
router.post('/api/v1/profile/password', profileScope, routes.user.verifyPassword, routes.profile.changePassword);
router.post('/api/v1/profile/tutorial', profileScope, routes.profile.setShowTutorial);
// user routes
router.get ('/api/v1/users', usersScope, routes.user.requireAdmin, routes.user.list);
@@ -183,9 +189,9 @@ function initializeExpressSync() {
router.post('/api/v1/settings/dns_config', settingsScope, routes.user.requireAdmin, routes.settings.setDnsConfig);
router.get ('/api/v1/settings/backup_config', settingsScope, routes.user.requireAdmin, routes.settings.getBackupConfig);
router.post('/api/v1/settings/backup_config', settingsScope, routes.user.requireAdmin, routes.settings.setBackupConfig);
router.post('/api/v1/settings/certificate', settingsScope, routes.user.requireAdmin, routes.settings.setCertificate);
// See #47
// router.post('/api/v1/settings/admin_certificate', settingsScope, routes.user.requireAdmin, routes.settings.setAdminCertificate);
router.post('/api/v1/settings/certificate', settingsScope, routes.user.requireAdmin, routes.settings.setFallbackCertificate);
router.post('/api/v1/settings/admin_certificate', settingsScope, routes.user.requireAdmin, routes.settings.setAdminCertificate);
router.get ('/api/v1/settings/time_zone', settingsScope, routes.user.requireAdmin, routes.settings.getTimeZone);
router.post('/api/v1/settings/time_zone', settingsScope, routes.user.requireAdmin, routes.settings.setTimeZone);
router.get ('/api/v1/settings/appstore_config', settingsScope, routes.user.requireAdmin, routes.settings.getAppstoreConfig);
@@ -267,12 +273,7 @@ function start(callback) {
async.series([
auth.initialize,
database.initialize,
cloudron.initialize, // keep this here because it reads activation state that others depend on
certificates.installAdminCertificate, // keep this before cron to block heartbeats until cert is ready
platform.initialize,
taskmanager.initialize,
mailer.initialize,
cron.initialize,
cloudron.initialize,
gHttpServer.listen.bind(gHttpServer, config.get('port'), '127.0.0.1'),
gSysadminHttpServer.listen.bind(gSysadminHttpServer, config.get('sysadminPort'), '127.0.0.1'),
eventlog.add.bind(null, eventlog.ACTION_START, { userId: null, username: 'boot' }, { version: config.version() })
@@ -285,13 +286,9 @@ function stop(callback) {
if (!gHttpServer) return callback(null);
async.series([
auth.uninitialize,
cloudron.uninitialize,
taskmanager.uninitialize,
platform.uninitialize,
cron.uninitialize,
mailer.uninitialize,
database.uninitialize,
auth.uninitialize,
gHttpServer.close.bind(gHttpServer),
gSysadminHttpServer.close.bind(gSysadminHttpServer)
], function (error) {
+60 -105
View File
@@ -23,6 +23,9 @@ exports = module.exports = {
getDnsConfig: getDnsConfig,
setDnsConfig: setDnsConfig,
getDynamicDnsConfig: getDynamicDnsConfig,
setDynamicDnsConfig: setDynamicDnsConfig,
getBackupConfig: getBackupConfig,
setBackupConfig: setBackupConfig,
@@ -46,6 +49,7 @@ exports = module.exports = {
CLOUDRON_NAME_KEY: 'cloudron_name',
DEVELOPER_MODE_KEY: 'developer_mode',
DNS_CONFIG_KEY: 'dns_config',
DYNAMIC_DNS_KEY: 'dynamic_dns',
BACKUP_CONFIG_KEY: 'backup_config',
TLS_CONFIG_KEY: 'tls_config',
UPDATE_CONFIG_KEY: 'update_config',
@@ -62,13 +66,11 @@ var assert = require('assert'),
CronJob = require('cron').CronJob,
DatabaseError = require('./databaseerror.js'),
debug = require('debug')('box:settings'),
digitalocean = require('./dns/digitalocean.js'),
dns = require('native-dns'),
cloudron = require('./cloudron.js'),
CloudronError = cloudron.CloudronError,
moment = require('moment-timezone'),
paths = require('./paths.js'),
route53 = require('./dns/route53.js'),
safe = require('safetydance'),
settingsdb = require('./settingsdb.js'),
subdomains = require('./subdomains.js'),
@@ -84,6 +86,7 @@ var gDefaults = (function () {
result[exports.TIME_ZONE_KEY] = 'America/Los_Angeles';
result[exports.CLOUDRON_NAME_KEY] = 'Cloudron';
result[exports.DEVELOPER_MODE_KEY] = true;
result[exports.DYNAMIC_DNS_KEY] = false;
result[exports.DNS_CONFIG_KEY] = { provider: 'manual' };
result[exports.BACKUP_CONFIG_KEY] = {
provider: 'filesystem',
@@ -155,6 +158,7 @@ function getEmailDnsRecords(callback) {
// check if DKIM is already setup
dns.resolveTxt(records.dkim.subdomain + '.' + config.fqdn(), function (error, txtRecords) {
if (error && error.code === 'ENOTFOUND') return callback(null, records); // not setup
if (error) return callback(error);
// ensure this is an array resolveTxt() returns undefined if no records are found
@@ -168,6 +172,7 @@ function getEmailDnsRecords(callback) {
// check if SPF is already setup
dns.resolveTxt(config.fqdn(), function (error, txtRecords) {
if (error && error.code === 'ENOTFOUND') return callback(null, records); // not setup
if (error) return callback(error);
// ensure this is an array resolveTxt() returns undefined if no records are found
@@ -198,7 +203,7 @@ function setAutoupdatePattern(pattern, callback) {
assert.strictEqual(typeof pattern, 'string');
assert.strictEqual(typeof callback, 'function');
if (pattern !== 'never') { // check if pattern is valid
if (pattern !== constants.AUTOUPDATE_PATTERN_NEVER) { // check if pattern is valid
var job = safe.safeCall(function () { return new CronJob(pattern); });
if (!job) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Invalid pattern'));
}
@@ -334,122 +339,63 @@ function getDnsConfig(callback) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, gDefaults[exports.DNS_CONFIG_KEY]);
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
callback(null, JSON.parse(value)); // accessKeyId, secretAccessKey, region
callback(null, JSON.parse(value));
});
}
function validateRoute53Config(domain, dnsConfig, callback) {
const zoneName = domain;
if (process.env.BOX_ENV === 'test') return callback();
sysinfo.getIp(function (error, ip) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, 'Error getting IP:' + error.message));
dns.resolveNs(zoneName, function (error, nameservers) {
if (error || !nameservers) return callback(error || new Error('Unable to get nameservers'));
route53.getHostedZone(dnsConfig, zoneName, function (error, zone) {
if (error && error.reason === SubdomainError.ACCESS_DENIED) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error getting zone information: Access denied'));
if (error && error.reason === SubdomainError.NOT_FOUND) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Zone not found'));
if (error && error.reason === SubdomainError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error getting zone information:' + error.message));
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
if (!_.isEqual(zone.DelegationSet.NameServers.sort(), nameservers.sort())) {
debug('validateRoute53Config: %j and %j do not match', nameservers, zone.DelegationSet.NameServers);
return callback(new SettingsError(SettingsError.BAD_FIELD, 'Domain nameservers are not set to Route53'));
}
route53.upsert(dnsConfig, zoneName, 'my', 'A', [ ip ], function (error, changeId) {
if (error && error.reason === SubdomainError.ACCESS_DENIED) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record. Access denied'));
if (error && error.reason === SubdomainError.NOT_FOUND) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Zone not found'));
if (error && error.reason === SubdomainError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record:' + error.message));
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
debug('validateRoute53Config: A record added with change id %s', changeId);
callback();
});
});
});
});
}
function validateDigitalOceanConfig(domain, dnsConfig, callback) {
const zoneName = domain;
if (process.env.BOX_ENV === 'test') return callback();
sysinfo.getIp(function (error, ip) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, 'Error getting IP:' + error.message));
dns.resolveNs(zoneName, function (error, nameservers) {
if (error || !nameservers) return callback(error || new Error('Unable to get nameservers'));
digitalocean.upsert(dnsConfig, zoneName, 'my', 'A', [ ip ], function (error, changeId) {
if (error && error.reason === SubdomainError.ACCESS_DENIED) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record. Access denied'));
if (error && error.reason === SubdomainError.NOT_FOUND) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Domain not found'));
if (error && error.reason === SubdomainError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record:' + error.message));
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
debug('validateDigitalOceanConfig: A record added with change id %s', changeId);
callback();
});
});
});
}
function setDnsConfig(dnsConfig, callback) {
function setDnsConfig(dnsConfig, domain, callback) {
assert.strictEqual(typeof dnsConfig, 'object');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof callback, 'function');
var credentials, validator;
sysinfo.getIp(function (error, ip) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, 'Error getting IP:' + error.message));
if (dnsConfig.provider === 'route53') {
if (typeof dnsConfig.accessKeyId !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'accessKeyId must be a string'));
if (typeof dnsConfig.secretAccessKey !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'secretAccessKey must be a string'));
credentials = {
provider: dnsConfig.provider,
accessKeyId: dnsConfig.accessKeyId,
secretAccessKey: dnsConfig.secretAccessKey,
region: dnsConfig.region || 'us-east-1',
endpoint: dnsConfig.endpoint || null
};
validator = validateRoute53Config.bind(null, dnsConfig.domain || config.fqdn());
} else if (dnsConfig.provider === 'caas' || dnsConfig.provider === 'noop' || dnsConfig.provider === 'manual') {
credentials = {
provider: dnsConfig.provider
};
validator = function (caasConfig, next) { return next(); };
} else if (dnsConfig.provider === 'digitalocean') {
if (typeof dnsConfig.token !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'token must be a string'));
credentials = {
provider: dnsConfig.provider,
token: dnsConfig.token
};
validator = validateDigitalOceanConfig.bind(null, dnsConfig.domain || config.fqdn());
} else {
return callback(new SettingsError(SettingsError.BAD_FIELD, 'provider must be route53, digitalocean, noop, manual or caas'));
}
validator(credentials, function (error) {
if (error) return callback(error);
settingsdb.set(exports.DNS_CONFIG_KEY, JSON.stringify(credentials), function (error) {
subdomains.verifyDnsConfig(dnsConfig, domain, ip, function (error, result) {
if (error && error.reason === SubdomainError.ACCESS_DENIED) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record. Access denied'));
if (error && error.reason === SubdomainError.NOT_FOUND) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Zone not found'));
if (error && error.reason === SubdomainError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.BAD_FIELD, 'Error adding A record:' + error.message));
if (error && error.reason === SubdomainError.BAD_FIELD) return callback(new SettingsError(SettingsError.BAD_FIELD, error.message));
if (error && error.reason === SubdomainError.INVALID_PROVIDER) return callback(new SettingsError(SettingsError.BAD_FIELD, error.message));
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
exports.events.emit(exports.DNS_CONFIG_KEY, dnsConfig);
settingsdb.set(exports.DNS_CONFIG_KEY, JSON.stringify(result), function (error) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
callback(null);
exports.events.emit(exports.DNS_CONFIG_KEY, dnsConfig);
callback(null);
});
});
});
}
function getDynamicDnsConfig(callback) {
assert.strictEqual(typeof callback, 'function');
settingsdb.get(exports.DYNAMIC_DNS_KEY, function (error, enabled) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, gDefaults[exports.DYNAMIC_DNS_KEY]);
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
// settingsdb holds string values only
callback(null, !!enabled);
});
}
function setDynamicDnsConfig(enabled, callback) {
assert.strictEqual(typeof enabled, 'boolean');
assert.strictEqual(typeof callback, 'function');
// settingsdb takes string values only
settingsdb.set(exports.DYNAMIC_DNS_KEY, enabled ? 'enabled' : '', function (error) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
exports.events.emit(exports.DYNAMIC_DNS_KEY, enabled);
return callback(null);
});
}
function getTlsConfig(callback) {
assert.strictEqual(typeof callback, 'function');
@@ -639,6 +585,15 @@ function getAll(callback) {
var result = _.extend({ }, gDefaults);
settings.forEach(function (setting) { result[setting.name] = setting.value; });
// convert booleans
result[exports.DEVELOPER_MODE_KEY] = !!result[exports.DEVELOPER_MODE_KEY];
result[exports.DYNAMIC_DNS_KEY] = !!result[exports.DYNAMIC_DNS_KEY];
// convert JSON objects
[exports.DNS_CONFIG_KEY, exports.TLS_CONFIG_KEY, exports.BACKUP_CONFIG_KEY, exports.MAIL_CONFIG_KEY].forEach(function (key) {
result[key] = typeof result[key] === 'object' ? result[key] : safe.JSON.parse(result[key]);
});
callback(null, result);
});
}
+29
View File
@@ -11,12 +11,15 @@ exports = module.exports = {
copyObject: copyObject,
removeBackup: removeBackup,
backupDone: backupDone,
testConfig: testConfig
};
var assert = require('assert'),
AWS = require('aws-sdk'),
config = require('../config.js'),
debug = require('debug')('box:storage/caas'),
safe = require('safetydance'),
SettingsError = require('../settings.js').SettingsError,
superagent = require('superagent');
@@ -183,3 +186,29 @@ function testConfig(apiConfig, callback) {
callback();
}
function backupDone(filename, app, appBackupIds, callback) {
assert.strictEqual(typeof filename, 'string');
assert(!app || typeof app === 'object');
assert(!appBackupIds || Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
debug('backupDone %s', filename);
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/backupDone';
var data = {
boxVersion: config.version(),
restoreKey: filename,
appId: app ? app.id : null,
appVersion: app ? app.manifest.version : null,
appBackupIds: appBackupIds
};
superagent.post(url).send(data).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode !== 200) return callback(new Error(result.text));
if (!result.body) return callback(new Error('Unexpected response'));
return callback(null);
});
}
+12
View File
@@ -11,6 +11,8 @@ exports = module.exports = {
copyObject: copyObject,
removeBackup: removeBackup,
backupDone: backupDone,
testConfig: testConfig
};
@@ -151,3 +153,13 @@ function testConfig(apiConfig, callback) {
callback();
}
function backupDone(filename, app, appBackupIds, callback) {
assert.strictEqual(typeof filename, 'string');
assert(!app || typeof app === 'object');
assert(!appBackupIds || Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
callback();
}
+11
View File
@@ -17,6 +17,8 @@ exports = module.exports = {
copyObject: copyObject,
removeBackup: removeBackup,
backupDone: backupDone,
testConfig: testConfig
};
@@ -111,3 +113,12 @@ function testConfig(apiConfig, callback) {
callback(new Error('not implemented'));
}
function backupDone(filename, app, appBackupIds, callback) {
assert.strictEqual(typeof filename, 'string');
assert(!app || typeof app === 'object');
assert(!appBackupIds || Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
callback(new Error('not implemented'));
}
+12
View File
@@ -11,6 +11,8 @@ exports = module.exports = {
copyObject: copyObject,
removeBackup: removeBackup,
backupDone: backupDone,
testConfig: testConfig
};
@@ -204,3 +206,13 @@ function testConfig(apiConfig, callback) {
});
});
}
function backupDone(filename, app, appBackupIds, callback) {
assert.strictEqual(typeof filename, 'string');
assert(!app || typeof app === 'object');
assert(!appBackupIds || Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
callback();
}
+14
View File
@@ -5,6 +5,7 @@ module.exports = exports = {
upsert: upsert,
get: get,
waitForDns: waitForDns,
verifyDnsConfig: verifyDnsConfig,
SubdomainError: SubdomainError
};
@@ -41,6 +42,7 @@ SubdomainError.STILL_BUSY = 'Still busy';
SubdomainError.MISSING_CREDENTIALS = 'Missing credentials';
SubdomainError.INTERNAL_ERROR = 'Internal error';
SubdomainError.ACCESS_DENIED = 'Access denied';
SubdomainError.INVALID_PROVIDER = 'provider must be route53, digitalocean, noop, manual or caas';
// choose which subdomain backend we use for test purpose we use route53
function api(provider) {
@@ -120,3 +122,15 @@ function waitForDns(domain, value, type, options, callback) {
});
}
function verifyDnsConfig(dnsConfig, domain, ip, callback) {
assert(dnsConfig && typeof dnsConfig === 'object'); // the dns config to test with
assert(typeof dnsConfig.provider === 'string');
assert.strictEqual(typeof domain, 'string');
assert.strictEqual(typeof ip, 'string');
assert.strictEqual(typeof callback, 'function');
var backend = api(dnsConfig.provider);
if (!backend) return callback(new SubdomainError(SubdomainError.INVALID_PROVIDER));
api(dnsConfig.provider).verifyDnsConfig(dnsConfig, domain, ip, callback);
}
+1 -11
View File
@@ -14,8 +14,6 @@ var assert = require('assert'),
scaleway = require('./sysinfo/scaleway.js'),
util = require('util');
var gCachedIp = null;
function SysInfoError(reason, errorOrMessage) {
assert.strictEqual(typeof reason, 'string');
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
@@ -54,17 +52,9 @@ function getApi(callback) {
function getIp(callback) {
assert.strictEqual(typeof callback, 'function');
if (gCachedIp) return callback(null, gCachedIp);
getApi(function (error, api) {
if (error) return callback(error);
api.getIp(function (error, ip) {
if (error) return callback(error);
gCachedIp = ip;
callback(null, gCachedIp);
});
api.getIp(callback);
});
}
+27 -49
View File
@@ -1,8 +1,8 @@
'use strict';
exports = module.exports = {
initialize: initialize,
uninitialize: uninitialize,
resumeTasks: resumeTasks,
pauseTasks: pauseTasks,
stopAppTask: stopAppTask,
startAppTask: startAppTask,
@@ -16,10 +16,8 @@ var appdb = require('./appdb.js'),
assert = require('assert'),
async = require('async'),
child_process = require('child_process'),
cloudron = require('./cloudron.js'),
debug = require('debug')('box:taskmanager'),
locker = require('./locker.js'),
platform = require('./platform.js'),
sendFailureLogs = require('./logcollector.js').sendFailureLogs,
util = require('util'),
_ = require('underscore');
@@ -29,31 +27,43 @@ var gPendingTasks = [ ];
var TASK_CONCURRENCY = 3;
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
var gPaused = true;
function initialize(callback) {
assert.strictEqual(typeof callback, 'function');
// resume app tasks when platform is ready or after a crash
function resumeTasks(callback) {
callback = callback || NOOP_CALLBACK;
debug('resuming tasks');
locker.on('unlocked', startNextTask);
if (platform.isReadySync()) {
platformReady();
} else {
platform.events.on(platform.EVENT_READY, platformReady);
}
gPaused = false;
callback();
appdb.getAll(function (error, apps) {
if (error) return callback(error);
apps.forEach(function (app) {
if (app.installationState === appdb.ISTATE_INSTALLED && app.runState === appdb.RSTATE_RUNNING) return;
if (app.installationState === appdb.ISTATE_ERROR) return;
debug('Creating process for %s (%s) with state %s', app.location, app.id, app.installationState);
restartAppTask(app.id, NOOP_CALLBACK); // restart because the auto-installer could have queued up tasks already
});
callback(null);
});
}
function uninitialize(callback) {
function pauseTasks(callback) {
assert.strictEqual(typeof callback, 'function');
gPendingTasks = [ ]; // clear this first, otherwise stopAppTask will resume them
cloudron.events.removeListener(cloudron.EVENT_CONFIGURED, resumeTasks);
platform.events.removeListener(platform.EVENT_READY, platformReady);
locker.removeListener('unlocked', startNextTask);
gPaused = true;
async.eachSeries(Object.keys(gActiveTasks), stopAppTask, callback);
}
@@ -76,38 +86,6 @@ function waitForPendingTasks(callback) {
checkTasks();
}
function platformReady() {
if (cloudron.isConfiguredSync()) {
debug('platformReady: configured, resuming tasks'); // cloudron-setup script relies on this log message
resumeTasks();
} else {
debug('platformReady: not configured yet. waiting for configured event');
cloudron.events.on(cloudron.EVENT_CONFIGURED, resumeTasks);
}
}
// resume app tasks when platform is ready or after a crash
function resumeTasks(callback) {
callback = callback || NOOP_CALLBACK;
debug('resuming tasks');
appdb.getAll(function (error, apps) {
if (error) return callback(error);
apps.forEach(function (app) {
if (app.installationState === appdb.ISTATE_INSTALLED && app.runState === appdb.RSTATE_RUNNING) return;
if (app.installationState === appdb.ISTATE_ERROR) return;
debug('Creating process for %s (%s) with state %s', app.location, app.id, app.installationState);
restartAppTask(app.id, NOOP_CALLBACK); // restart because the auto-installer could have queued up tasks already
});
callback(null);
});
}
function startNextTask() {
if (gPendingTasks.length === 0) return;
@@ -124,7 +102,7 @@ function startAppTask(appId, callback) {
return callback(new Error(util.format('Task for %s is already active', appId)));
}
if (!platform.isReadySync()) {
if (gPaused) {
debug('Platform not ready yet, queueing task for %s', appId);
gPendingTasks.push(appId);
return callback();
+11 -13
View File
@@ -30,8 +30,7 @@ describe('Apps', function () {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
var USER_0 = {
@@ -43,8 +42,7 @@ describe('Apps', function () {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
var USER_1 = {
@@ -56,8 +54,7 @@ describe('Apps', function () {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
var GROUP_0 = {
@@ -171,20 +168,21 @@ describe('Apps', function () {
describe('validatePortBindings', function () {
it('does not allow invalid host port', function () {
expect(apps._validatePortBindings({ port: -1 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 0 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 'text' })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 65536 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 1024 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: -1 }, { port: 5000 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 0 }, { port: 5000 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 'text' }, { port: 5000 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 65536 }, { port: 5000 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 470 }, { port: 5000 })).to.be.an(Error);
});
it('does not allow ports not as part of manifest', function () {
expect(apps._validatePortBindings({ port: 1567 })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 1567 }, { })).to.be.an(Error);
expect(apps._validatePortBindings({ port: 1567 }, { port3: null })).to.be.an(Error);
});
it('allows valid bindings', function () {
expect(apps._validatePortBindings({ port: 1025 }, { port: null })).to.be(null);
expect(apps._validatePortBindings({ port: 1024 }, { port: 5000 })).to.be(null);
expect(apps._validatePortBindings({
port1: 4033,
port2: 3242,
+2 -2
View File
@@ -31,7 +31,7 @@ var MANIFEST = {
"contactEmail": "support@cloudron.io",
"version": "0.1.0",
"manifestVersion": 1,
"dockerImage": "cloudron/test:17.0.0",
"dockerImage": "cloudron/test:18.0.0",
"healthCheckPath": "/",
"httpPort": 7777,
"tcpPorts": {
@@ -85,7 +85,7 @@ describe('apptask', function () {
async.series([
database.initialize,
appdb.add.bind(null, APP.id, APP.appStoreId, APP.manifest, APP.location, APP.portBindings, APP),
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }),
settings.setDnsConfig.bind(null, { provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey', endpoint: 'http://localhost:5353' }, config.fqdn()),
settings.setTlsConfig.bind(null, { provider: 'caas' })
], done);
});
+1 -1
View File
@@ -3,7 +3,7 @@
set -eu
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
readonly TEST_IMAGE="cloudron/test:17.0.0"
readonly TEST_IMAGE="cloudron/test:18.0.0"
# reset sudo timestamp to avoid wrong success
sudo -k || sudo --reset-timestamp
+12 -13
View File
@@ -31,8 +31,7 @@ var USER_0 = {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
var USER_1 = {
@@ -44,8 +43,7 @@ var USER_1 = {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: '',
displayName: 'Herbert 1',
showTutorial: false
displayName: 'Herbert 1'
};
var USER_2 = {
@@ -57,8 +55,7 @@ var USER_2 = {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: '',
displayName: 'Herbert 2',
showTutorial: false
displayName: 'Herbert 2'
};
describe('database', function () {
@@ -544,7 +541,8 @@ describe('database', function () {
memoryLimit: 4294967296,
altDomain: null,
xFrameOptions: 'DENY',
sso: true
sso: true,
debugMode: null
};
var APP_1 = {
id: 'appid-1',
@@ -565,7 +563,8 @@ describe('database', function () {
memoryLimit: 0,
altDomain: null,
xFrameOptions: 'SAMEORIGIN',
sso: true
sso: true,
debugMode: null
};
it('add fails due to missing arguments', function () {
@@ -1065,7 +1064,7 @@ describe('database', function () {
it('add app succeeds', function (done) {
var backup = {
id: 'appbackup_appid_123',
id: 'app_appid_123',
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [ ]
@@ -1078,7 +1077,7 @@ describe('database', function () {
});
it('get succeeds', function (done) {
backupdb.get('appbackup_appid_123', function (error, result) {
backupdb.get('app_appid_123', function (error, result) {
expect(error).to.be(null);
expect(result.version).to.be('1.0.0');
expect(result.type).to.be(backupdb.BACKUP_TYPE_APP);
@@ -1094,7 +1093,7 @@ describe('database', function () {
expect(results).to.be.an(Array);
expect(results.length).to.be(1);
expect(results[0].id).to.be('appbackup_appid_123');
expect(results[0].id).to.be('app_appid_123');
expect(results[0].version).to.be('1.0.0');
expect(results[0].dependsOn).to.eql([]);
@@ -1103,11 +1102,11 @@ describe('database', function () {
});
it('delete succeeds', function (done) {
backupdb.del('appbackup_appid_123', function (error, result) {
backupdb.del('app_appid_123', function (error, result) {
expect(error).to.be(null);
expect(result).to.not.be.ok();
backupdb.get('appbackup_appid_123', function (error, result) {
backupdb.get('app_appid_123', function (error, result) {
expect(error).to.be.a(DatabaseError);
expect(error.reason).to.equal(DatabaseError.NOT_FOUND);
expect(result).to.not.be.ok();
+4 -3
View File
@@ -8,6 +8,7 @@
var async = require('async'),
AWS = require('aws-sdk'),
config = require('../config.js'),
database = require('../database.js'),
expect = require('expect.js'),
nock = require('nock'),
@@ -32,7 +33,7 @@ describe('dns provider', function () {
provider: 'noop'
};
settings.setDnsConfig(data, done);
settings.setDnsConfig(data, config.fqdn(), done);
});
it('upsert succeeds', function (done) {
@@ -73,7 +74,7 @@ describe('dns provider', function () {
token: TOKEN
};
settings.setDnsConfig(data, done);
settings.setDnsConfig(data, config.fqdn(), done);
});
it('upsert non-existing record succeeds', function (done) {
@@ -397,7 +398,7 @@ describe('dns provider', function () {
// Comment this out and replace the config with real tokens to test against AWS proper
AWS.Route53 = Route53Mock;
settings.setDnsConfig(data, done);
settings.setDnsConfig(data, config.fqdn(), done);
});
it('upsert non-existing record succeeds', function (done) {
+1 -2
View File
@@ -33,8 +33,7 @@ var USER_0 = {
createdAt: 'sometime back',
modifiedAt: 'now',
resetToken: hat(256),
displayName: '',
showTutorial: false
displayName: ''
};
function setup(done) {
+27 -2
View File
@@ -6,7 +6,8 @@
'use strict';
var progress = require('../progress.js'),
var async = require('async'),
progress = require('../progress.js'),
config = require('../config.js'),
database = require('../database.js'),
expect = require('expect.js'),
@@ -214,6 +215,31 @@ describe('Server', function () {
});
});
describe('rate limit', function () {
before(function (done) {
server.start(done);
});
after(function (done) {
server.stop(done);
nock.cleanAll();
});
it('gets throttled after 200 requests', function (done) {
async.times(200, function (n, next) {
superagent.get(SERVER_URL + '/api/v1/cloudron/status', function (error, result) {
expect(result.statusCode).to.equal(200);
next();
});
}, function () {
superagent.get(SERVER_URL + '/api/v1/cloudron/status', function (error, result) {
expect(result.statusCode).to.equal(429);
done();
});
});
});
});
describe('cors', function () {
before(function (done) {
server.start(function (error) {
@@ -275,4 +301,3 @@ describe('Server', function () {
});
});
});
+3 -2
View File
@@ -5,7 +5,8 @@
'use strict';
var database = require('../database.js'),
var config = require('../config.js'),
database = require('../database.js'),
expect = require('expect.js'),
settings = require('../settings.js');
@@ -87,7 +88,7 @@ describe('Settings', function () {
});
it('can set dns config', function (done) {
settings.setDnsConfig({ provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey' }, function (error) {
settings.setDnsConfig({ provider: 'route53', accessKeyId: 'accessKeyId', secretAccessKey: 'secretAccessKey' }, config.fqdn(), function (error) {
expect(error).to.be(null);
done();
});
+1 -1
View File
@@ -10,7 +10,7 @@ readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd)"
rm -rf $HOME/.cloudron_test 2>/dev/null || true # some of those docker container data requires sudo to be removed
mkdir -p $HOME/.cloudron_test
cd $HOME/.cloudron_test
mkdir -p data/appdata data/box/appicons data/mail data/nginx/cert data/nginx/applications data/collectd/collectd.conf.d data/addons configs data/box/certs data/box/mail/dkim/localhost data/box/mail/dkim/foobar.com
mkdir -p data/appdata boxdata/appicons data/mail data/addons/mail data/nginx/cert data/nginx/applications data/collectd/collectd.conf.d data/addons configs boxdata/certs data/mail/dkim/localhost data/mail/dkim/foobar.com
# put cert
openssl req -x509 -newkey rsa:2048 -keyout data/nginx/cert/host.key -out data/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes
+179 -60
View File
@@ -9,6 +9,7 @@
var appdb = require('../appdb.js'),
async = require('async'),
config = require('../config.js'),
constants = require('../constants.js'),
database = require('../database.js'),
deepExtend = require('deep-extend'),
expect = require('expect.js'),
@@ -79,22 +80,23 @@ function checkMails(number, done) {
}, 500);
}
describe('updatechecker - checkBoxUpdates', function () {
describe('updatechecker - box - manual', function () {
before(function (done) {
config.set('version', '1.0.0');
config.set('boxVersionsUrl', 'http://localhost:4444/release.json');
async.series([
database.initialize,
mailer._clearMailQueue,
user.createOwner.bind(null, USER_0.username, USER_0.password, USER_0.email, USER_0.displayName, AUDIT_SOURCE)
user.createOwner.bind(null, USER_0.username, USER_0.password, USER_0.email, USER_0.displayName, AUDIT_SOURCE),
settings.setAutoupdatePattern.bind(null, constants.AUTOUPDATE_PATTERN_NEVER)
], done);
});
function after(done) {
after(function (done) {
mailer._clearMailQueue();
database._clear(done);
}
});
it('no updates', function (done) {
nock.cleanAll();
@@ -109,6 +111,7 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box).to.be(null);
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
@@ -128,6 +131,7 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box.version).to.be('2.0.0');
expect(scope.isDone()).to.be.ok();
checkMails(1, done);
});
@@ -146,8 +150,9 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box.version).to.be('2.0.0');
expect(scope.isDone()).to.be.ok();
checkMails(0, done); // already notified for 2.0.0
checkMails(1, done); // already notified for 2.0.0
});
});
@@ -163,6 +168,8 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box).to.be(null);
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
});
@@ -182,6 +189,8 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box.version).to.be('2.0.0-pre0');
expect(scope.isDone()).to.be.ok();
checkMails(1, done);
});
});
@@ -198,6 +207,8 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box.version).to.be('1.0.1'); // got the update
expect(scope.isDone()).to.be.ok();
checkMails(0, done); // but no email sent since patch release
});
});
@@ -214,12 +225,173 @@ describe('updatechecker - checkBoxUpdates', function () {
updatechecker.checkBoxUpdates(function (error) {
expect(error).to.be.ok();
expect(updatechecker.getUpdateInfo().box).to.be(null);
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
});
});
describe('updatechecker - checkAppUpdates', function () {
describe('updatechecker - box - automatic', function () {
before(function (done) {
config.set('version', '1.0.0');
config.set('boxVersionsUrl', 'http://localhost:4444/release.json');
async.series([
database.initialize,
mailer._clearMailQueue,
user.createOwner.bind(null, USER_0.username, USER_0.password, USER_0.email, USER_0.displayName, AUDIT_SOURCE)
], done);
});
after(function (done) {
mailer._clearMailQueue();
database._clear(done);
});
it('new version', function (done) {
nock.cleanAll();
var releaseCopy = deepExtend({}, RELEASES);
delete releaseCopy['2.0.0-pre0'];
releaseCopy['1.0.0'].next = '2.0.0';
var scope = nock('http://localhost:4444')
.get('/release.json')
.reply(200, releaseCopy);
updatechecker.checkBoxUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().box.version).to.be('2.0.0');
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
});
});
describe('updatechecker - app - manual', function () {
var APP_0 = {
id: 'appid-0',
appStoreId: 'io.cloudron.app',
installationState: appdb.ISTATE_PENDING_INSTALL,
installationProgress: null,
runState: null,
location: 'some-location-0',
manifest: {
version: '1.0.0', dockerImage: 'docker/app0', healthCheckPath: '/', httpPort: 80, title: 'app0',
tcpPorts: {
PORT: {
description: 'this is a port that i expose',
containerPort: '1234'
}
}
},
httpPort: null,
containerId: null,
portBindings: { PORT: 5678 },
healthy: null,
accessRestriction: null,
memoryLimit: 0
};
before(function (done) {
config.set('version', '1.0.0');
config.set('apiServerOrigin', 'http://localhost:4444');
async.series([
database.initialize,
database._clear,
mailer._clearMailQueue,
appdb.add.bind(null, APP_0.id, APP_0.appStoreId, APP_0.manifest, APP_0.location, APP_0.portBindings, APP_0),
user.createOwner.bind(null, USER_0.username, USER_0.password, USER_0.email, USER_0.displayName, AUDIT_SOURCE),
settings.setAutoupdatePattern.bind(null, constants.AUTOUPDATE_PATTERN_NEVER)
], done);
});
after(function (done) {
database._clear(done);
});
it('no updates', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(200, { update: null });
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({});
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
});
it('bad response', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(500, { update: { manifest: { version: '1.0.0' } } } );
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({});
expect(scope.isDone()).to.be.ok();
checkMails(0, done);
});
});
it('offers new version', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(200, { update: { manifest: { version: '2.0.0' } } } );
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ 'appid-0': { manifest: { version: '2.0.0' } } });
expect(scope.isDone()).to.be.ok();
checkMails(1, done);
});
});
it('does not send mail for patch releases', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(200, { update: { manifest: { version: '1.0.1' } } } );
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ 'appid-0': { manifest: { version: '1.0.1' } } }); // got the update
expect(scope.isDone()).to.be.ok();
checkMails(0, done); // but no email sent since patch release
});
});
it('does not offer old version', function (done) {
nock.cleanAll();
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ });
checkMails(0, done);
});
});
});
describe('updatechecker - app - automatic', function () {
var APP_0 = {
id: 'appid-0',
appStoreId: 'io.cloudron.app',
@@ -260,36 +432,6 @@ describe('updatechecker - checkAppUpdates', function () {
database._clear(done);
});
it('no updates', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(200, { update: null });
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({});
checkMails(0, done);
});
});
it('bad response', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(500, { update: { manifest: { version: '1.0.0' } } } );
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({});
checkMails(0, done);
});
});
it('offers new version', function (done) {
nock.cleanAll();
@@ -301,31 +443,8 @@ describe('updatechecker - checkAppUpdates', function () {
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ 'appid-0': { manifest: { version: '2.0.0' } } });
checkMails(1, done);
});
});
expect(scope.isDone()).to.be.ok();
it('does not send mail for patch releases', function (done) {
nock.cleanAll();
var scope = nock('http://localhost:4444')
.get('/api/v1/apps/io.cloudron.app/versions/1.0.0/update')
.query({ boxVersion: config.version() })
.reply(200, { update: { manifest: { version: '1.0.1' } } } );
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ 'appid-0': { manifest: { version: '1.0.1' } } }); // got the update
checkMails(0, done); // but no email sent since patch release
});
});
it('does not offer old version', function (done) {
nock.cleanAll();
updatechecker.checkAppUpdates(function (error) {
expect(!error).to.be.ok();
expect(updatechecker.getUpdateInfo().apps).to.eql({ });
checkMails(0, done);
});
});
+23 -13
View File
@@ -31,7 +31,6 @@ var NEW_PASSWORD = 'oTHER@#$235';
var DISPLAY_NAME = 'Nobody cares';
var DISPLAY_NAME_NEW = 'Somone cares';
var userObject = null;
var groupObject = null;
var NON_ADMIN_GROUP = 'members';
var AUDIT_SOURCE = { ip: '1.2.3.4' };
@@ -51,18 +50,13 @@ function cleanupUsers(done) {
function createOwner(done) {
groups.create('admin', function () { // ignore error since it might already exist
groups.create(NON_ADMIN_GROUP, function (error, result) { // ignore error since it might already exist
expect(error).to.be(null);
groupObject = result;
user.createOwner(USERNAME, PASSWORD, EMAIL, DISPLAY_NAME, AUDIT_SOURCE, function (error, result) {
expect(error).to.not.be.ok();
expect(result).to.be.ok();
user.createOwner(USERNAME, PASSWORD, EMAIL, DISPLAY_NAME, AUDIT_SOURCE, function (error, result) {
expect(error).to.not.be.ok();
expect(result).to.be.ok();
userObject = result;
userObject = result;
done();
});
done();
});
});
}
@@ -731,7 +725,21 @@ describe('User', function () {
});
describe('admin change triggers mail', function () {
before(createOwner);
var groupObject;
before(function (done) {
createOwner(function (error) {
expect(error).to.not.be.ok();
groups.create(NON_ADMIN_GROUP, function (error, result) {
expect(error).to.be(null);
groupObject = result;
done();
});
});
});
after(cleanupUsers);
var user1 = {
@@ -979,7 +987,7 @@ describe('User', function () {
before(createOwner);
after(cleanupUsers);
it('fails for unkown user', function (done) {
it('fails for unknown user', function (done) {
user.remove('unknown', { }, function (error) {
expect(error.reason).to.be(UserError.NOT_FOUND);
done();
@@ -1016,5 +1024,7 @@ describe('User', function () {
});
});
});
it('can re-create user after user was removed', createOwner);
});
});
+17 -3
View File
@@ -12,6 +12,7 @@ exports = module.exports = {
var apps = require('./apps.js'),
async = require('async'),
config = require('./config.js'),
constants = require('./constants.js'),
debug = require('debug')('box:updatechecker'),
mailer = require('./mailer.js'),
paths = require('./paths.js'),
@@ -162,8 +163,15 @@ function checkAppUpdates(callback) {
} else if (semver.satisfies(newState[app.id], '~' + app.manifest.version)) {
debug('Skipping notification of app update as this is a patch release');
} else {
debug('Notifying user of app update for %s from %s to %s', app.id, app.manifest.version, updateInfo.manifest.version);
mailer.appUpdateAvailable(app, updateInfo);
// only send notifications if update pattern is 'never'
settings.getAutoupdatePattern(function (error, result) {
if (error) return console.error(error);
if (result !== constants.AUTOUPDATE_PATTERN_NEVER) return;
debug('Notifying user of app update for %s from %s to %s', app.id, app.manifest.version, updateInfo.manifest.version);
mailer.appUpdateAvailable(app, updateInfo);
});
}
iteratorDone();
@@ -209,7 +217,13 @@ function checkBoxUpdates(callback) {
if (semver.satisfies(gBoxUpdateInfo.version, '~' + config.version())) {
debug('Skipping notification of box update as this is a patch release');
} else {
mailer.boxUpdateAvailable(updateInfo.version, updateInfo.changelog);
// only send notifications if update pattern is 'never'
settings.getAutoupdatePattern(function (error, result) {
if (error) return console.error(error);
if (result !== constants.AUTOUPDATE_PATTERN_NEVER) return;
mailer.boxUpdateAvailable(updateInfo.version, updateInfo.changelog);
});
}
state.box = updateInfo.version;
+6 -20
View File
@@ -21,8 +21,7 @@ exports = module.exports = {
sendInvite: sendInvite,
setGroups: setGroups,
setAliases: setAliases,
getAliases: getAliases,
setShowTutorial: setShowTutorial
getAliases: getAliases
};
var assert = require('assert'),
@@ -51,6 +50,7 @@ var assert = require('assert'),
var CRYPTO_SALT_SIZE = 64; // 512-bit salt
var CRYPTO_ITERATIONS = 10000; // iterations
var CRYPTO_KEY_LENGTH = 512; // bits
var CRYPTO_DIGEST = 'sha1'; // used to be the default in node 4.1.1 cannot change since it will affect existing db records
function asyncIf(cond, func, next) {
if (!cond) return next();
@@ -163,7 +163,7 @@ function createUser(username, password, email, displayName, auditSource, options
crypto.randomBytes(CRYPTO_SALT_SIZE, function (error, salt) {
if (error) return callback(new UserError(UserError.INTERNAL_ERROR, error));
crypto.pbkdf2(password, salt, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, function (error, derivedKey) {
crypto.pbkdf2(password, salt, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, CRYPTO_DIGEST, function (error, derivedKey) {
if (error) return callback(new UserError(UserError.INTERNAL_ERROR, error));
var now = (new Date()).toISOString();
@@ -176,8 +176,7 @@ function createUser(username, password, email, displayName, auditSource, options
createdAt: now,
modifiedAt: now,
resetToken: hat(256),
displayName: displayName,
showTutorial: true
displayName: displayName
};
asyncIf(!!username, mailboxdb.add.bind(null, username, user.id /* owner */, mailboxdb.TYPE_USER), function (error) {
@@ -238,7 +237,7 @@ function verify(userId, password, callback) {
if (verifyGhost(user.username, password)) return callback(null, user);
var saltBinary = new Buffer(user.salt, 'hex');
crypto.pbkdf2(password, saltBinary, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, function (error, derivedKey) {
crypto.pbkdf2(password, saltBinary, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, CRYPTO_DIGEST, function (error, derivedKey) {
if (error) return callback(new UserError(UserError.INTERNAL_ERROR, error));
var derivedKeyHex = new Buffer(derivedKey, 'binary').toString('hex');
@@ -524,7 +523,7 @@ function setPassword(userId, newPassword, callback) {
if (config.isDemo() && user.username === constants.DEMO_USERNAME) return callback(new UserError(UserError.BAD_FIELD, 'Not allowed in demo mode'));
var saltBuffer = new Buffer(user.salt, 'hex');
crypto.pbkdf2(newPassword, saltBuffer, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, function (error, derivedKey) {
crypto.pbkdf2(newPassword, saltBuffer, CRYPTO_ITERATIONS, CRYPTO_KEY_LENGTH, CRYPTO_DIGEST, function (error, derivedKey) {
if (error) return callback(new UserError(UserError.INTERNAL_ERROR, error));
user.modifiedAt = (new Date()).toISOString();
@@ -622,19 +621,6 @@ function sendInvite(userId, options, callback) {
});
}
function setShowTutorial(userId, showTutorial, callback) {
assert.strictEqual(typeof userId, 'string');
assert.strictEqual(typeof showTutorial, 'boolean');
assert.strictEqual(typeof callback, 'function');
userdb.update(userId, { showTutorial: showTutorial }, function (error) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new UserError(UserError.NOT_FOUND, error));
if (error) return callback(new UserError(UserError.INTERNAL_ERROR, error));
callback(null);
});
}
function setAliases(userId, aliases, callback) {
assert.strictEqual(typeof userId, 'string');
assert(util.isArray(aliases));

Some files were not shown because too many files have changed in this diff Show More