Compare commits

..

344 Commits

Author SHA1 Message Date
Girish Ramakrishnan 7e6a83df84 Fix migration callback 2017-11-18 02:11:00 -08:00
Girish Ramakrishnan ec4910a45e Fix restore 2017-11-17 22:35:56 -08:00
Girish Ramakrishnan 6558c78094 change the json blobs to text 2017-11-17 15:52:40 -08:00
Girish Ramakrishnan 5df92d1903 remove dead code 2017-11-17 15:18:06 -08:00
Girish Ramakrishnan 05affa7d26 remove dead code 2017-11-17 15:17:50 -08:00
Girish Ramakrishnan 46c6c5a5a8 remove double .js 2017-11-17 14:50:53 -08:00
Girish Ramakrishnan 75da751c72 1.8.1 changes 2017-11-17 14:50:53 -08:00
Johannes Zellner b84f60671e Also fix the restoreConfigJson migration down script 2017-11-17 23:45:22 +01:00
Johannes Zellner 8dcb06cb02 Fix db migration down step for newConfigJson change 2017-11-17 23:41:22 +01:00
Girish Ramakrishnan 83bf739081 Update the license 2017-11-17 10:46:12 -08:00
Girish Ramakrishnan 48a52fae2e LE agreement URL has changed 2017-11-17 10:35:58 -08:00
Girish Ramakrishnan 0ddbda6068 Fix crash 2017-11-16 15:11:12 -08:00
Girish Ramakrishnan 360fa058ea store format information for restoring
fixes #483
2017-11-16 15:01:27 -08:00
Johannes Zellner 489d2022e6 Do not underline errored links 2017-11-16 23:18:50 +01:00
Girish Ramakrishnan f762d0c0a1 newConfig -> updateConfig 2017-11-16 12:36:07 -08:00
Girish Ramakrishnan 98cad0678d Handle json parse errors with new body-parser module 2017-11-16 11:47:17 -08:00
Girish Ramakrishnan 92acb2954f Rename restoreConfig to manifest in backup table
Only the manifest needs to be preserved in the backup table
2017-11-16 11:25:40 -08:00
Girish Ramakrishnan 00a6e4c982 Show doc url in info dialog
Fixes #486
2017-11-16 10:05:49 -08:00
Girish Ramakrishnan bf9eb4bd87 Switch the default to logs to show some useful information 2017-11-16 10:05:49 -08:00
Girish Ramakrishnan 2f4940acbd update modules 2017-11-16 09:34:00 -08:00
Girish Ramakrishnan 9f7ca552a6 handle various appstore errors 2017-11-16 00:23:34 -08:00
Girish Ramakrishnan 4272d5be8a Send feedback via API
Fixes #484
2017-11-15 23:31:13 -08:00
Girish Ramakrishnan 1babfb6e87 Allow admins to access all apps
Fixes #420
2017-11-15 19:24:11 -08:00
Girish Ramakrishnan 5663cf45f8 remove redundant reset 2017-11-15 19:08:38 -08:00
Girish Ramakrishnan d8cb2d1d25 test: reset is already part of setup 2017-11-15 18:56:27 -08:00
Girish Ramakrishnan 174a60bb07 fix linter warnings 2017-11-15 18:56:27 -08:00
Girish Ramakrishnan 3d7094bf28 Handle error in uploadFile 2017-11-15 18:45:23 -08:00
Girish Ramakrishnan 4d6616930a Fix failing test 2017-11-15 18:41:37 -08:00
Girish Ramakrishnan 24875ba292 Handle all errors and set focus correctly
Fixes #485
2017-11-14 18:26:42 -08:00
Johannes Zellner c58b2677b6 Fixup config tests and do not allow saving random values to the config file
Those will eventually be overwritten by start.sh anyways, we cannot rely
on those
2017-11-15 02:41:40 +01:00
Johannes Zellner 25146e1134 Allow tests to work without a cloudron.conf on disk 2017-11-15 02:40:50 +01:00
Johannes Zellner c0c35964fe Fix backups tests 2017-11-15 02:29:58 +01:00
Johannes Zellner 0bf9ab0a2b No need to put static database config in cloudron.conf 2017-11-15 02:29:36 +01:00
Johannes Zellner 6d86f4cbda Ensure we only save relevant config values 2017-11-15 02:29:07 +01:00
Girish Ramakrishnan d2741bbeb9 Allow mailTo to be configurable
Part of #485
2017-11-14 16:24:34 -08:00
Girish Ramakrishnan 690d02a353 Always show the DNS records in the UI 2017-11-14 15:13:56 -08:00
Johannes Zellner c629db9597 Remove preinstall app bundle support 2017-11-14 23:09:17 +01:00
Girish Ramakrishnan 67fcf85abb Allow restore if already restoring 2017-11-13 18:43:36 -08:00
Girish Ramakrishnan 527eace8f8 Fix j2xml usage 2017-11-13 11:10:42 -08:00
Girish Ramakrishnan e65230b833 update many dev modules 2017-11-13 10:57:36 -08:00
Girish Ramakrishnan 3e8334040b Update many node modules
also, use rimraf instead of del
2017-11-13 10:57:32 -08:00
Girish Ramakrishnan 2bcd3a8e4d Add a hack to stretch the multi-select box a bit 2017-11-12 02:50:28 -08:00
Girish Ramakrishnan e75b85fc3a Bump postgresql container to workaround shm issues
reconfiguring the postgresql configuration seems to fix some shm
issues on docker upgrade
2017-11-11 20:52:34 -08:00
Girish Ramakrishnan c4362d3339 Fix failing ldap test 2017-11-11 17:33:27 -08:00
Girish Ramakrishnan 85e492a632 Fix detection of container id from IP
https://docs.docker.com/engine/api/v1.32/#tag/Network

"Note that it uses a different, smaller representation of a network
than inspecting a single network. For example, the list of containers
attached to the network is not propagated in API versions 1.28 and up."

Verified using:

curl --unix-socket /var/run/docker.sock http::/networks/cloudron
2017-11-11 16:55:43 -08:00
Girish Ramakrishnan b8d4b67043 update aws-sdk and dockerode 2017-11-11 16:38:40 -08:00
Girish Ramakrishnan ffacd31259 bump the node version 2017-11-11 16:25:42 -08:00
Johannes Zellner 19f6da88da Do not disable access control elements if no group was created
There are still users to be selected
2017-11-12 00:09:05 +01:00
Girish Ramakrishnan c0faae4e27 Add more changes for 1.8.0 2017-11-11 11:14:42 -08:00
Girish Ramakrishnan a19c566eea Always show info box that displays app version
Fixes #478
2017-11-11 11:09:59 -08:00
Girish Ramakrishnan 3ec806452c Update node to 6.11.5 2017-11-10 19:25:08 -08:00
Girish Ramakrishnan 0c73cd5219 Update docker to 17.09 2017-11-10 18:49:28 -08:00
Girish Ramakrishnan 9b6bf719ff 1.7.8 changes 2017-11-09 09:40:26 -08:00
Girish Ramakrishnan 25431d3cc4 Fix the spacing 2017-11-09 09:29:42 -08:00
Girish Ramakrishnan e0805df3b1 Only show backup warning if using default location 2017-11-09 09:09:39 -08:00
Girish Ramakrishnan 8392fec570 Remove the bold 2017-11-08 20:57:40 -08:00
Girish Ramakrishnan 1c173ca83f Add UI to select users for access restriction 2017-11-08 20:54:38 -08:00
Girish Ramakrishnan 05a67db761 backup must be stored in ext4
Other file systems like FAT/CIFS can error with cryptic error messages
when saving filenames with special characters such as ':'
2017-11-08 12:26:25 -08:00
Girish Ramakrishnan bb24d5cf9e Order eventlog entries by time 2017-11-08 09:14:55 -08:00
Girish Ramakrishnan 8d2fbe931f Bump max limit to two times ram
part of #466
2017-11-07 10:07:05 -08:00
Girish Ramakrishnan 0a8adaac9f filter out empty usernames from groups
Fixes #472
2017-11-06 11:09:40 -08:00
Girish Ramakrishnan fa6d151325 Fix update mail templates 2017-11-02 21:34:03 -07:00
Girish Ramakrishnan a7296a0339 Rename filename to backupId in backup eventlog 2017-11-02 18:17:08 -07:00
Girish Ramakrishnan a6aee53ec2 Filter out failed backups 2017-11-02 18:13:51 -07:00
Girish Ramakrishnan 963ab2e791 More 1.7.7 changes 2017-11-02 16:30:13 -07:00
Girish Ramakrishnan ca724b8b03 Add cert renewal and user add/remove in weekly digest 2017-11-02 16:30:10 -07:00
Girish Ramakrishnan 88a929c85e Instead of appstore account, include owner alternate email 2017-11-02 15:10:05 -07:00
Girish Ramakrishnan 2bc0270880 1.7.7 changes 2017-11-02 12:18:51 -07:00
Girish Ramakrishnan 014b77b7aa Fix LE cert renewal failures
LE contacts the server by hostname and not by IP. This means that
when installing and reconfiguring the app it hits the default_server
route since nginx configs for the app are not generated yet.

When doing in the daily cert renew, the nginx configs exist and we
are unable to renew the certs.
2017-11-02 11:43:43 -07:00
Girish Ramakrishnan 06f8aa8f29 Remove dead code
getNonApprovedCode code flow is unused (and broken by design on
the appstore side).
2017-11-02 10:36:30 -07:00
Girish Ramakrishnan a8c64bf9f7 Clarify heartbeat code
heartbeats are not sent for self-hosted cloudrons (only managed ones)
2017-11-02 10:26:21 -07:00
Girish Ramakrishnan 41ef16fbec link to memory limit docs 2017-11-01 09:25:05 -07:00
Girish Ramakrishnan 2a848a481b Add newline 2017-11-01 09:25:05 -07:00
Johannes Zellner 3963d76a80 The update dialog does not contain a form anymore
Fixes #467
2017-11-01 11:55:06 +01:00
Girish Ramakrishnan 8ede37a43d Make the dkim selector dynamic
it has to change with the adminLocation so that multiple cloudrons
can send out emails at the same time.
2017-10-31 12:18:40 -07:00
Girish Ramakrishnan 36534f6bb2 Fix indent 2017-10-31 12:12:02 -07:00
Girish Ramakrishnan 7eddcaf708 Allow setting app memory till memory limit
Fixes #466
2017-10-31 12:12:02 -07:00
Girish Ramakrishnan d8d2572aa1 Keep restarting mysql until it succeeds
MySQL restarts randomly fail on our CI systems. This is easily
reproducible:

root@smartserver:~# cp /tmp/mysql.cnf . && systemctl restart mysql && echo "Yes"
Yes
root@smartserver:~# cp /tmp/mysql.cnf . && systemctl restart mysql && echo "Yes"
Yes
root@smartserver:~# cp /tmp/mysql.cnf . && systemctl restart mysql && echo "Yes"
Job for mysql.service failed. See "systemctl status mysql.service" and "journalctl -xe" for details.

There also seems some apparmor issue:
[ 7389.111704] audit: type=1400 audit(1509404778.110:829): apparmor="DENIED" operation="open" profile="/usr/sbin/mysqld" name="/sys/devices/system/node/" pid=15618 comm="mysqld" requested_mask="r" denied_mask="r" fsuid=112 ouid=0

The apparmor issue is reported in https://bugs.launchpad.net/ubuntu/+source/mysql-5.7/+bug/1610765,
https://bugs.launchpad.net/ubuntu/+source/mysql-5.7/+bug/1658233 and
https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/1658239
2017-10-30 16:14:20 -07:00
Girish Ramakrishnan 96a98a74ac Move the mysql block
The e2e is failing sporadically with:

==> Changing ownership
==> Adding automated configs
mysql: [Warning] Using a password on the command line interface can be insecure.
ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2)

Maybe the dhparam creation is doing something causing mysql to not respond.
2017-10-30 08:03:47 -07:00
Girish Ramakrishnan d0a244e392 stash adminLocation also 2017-10-29 19:09:03 -07:00
Girish Ramakrishnan f09c89e33f Remove confusing batchSize logic from listDir
This also fixes a bug in removeDir in DO spaces

thanks to @syn for reporting
2017-10-29 19:04:10 -07:00
Johannes Zellner d53f0679e5 Also stash the zoneName to settings 2017-10-29 22:40:15 +01:00
Girish Ramakrishnan 527093ebcb Stash the fqdn in the db for the next multi-domain release 2017-10-29 12:08:27 -07:00
Girish Ramakrishnan bd5835b866 send adminFqdn as well 2017-10-29 09:36:51 -07:00
Girish Ramakrishnan 6dd70c0ef2 acme challenges must be answered by default_server
The challenge must be answered even before app nginx config
is available.
2017-10-28 23:39:03 -07:00
Girish Ramakrishnan acc90e16d7 1.7.6 changes 2017-10-28 21:07:44 -07:00
Girish Ramakrishnan 4b3aca7413 Bump mail container for sogo disconnect fix 2017-10-28 20:58:26 -07:00
Johannes Zellner 8daee764d2 Only require gcdns form input to be valid if that provider is selected 2017-10-28 22:37:56 +02:00
Girish Ramakrishnan 3dedda32d4 Configure http server to only listen on known vhosts/IP
For the rest it returns 404

Fixes #446
2017-10-27 00:10:50 -07:00
Girish Ramakrishnan d127b25f0f Only set the custom https agent for HTTPS minio
Otherwise, we get a Cannot set property ‘agent’ of undefined error
2017-10-26 18:38:45 -07:00
Johannes Zellner 6a2b0eedb3 Add ldap pagination support 2017-10-27 01:25:07 +02:00
Girish Ramakrishnan 8c81a97a4b Check that the backup location has perms to create a directory
The backup itself runs as root and this works fine. But when rotating
the backup, the copy fails because it is unable to create a directory.
2017-10-26 11:41:34 -07:00
Girish Ramakrishnan d9ab1a78d5 Make the my location customizable
Fixes #22
2017-10-25 23:00:43 -07:00
Girish Ramakrishnan 593df8ed49 Do not use ADMIN_LOCATION in tests 2017-10-25 21:38:11 -07:00
Girish Ramakrishnan b30def3620 move prerelease check to appstore 2017-10-25 21:34:56 -07:00
Johannes Zellner 9c02785d49 Support ldap group compare
Fixes #463
2017-10-24 02:00:00 +02:00
Johannes Zellner f747343159 Cleanup unused port bindings after an update 2017-10-23 22:11:33 +02:00
Johannes Zellner 2971910ccf Do not accept port bindings on update route 2017-10-23 22:06:28 +02:00
Johannes Zellner 56534b9647 Add appdb.delPortBinding() 2017-10-23 22:05:43 +02:00
Johannes Zellner a8d26067ee Allow autoupdates if new ports are added
Those will simply be disabled after update and the user has to
enable them through the app configuration
2017-10-20 22:27:48 +02:00
Johannes Zellner 4212e4bb00 Do not show any port binding update ui 2017-10-20 22:27:48 +02:00
Johannes Zellner 7b27ace7bf Update cloudron-setup help url 2017-10-20 22:13:54 +02:00
Girish Ramakrishnan d8944da68d 1.7.5 changes 2017-10-19 12:19:10 -07:00
Girish Ramakrishnan 433d797cb7 Add SMTPS port for apps that require TLS connections for mail relay 2017-10-19 12:15:28 -07:00
Girish Ramakrishnan 0b1d940128 cloudscale -> cloudscale.ch 2017-10-19 07:28:07 -07:00
Johannes Zellner 6016024026 Move restore functions into appropriate scope object 2017-10-18 00:40:02 +02:00
Johannes Zellner e199293229 Further reduce ui flickering on restore 2017-10-18 00:40:02 +02:00
Girish Ramakrishnan 2ebe92fec3 Do not chown mail directory 2017-10-16 23:18:37 -07:00
Girish Ramakrishnan 628cf1e3de bump mail container for superfluous sa-update supervisor file 2017-10-16 21:16:58 -07:00
Girish Ramakrishnan 9e9aaf68f0 No need to migrate mail data anymore 2017-10-16 21:13:57 -07:00
Girish Ramakrishnan b595ca422c 1.7.4 changes 2017-10-16 15:28:36 -07:00
Girish Ramakrishnan 9273a6c726 Add option to disable hardlinks
We can probably remove this later based on the use
2017-10-16 15:22:40 -07:00
Johannes Zellner 76d00d4e65 Render changelog markdown as html in app update dialog 2017-10-17 00:07:58 +02:00
Johannes Zellner 668c03a11b Give visual feedback in the restore dialog when fetching backups 2017-10-16 22:31:49 +02:00
Girish Ramakrishnan 1e72d2d651 remove debugs (too noisy) 2017-10-16 12:34:09 -07:00
Girish Ramakrishnan 89fc8efc67 Save as empty array if find output is empty 2017-10-16 10:54:48 -07:00
Girish Ramakrishnan 241dbf160e Remove Unused required 2017-10-15 14:07:03 -07:00
Girish Ramakrishnan e46bdc2caa Force the copy just like tar --overwrite 2017-10-13 23:23:36 -07:00
Girish Ramakrishnan e1cb91ca76 bump mail container 2017-10-13 22:36:54 -07:00
Girish Ramakrishnan 709c742c46 Fix tests 2017-10-12 21:14:13 -07:00
Girish Ramakrishnan ecad9c499c Port binding conflict can never happen in update route 2017-10-12 21:04:38 -07:00
Girish Ramakrishnan ed0879ffcd Stop the app only after the backup completed
App backup can take a long time or possibly not work at all. For such
cases, do not stop the app or leave it in some errored state.

newConfigJson is the new config to be updated to. This ensures that
the db has correct app info during the update.
2017-10-12 18:10:41 -07:00
Girish Ramakrishnan 61e2878b08 save/restore exec bit in files
this covers the case where user might stash some executable files
that are used by plugins.
2017-10-12 16:18:11 -07:00
Girish Ramakrishnan d97034bfb2 Follow backup format for box backups as well 2017-10-12 11:02:52 -07:00
Girish Ramakrishnan 21942552d6 Clarify the per-app backup flag 2017-10-12 11:02:52 -07:00
Girish Ramakrishnan dd68c8f91f Various backup fixes 2017-10-12 11:02:48 -07:00
Girish Ramakrishnan 28ce5f41e3 handle errors in log stream 2017-10-11 12:55:56 -07:00
Girish Ramakrishnan 5694e676bd Set default retention to a week 2017-10-11 12:55:55 -07:00
Girish Ramakrishnan db8c5a116f Typo 2017-10-11 10:30:03 -07:00
Girish Ramakrishnan fa39f0fbf3 Add 1.7.3 changes 2017-10-11 00:50:41 -07:00
Girish Ramakrishnan 1444bb038f only upload needs to be retried
copy/delete are already retried in the sdk code
2017-10-11 00:08:41 -07:00
Girish Ramakrishnan ac9e421ecf improved backup progress and logging 2017-10-10 22:49:38 -07:00
Girish Ramakrishnan b60cbe5a55 move constant 2017-10-10 19:47:21 -07:00
Girish Ramakrishnan 56d794745b Sprinkle retries in syncer logic 2017-10-10 14:25:03 -07:00
Girish Ramakrishnan fd3b73bea2 typo in format name 2017-10-10 13:54:54 -07:00
Girish Ramakrishnan 78807782df Various hacks for exoscale-sos
SOS does not like multipart uploads. They just fail randomly.

As a fix, we try to detect filesystem files and skip multipart uploads
for files < 5GB. For > 5GB, we do multipart upload anyways (mostly fails).

The box backup is switched to flat-file for exoscale for the reason
above.
2017-10-10 11:03:20 -07:00
Girish Ramakrishnan 754b29b263 Start out empty if the previous run errored 2017-10-09 20:12:21 -07:00
Girish Ramakrishnan 9f97f48634 Add note on s3ForcePathStyle 2017-10-09 18:46:14 -07:00
Girish Ramakrishnan 815e5d9d9a graphs: Compute width of system graph from total memory
Fixes #452
2017-10-09 14:58:32 -07:00
Girish Ramakrishnan 91ec2eaaf5 sos: "/" must separate bucket and key name 2017-10-09 11:50:22 -07:00
Girish Ramakrishnan f8d3a7cadd Bump mail container (fixes spam crash) 2017-10-06 16:45:21 -07:00
Girish Ramakrishnan d04a09b015 Add note on bumping major infra version 2017-10-06 15:52:04 -07:00
Girish Ramakrishnan 5d997bcc89 Just mark DO Spaces as experimental instead 2017-10-06 14:45:14 -07:00
Girish Ramakrishnan f0dd90a1f5 listObjectsV2 does not work on some S3 providers
specifically, cloudscale does not support it
2017-10-05 12:07:14 -07:00
Girish Ramakrishnan ee8ee8e786 KeyCount is not set on some S3 providers 2017-10-05 11:36:54 -07:00
Girish Ramakrishnan ee1a4411f8 Do not crash if prefix is empty string
('' || undefined) will return undefined ...
2017-10-05 11:08:01 -07:00
Girish Ramakrishnan df6e6cb071 Allow s3 backend to accept self-signed certs
Fixes #316
2017-10-05 10:14:55 -07:00
Girish Ramakrishnan ba5645a20e Disable DO spaces since it is not yet production ready 2017-10-05 09:21:26 -07:00
Girish Ramakrishnan ca502a2d55 Display error code 2017-10-04 22:34:44 -07:00
Girish Ramakrishnan ecd53b48db Display the backup format 2017-10-04 22:11:11 -07:00
Girish Ramakrishnan b9efb0b50b Fix callback invocation 2017-10-04 19:28:40 -07:00
Johannes Zellner 3fb5034ebd Ensure we setup the correct OAuth redirectURI if altDomain is used 2017-10-05 01:10:25 +02:00
Girish Ramakrishnan afed3f3725 Remove duplicate debug 2017-10-04 15:08:26 -07:00
Girish Ramakrishnan b4f14575d7 Add 1.7.1 changes 2017-10-04 14:31:41 -07:00
Johannes Zellner f437a1f48c Only allow dns setup with subdomain if enterprise query argument is provided 2017-10-04 22:25:14 +02:00
Girish Ramakrishnan c3d7d867be Do not set logCallback 2017-10-04 12:32:12 -07:00
Girish Ramakrishnan 96c16cd5d2 remove debug 2017-10-04 11:54:17 -07:00
Girish Ramakrishnan af182e3df6 caas: cache the creds, otherwise we bombard the server 2017-10-04 11:49:38 -07:00
Girish Ramakrishnan d70ff7cd5b Make copy() return event emitter
This way the storage logic does not need to rely on progress
2017-10-04 11:02:50 -07:00
Johannes Zellner 38331e71e2 Ensure all S3 CopySource properties are URI encoded 2017-10-04 19:07:08 +02:00
Johannes Zellner 322a9a18d7 Use multipart copy for s3 and files larger than 5GB 2017-10-04 18:56:23 +02:00
Johannes Zellner 423ef546a9 Merge branch 'user_agent' into 'master'
Added user agent to health checks

See merge request !19
2017-10-04 11:48:02 +00:00
Dennis Schwerdel e3f3241966 Added user agent to health checks 2017-10-04 13:05:00 +02:00
Johannes Zellner eaef384ea5 Improve the invite link display
Fixes #445
2017-10-04 13:03:32 +02:00
Girish Ramakrishnan b85bc3aa01 s3: Must encode copySource
https://github.com/aws/aws-sdk-js/issues/1302
2017-10-03 15:51:05 -07:00
Girish Ramakrishnan 01154d0ae6 s3: better error messages 2017-10-03 14:46:59 -07:00
Girish Ramakrishnan 6494050d66 Make removeDir less noisy 2017-10-03 01:22:37 -07:00
Girish Ramakrishnan 8c7223ceed Fix cleanup logic to use the app backup format
box backup and app backup can have different format
2017-10-03 00:56:34 -07:00
Girish Ramakrishnan 21afc71d89 add tests for storage backends 2017-10-02 23:08:16 -07:00
Girish Ramakrishnan 7bf70956a1 fix tests 2017-10-02 18:42:13 -07:00
Girish Ramakrishnan 9e9b8b095e Provide dhparams.pem to the mail container 2017-10-02 01:51:28 -07:00
Girish Ramakrishnan 0f543e6703 s3: add progress detail
this is a bit of a hack and we should add another way to set the progress
(maybe via backups.setProgress or via a progress callback). this is because
some methods like removeDir can be called from backuptask and from box code.
2017-10-01 18:25:51 -07:00
Girish Ramakrishnan f9973e765c Add backup cleanup eventlog 2017-10-01 10:35:50 -07:00
Girish Ramakrishnan e089851ae9 add debugs 2017-09-30 20:36:08 -07:00
Girish Ramakrishnan c524d68c2f fix crash when cleaning up snapshots 2017-09-30 20:31:41 -07:00
Girish Ramakrishnan 5cccb50a31 fix backup cleanup logic 2017-09-30 18:38:45 -07:00
Girish Ramakrishnan 3d375b687a style: Fix quoting 2017-09-30 18:26:38 -07:00
Girish Ramakrishnan a93d453963 rename flat-file to rsync
not a name I like but cannot come up with anything better

https://en.wikipedia.org/wiki/Flat_file_database

the term 'rsync format' seems to be used in a few places
2017-09-30 14:19:19 -07:00
Girish Ramakrishnan f8ac2d4628 1.7.0 changes 2017-09-30 14:02:06 -07:00
Girish Ramakrishnan d5ba73716b add emptydirs test 2017-09-29 15:29:22 -07:00
Girish Ramakrishnan 954224dafb make syncer track directories 2017-09-29 15:29:18 -07:00
Johannes Zellner 8b341e2bf8 Only make nginx listen on ipv6 connections if it is supported by the system
Could not decide on the ejs formatting, never nice for me
2017-09-29 19:43:37 +02:00
Johannes Zellner 78fb9401ee Add config.hasIPv6() 2017-09-29 19:43:37 +02:00
Girish Ramakrishnan 4a5cbab194 Do not remove parent directory in fs.remove()
Do the pruning in the cleanup logic instead
2017-09-28 20:55:45 -07:00
Girish Ramakrishnan 19999abc50 s3: fix restore 2017-09-28 14:35:49 -07:00
Girish Ramakrishnan 5123b669d7 remove options.concurrency 2017-09-28 12:20:15 -07:00
Girish Ramakrishnan 565c8445e1 make backup progress work for per-app backups 2017-09-28 11:17:48 -07:00
Girish Ramakrishnan 404a019c56 s3: Check IsTruncated before accessing Contents 2017-09-28 10:36:56 -07:00
Girish Ramakrishnan 24dee80aa6 Make box backups always tarball based
this makes cloudron easy to restore. in the future, if required,
we can move out the mail data as a separate virtual app backup
2017-09-28 10:22:10 -07:00
Girish Ramakrishnan ce6df4bf96 Disable encryption for flat-file for now 2017-09-28 09:47:18 -07:00
Girish Ramakrishnan f8f6c7d93e Add progress detail when rotating snapshots 2017-09-28 09:29:46 -07:00
Girish Ramakrishnan bafc6dce98 s3: refactor out directory listing 2017-09-27 21:59:51 -07:00
Girish Ramakrishnan 56ee4d8e25 Remove old cache files when backup settings is changed 2017-09-27 21:04:46 -07:00
Girish Ramakrishnan eeef221b4e Fix race where pipe finishes before file is created
When there are 0 length files, this is easily reproducible.
2017-09-27 19:40:26 -07:00
Girish Ramakrishnan 4674653982 compare size and inode as well 2017-09-27 19:39:03 -07:00
Girish Ramakrishnan a34180c27b Add format to backupsdb
Call remove/removeDir based on the format
2017-09-27 18:02:30 -07:00
Girish Ramakrishnan aa8ce2c62e Use graphite 0.12.0
this fixes an issue where carbon does not startup properly
if a previous pid file was present
2017-09-27 15:35:55 -07:00
Girish Ramakrishnan b3c6b8aa15 do not spawn process just for chown 2017-09-27 15:07:19 -07:00
Girish Ramakrishnan 44a7a2579c rework backup status
* show backup progress even if not initiated by UI
* display backup progress in separate line
2017-09-27 15:07:15 -07:00
Girish Ramakrishnan 39f0e476f2 Start out empty if cache file is missing 2017-09-27 12:09:19 -07:00
Girish Ramakrishnan 003dc0dbaf Add todo 2017-09-27 11:50:49 -07:00
Girish Ramakrishnan e39329218d Make tests work 2017-09-27 11:38:43 -07:00
Girish Ramakrishnan 8d3fbc5432 Save backup logs and fix backup progress 2017-09-26 21:09:00 -07:00
Girish Ramakrishnan 2780de631e writable streams emit finish 2017-09-26 16:43:51 -07:00
Girish Ramakrishnan 399c756735 use exec so that filenames do not have to be escaped 2017-09-26 15:53:42 -07:00
Girish Ramakrishnan 859311f9e5 Process delete commands before add commands
This is required for cases where a dir becomes a file (or vice-versa)
2017-09-26 15:33:54 -07:00
Girish Ramakrishnan a9e89b57d9 merge caas storage into s3 backend 2017-09-26 12:28:33 -07:00
Girish Ramakrishnan 4e68abe51d Handle fs errors 2017-09-26 12:10:58 -07:00
Girish Ramakrishnan 12083f5608 Ignore all special files 2017-09-26 11:41:01 -07:00
Girish Ramakrishnan d1efb2db56 remove bogus mkdir 2017-09-26 11:34:24 -07:00
Girish Ramakrishnan adde28523f Add backup format to the backup UI 2017-09-26 10:46:02 -07:00
Girish Ramakrishnan f122f46fe2 Generate new index file by appending to file 2017-09-26 07:57:20 -07:00
Girish Ramakrishnan ad7fadb4a9 display backup id in the ui 2017-09-26 07:45:23 -07:00
Johannes Zellner be383582e0 Do not rely on external resource in the appstatus page 2017-09-26 15:33:05 +02:00
Girish Ramakrishnan 0a60365143 Initial version of flat-file uploader 2017-09-26 00:17:11 -07:00
Girish Ramakrishnan 2f6cb3e913 set format in the backup ui 2017-09-26 00:01:36 -07:00
Girish Ramakrishnan b0f85678d4 Implement downloadDir for flat-file format 2017-09-23 18:07:26 -07:00
Girish Ramakrishnan e43413e063 implement remove dir in storage backends 2017-09-23 12:34:51 -07:00
Girish Ramakrishnan e39a5c8872 preserve env in backuptask.js 2017-09-22 11:19:44 -07:00
Girish Ramakrishnan fb4b75dd2a Fix typo in comment 2017-09-22 11:19:37 -07:00
Girish Ramakrishnan 3c1ccc5cf4 Add exoscale provider 2017-09-21 17:50:03 -07:00
Girish Ramakrishnan abd66d6524 Add cloudscale as a provider 2017-09-21 17:49:26 -07:00
Girish Ramakrishnan b61b7f80b5 Add DO spaces 2017-09-21 12:25:39 -07:00
Girish Ramakrishnan efa850614d Add a s3-v4-compat provider 2017-09-21 12:13:45 -07:00
Girish Ramakrishnan 21c534c806 Ensure format is set in backupConfig 2017-09-21 09:49:55 -07:00
Girish Ramakrishnan 7e4ff2440c Fix text for manual DNS 2017-09-21 09:10:12 -07:00
Johannes Zellner f415e19f6f Do not unnecessarily mention error in the logs
Not so friendly for log searches
2017-09-21 15:00:35 +02:00
Girish Ramakrishnan 97da8717ca Refactor backup strategy logic into backups.js 2017-09-20 14:09:55 -07:00
Girish Ramakrishnan cbddb79d15 Resolve the id in rotateAppBackup 2017-09-20 09:38:55 -07:00
Johannes Zellner bffb935f0f Also send digest to appstore account owner 2017-09-20 16:33:25 +02:00
Johannes Zellner e50e0f730b Make nginx listen on :: for ipv6 2017-09-20 16:33:25 +02:00
Girish Ramakrishnan 26f33a8e9b Send resolved path to the storage APIs 2017-09-19 21:58:35 -07:00
Girish Ramakrishnan 952b1f6304 Make backuptask call back into backups.js 2017-09-19 20:27:49 -07:00
Girish Ramakrishnan a3293c4c35 Fix tests 2017-09-19 12:43:13 -07:00
Girish Ramakrishnan 4892473eff backupIds do not have extension anymore
this code existed for legacy reasons
2017-09-19 12:34:09 -07:00
Girish Ramakrishnan 221d5f95e1 ensure backupFolder is always set 2017-09-19 12:34:09 -07:00
Girish Ramakrishnan 84649b9471 Bring back backuptask
This is required for various small reasons:

* dir iteration with a way to pass messages back to the upload() easily
* can be killed independently of box code
* allows us to run sync (blocking) commands in the upload logic
2017-09-19 12:32:38 -07:00
Girish Ramakrishnan 44435559ab Typo 2017-09-19 10:37:45 -07:00
Girish Ramakrishnan c351660a9a Implement backup rotation
Always upload to 'snapshot' dir and then rotate it. This will allow
us to keep pushing incrementally to 'snapshot' and do server side
rotations.
2017-09-18 21:17:34 -07:00
Girish Ramakrishnan 0a24130fd4 Just reset config instead of clearing cache 2017-09-18 19:41:15 -07:00
Girish Ramakrishnan ea13f8f97e Fix checkInstall script 2017-09-18 18:19:27 -07:00
Johannes Zellner d00801d020 Only require service account key for google dns on setup 2017-09-18 23:50:34 +02:00
Girish Ramakrishnan 8ced0aa78e copy: use hardlinks to preserve space 2017-09-18 14:29:48 -07:00
Girish Ramakrishnan f5d32a9178 copyBackup -> copy 2017-09-18 14:29:15 -07:00
Girish Ramakrishnan 7fc45b3215 Refactor out the backup snapshot logic 2017-09-18 12:43:11 -07:00
Girish Ramakrishnan 9bed14a3e8 Enable IP6 in unbound
On some provider (https://www.nine.ch) disabling IPv6 makes unbound
not respond to the DNS queries.

Also, I was unable to test with prefer-ip6 to 'no' because unbound fails:

unbound[5657]: /etc/unbound/unbound.conf.d/cloudron-network.conf:8: error: unknown keyword 'no'
unbound[5657]: read /etc/unbound/unbound.conf failed: 3 errors in configuration file
2017-09-18 11:41:02 -07:00
Girish Ramakrishnan 71233ecd95 Fix undefined variable 2017-09-18 11:14:04 -07:00
Girish Ramakrishnan 02097298c6 Fix indentation 2017-09-18 10:38:30 -07:00
Girish Ramakrishnan be03dd0821 remove unused require 2017-09-18 10:38:26 -07:00
Girish Ramakrishnan 5b77d2f0cf Add commented out debugging section for unbound 2017-09-18 10:38:22 -07:00
Girish Ramakrishnan 781f543e87 Rename API calls in the storage backend 2017-09-17 18:50:29 -07:00
Girish Ramakrishnan 6525a467a2 Rework backuptask into tar.js
This makes it easy to integrate another backup strategy
as the next step
2017-09-17 18:50:26 -07:00
Girish Ramakrishnan 6cddd61a24 Fix style 2017-09-17 18:50:23 -07:00
Girish Ramakrishnan b0ee116004 targz: make sourceDir a string 2017-09-17 18:50:15 -07:00
Girish Ramakrishnan 867a59d5d8 Pull it all to left 2017-09-15 15:47:37 -07:00
Girish Ramakrishnan 6f5085ebc3 Downcase email 2017-09-15 15:45:26 -07:00
Johannes Zellner e8a93dcb1b Add button to send test email
Fixes #419
2017-09-15 14:42:12 +02:00
Girish Ramakrishnan 09fe957cc7 style 2017-09-15 02:07:06 -07:00
Girish Ramakrishnan 020ccc8a99 gcdns: fix update/del confusion
in the DNS api, we always update/del all records of same type
2017-09-15 01:54:39 -07:00
Girish Ramakrishnan 7ed304bed8 Fix cloudflare domain display 2017-09-15 00:50:29 -07:00
Girish Ramakrishnan db1e39be11 Do not overwrite subdomain when location was changed
* Install in subdomain 'test'
* Move to subdomain 'test2'
* Move to another existing subdomain 'www' (this should be detected as conflict)
* Move to subdomain 'www2' (this should not remove 'www'). This is why dnsRecordId exists.
2017-09-14 22:31:48 -07:00
Girish Ramakrishnan f163577264 Typo 2017-09-14 18:38:48 -07:00
Girish Ramakrishnan 9c7080aea1 Show email text for gcdns 2017-09-14 18:33:07 -07:00
Girish Ramakrishnan c05a7c188f Coding style fixes 2017-09-14 18:15:59 -07:00
Girish Ramakrishnan 72e912770a translate network errors to SubdomainError
fixes #391
2017-09-14 16:14:16 -07:00
Girish Ramakrishnan 28c06d0a72 bump mail container 2017-09-14 12:07:53 -07:00
Girish Ramakrishnan 9805daa835 Add google-cloud/dns to shrinkwrap 2017-09-14 10:45:04 -07:00
Girish Ramakrishnan a920fd011c Merge branch 'feature/gcdns' into 'master'
Adding Google Cloud DNS support

See merge request !17
2017-09-14 17:44:20 +00:00
Girish Ramakrishnan 1b979ee1e9 Send rbl status as part of email check 2017-09-13 23:58:54 -07:00
Girish Ramakrishnan 70eae477dc Fix logstream test 2017-09-13 23:01:04 -07:00
Girish Ramakrishnan c16f7c7891 Fix storage tests 2017-09-13 22:50:38 -07:00
Girish Ramakrishnan 63b8a5b658 Add update pattern of wednesday night
Fixes #432, #435
2017-09-13 14:52:31 -07:00
Aleksandr Bogdanov c0bf51b79f A bit more polish 2017-09-13 21:17:40 +02:00
Aleksandr Bogdanov 3d4178b35c Adding Google Cloud DNS to "setupdns" stage 2017-09-13 21:00:29 +02:00
Aleksandr Bogdanov 34878bbc6a Make sure we don't touch records which are not managed by cloudron, but are in the same zone 2017-09-13 20:53:38 +02:00
Girish Ramakrishnan e78d976c8f Fix backup mapping (mail dir has moved) 2017-09-13 09:51:20 -07:00
Girish Ramakrishnan ba9662f3fa Add 1.6.5 changes 2017-09-12 22:32:57 -07:00
Girish Ramakrishnan c8750a3bed merge the logrotate scripts 2017-09-12 22:03:24 -07:00
Girish Ramakrishnan 9710f74250 remove collectd stats when app is uninstalled 2017-09-12 21:34:15 -07:00
Girish Ramakrishnan 52095cb8ab add debugs for timing backup and restore 2017-09-12 15:37:35 -07:00
Aleksandr Bogdanov c612966b41 Better validation 2017-09-12 22:47:46 +02:00
Aleksandr Bogdanov 90cf4f0784 Allowing to select a service account key as a file for gcdns 2017-09-12 22:35:40 +02:00
Aleksandr Bogdanov ec93d564e9 Adding Google Cloud DNS to webadmin 2017-09-12 19:03:23 +02:00
Aleksandr Bogdanov 37f9e60978 Fixing verifyDns 2017-09-12 16:29:07 +02:00
Johannes Zellner ca199961d5 Make settings.value field TEXT
We already store JSON blobs there and the gce dns backend
will require a larger blob for a certificate.
Since we use innodb the storage format in TEXT will only be different
if the data is large
2017-09-11 15:41:07 +02:00
Girish Ramakrishnan fd811ac334 Remove "cloudron" to fit in one line 2017-09-10 17:43:21 -07:00
Girish Ramakrishnan 609c1d3b78 bump mail container
this is also required since we moved the maildir
2017-09-10 00:07:48 -07:00
Girish Ramakrishnan 9906ed37ae Move mail data inside boxdata directory
This also makes the noop backend more useful because it will dump things
in data directory and user can back it up as they see fit.
2017-09-10 00:07:44 -07:00
Girish Ramakrishnan dcdce6d995 Use MAIL_DATA_DIR constant 2017-09-09 22:24:16 -07:00
Girish Ramakrishnan 9026c555f9 snapshots dir is not used anymore 2017-09-09 22:13:15 -07:00
Girish Ramakrishnan 547a80f17b make shell.exec options non-optional 2017-09-09 19:54:31 -07:00
Girish Ramakrishnan 300d3dd545 remove unused requires 2017-09-09 19:23:22 -07:00
Aleksandr Bogdanov 6fce729ed2 Adding Google Cloud DNS 2017-09-09 17:45:26 +02:00
Girish Ramakrishnan d233ee2a83 ask password only for destructive actions 2017-09-08 15:14:37 -07:00
Girish Ramakrishnan 3240a71feb wording 2017-09-08 14:42:54 -07:00
Girish Ramakrishnan 322be9e5ba Add ip blacklist check
Fixes #431
2017-09-08 13:29:32 -07:00
Girish Ramakrishnan e67ecae2d2 typo 2017-09-07 22:01:37 -07:00
Girish Ramakrishnan 75b3e7fc78 resolve symlinks correctly for deletion
part of #394
2017-09-07 21:57:08 -07:00
Girish Ramakrishnan 74c8d8cc6b set label on the redis container
this ensures that redis is stopped when app is stopped and also
helps identifying app related containers easily
2017-09-07 20:09:46 -07:00
Girish Ramakrishnan 51659a8d2d set label on the redis container
this ensures that redis is stopped when app is stopped and also
helps identifying app related containers easily
2017-09-07 19:54:05 -07:00
Girish Ramakrishnan 70acf1a719 Allow app volumes to be symlinked
The initial plan was to make app volumes to be set using a database
field but this makes the app backups non-portable. It also complicates
things wrt to app and server restores.

For now, ignore the problem and let them be symlinked.

Fixes #394
2017-09-07 15:50:34 -07:00
Girish Ramakrishnan 8d2f3b0217 Add note on disabling ssh password auth 2017-09-06 11:36:23 -07:00
Girish Ramakrishnan e498678488 Use node 6.11.3 2017-09-06 09:39:22 -07:00
Girish Ramakrishnan 513517b15e cf dns: filter by type and name in the REST API
Otherwise, we will have to implement pagination
2017-09-05 16:07:14 -07:00
Girish Ramakrishnan a96f8abaca DO DNS: list all pages of the domain 2017-09-05 15:52:59 -07:00
Johannes Zellner f7bcd54ef5 Better ui feedback on the repair mode 2017-09-05 23:11:04 +02:00
Johannes Zellner d58e4f58c7 Add hook to react whenever apps have changed 2017-09-05 23:10:45 +02:00
Girish Ramakrishnan 45f0f2adbe Fix wording 2017-09-05 10:38:33 -07:00
Johannes Zellner 36c72dd935 Sendgrid only has an api key similar postmark
Fixes #411
2017-09-05 11:28:28 +02:00
Girish Ramakrishnan df9e2a7856 Use robotsTxt in install route 2017-09-04 12:59:14 -07:00
Girish Ramakrishnan 2b043aa95f remove unused require 2017-09-04 12:59:05 -07:00
Johannes Zellner c0a09d1494 Add 1.6.4 changes 2017-09-04 18:53:11 +02:00
Johannes Zellner 1c5c4b5705 Improve overflow handling in logs and terminal view 2017-09-04 18:40:16 +02:00
Girish Ramakrishnan b56dcaac68 Only run scheduler when app is healthy
Fixes #393
2017-09-03 18:21:13 -07:00
Girish Ramakrishnan fd91ccc844 Update the unbound anchor key
This helps the unbound recover from any previous out of disk space
situation.

part of #269
2017-09-03 17:48:26 -07:00
Johannes Zellner fca1a70eaa Add initial repair button alongside webterminal
Part of #416
2017-09-01 20:08:22 +02:00
Johannes Zellner ed81b7890c Fixup the test for the password requirement change 2017-09-01 20:08:22 +02:00
Johannes Zellner cb8dcbf3dd Lift the password requirement for app configure/update/restore actions 2017-09-01 20:08:22 +02:00
Johannes Zellner 4bdbf1f62e Fix indentation 2017-09-01 20:08:22 +02:00
Johannes Zellner 47a8b4fdc2 After consuming the accessToken query param, remove it
Fixes #415
2017-09-01 10:25:28 +02:00
Johannes Zellner 5720e90580 Guide the user to use ctrl+v for pasting into webterminal
Fixes #413
2017-08-31 20:52:04 +02:00
Johannes Zellner f98e13d701 Better highlight dropdown menu hovers 2017-08-31 20:52:04 +02:00
Johannes Zellner d5d924861b Fix gravatar margin in navbar 2017-08-31 20:52:04 +02:00
Girish Ramakrishnan b81a92d407 disable ip6 in unbound as well
part of #412
2017-08-31 11:41:35 -07:00
Johannes Zellner 22b0100354 Ensure we don't crash if the terminal socket is not ready yet
Upstream patch submitted https://github.com/sourcelair/xterm.js/pull/933
2017-08-31 20:31:31 +02:00
Johannes Zellner 6eb6eab3f4 Let the browser handle paste keyboard shortcuts
Related to #413
2017-08-31 20:31:31 +02:00
Girish Ramakrishnan 57d5c2cc47 Use IPv4 address to connect to mysql
Fixes #412
2017-08-31 10:59:14 -07:00
Johannes Zellner 6a9eac7a24 Use the correct input change event
Fixes #414
2017-08-31 19:06:02 +02:00
Johannes Zellner e4760a07f0 Give feedback if the relay settings have successfully saved 2017-08-30 11:02:13 +02:00
Johannes Zellner 257e594de0 Allow mail relay provider specific UI
Only contains specific UI for postmark

Part of #411
2017-08-30 10:55:36 +02:00
Girish Ramakrishnan 6fea022a04 remove dead code 2017-08-29 14:47:59 -07:00
Girish Ramakrishnan f34840d127 remove old data migration paths 2017-08-29 13:08:31 -07:00
Girish Ramakrishnan f9706d6a05 Always generate nginx config for webadmin
Part of #406
2017-08-28 21:16:47 -07:00
Girish Ramakrishnan 61f7c1af48 Remove unused error codes 2017-08-28 15:27:17 -07:00
Girish Ramakrishnan 00786dda05 Do not crash if DNS creds do not work during startup
If DNS creds are invalid, then platform.start() keeps crashing on a
mail container update. For now, just log the error and move on.

Part of #406
2017-08-28 14:55:36 -07:00
Girish Ramakrishnan 8b9f44addc 1.6.3 changes 2017-08-28 13:49:15 -07:00
Johannes Zellner 56c7dbb6e4 Do not attempt to reconnect if the debug view is already gone
Fixes #408
2017-08-28 21:06:25 +02:00
Girish Ramakrishnan c47f878203 Set priority for MX records
Fixes #410
2017-08-26 15:54:38 -07:00
Girish Ramakrishnan 8a2107e6eb Show email text for Cloudflare 2017-08-26 15:37:24 -07:00
Girish Ramakrishnan cd9f0f69d8 email dialog has moved to it's own view 2017-08-26 15:36:12 -07:00
Girish Ramakrishnan 1da91b64f6 Filter out possibly sensitive information for normal users
Fixes #407
2017-08-26 14:47:51 -07:00
Johannes Zellner a87dd65c1d Workaround for firefox flexbox bug
Fixes selection while clicking on empty flexbox space.

This only happens in firefox and seems to be a bug in firefox
flexbox implementation, where the first child element with a
non zero size, in a flexbox managed `block` element, has the
`float` property.

Fixes #405
2017-08-24 23:29:42 +02:00
Johannes Zellner 7c63d9e758 Fix typo in css 2017-08-24 23:16:36 +02:00
Girish Ramakrishnan 329bf596ac Indicate that directories can be downloaded 2017-08-24 13:38:50 -07:00
142 changed files with 9094 additions and 3634 deletions
+139
View File
@@ -982,3 +982,142 @@
* Add webterminal to shell into apps from the admin UI
* Update Haraka for a few crash fixes
[1.6.3]
* Fixes selection issue while clicking on empty flexbox space
* Indicate directories can be downloaded in the web terminal
* Do not show app update indicator for normal users
* Display email notice when using Cloudflare DNS
* Set MX records correctly when using Cloudflare DNS
* Fix bug where webterminal can incorrectly appear in main view
* Do not crash if DNS credentials are invalid
[1.6.4]
* More descriptive Postmark email relay form
* Fix file upload in chrome
* Support Ctrl/Cmd+v webterminal pasting
* Ensure unbound always starts up
* Add option to run app in repair mode
[1.6.5]
* DigitalOcean DNS: Add pagination
* Cloudflare DNS: Optimize listing of DNS entries
* Update node to 6.11.3
* App volumes can now be symlinked individually to external storage
* Periodically check if IP is blacklisted and notify admins
* Do not ask password when re-configuring app (since it is non-destructive)
* Move mail data inside boxdata directory. This makes the no-op backend more useful
* Remove collectd stats when app is uninstalled
[1.7.0]
* Add rsync format for backups. This feature allows incremental backups
* Add Google DNS backend (thanks @syn)
* Add DigitalOcean spaces backup storage backend
* Add Cloudscale and Exoscale as supported VPS providers
* Display backup progress and status in the web interface
* Preliminary IPv6 support
* Add IP RBL status to web interface
* Add auto-update pattern `Every wednesday night`
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
* Do not overwrite existing subdomain when app's location is changed
* Add button to send test email
* Fix crash in carbon which made graphs disappear on some Cloudrons
[1.7.1]
* Add rsync format for backups. This feature allows incremental backups
* Add Google DNS backend (thanks @syn)
* Add DigitalOcean spaces backup storage backend
* Add Cloudscale and Exoscale as supported VPS providers
* Display backup progress and status in the web interface
* Preliminary IPv6 support
* Add IP RBL status to web interface
* Add auto-update pattern `Every wednesday night`
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
* Do not overwrite existing subdomain when app's location is changed
* Add button to send test email
* Fix crash in carbon which made graphs disappear on some Cloudrons
[1.7.2]
* Add rsync format for backups. This feature allows incremental backups
* Add Google DNS backend (thanks @syn)
* Add Cloudscale and Exoscale as supported VPS providers
* Display backup progress and status in the web interface
* Preliminary IPv6 support
* Add IP RBL status to web interface
* Add auto-update pattern `Every wednesday night`
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
* Do not overwrite existing subdomain when app's location is changed
* Add button to send test email
* Fix crash in carbon which made graphs disappear on some Cloudrons
* Fix issue where OAuth SSO did not work when alternate domain was used
[1.7.3]
* Add rsync format for backups. This feature allows incremental backups
* Add Google DNS backend (thanks @syn)
* Add Cloudscale and Exoscale as supported VPS providers
* Display backup progress and status in the web interface
* Preliminary IPv6 support
* Add IP RBL status to web interface
* Add auto-update pattern `Every wednesday night`
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
* Do not overwrite existing subdomain when app's location is changed
* Add button to send test email
* Fix crash in carbon which made graphs disappear on some Cloudrons
* Fix issue where OAuth SSO did not work when alternate domain was used
[1.7.4]
* Add rsync format for backups. This feature allows incremental backups
* Add Google DNS backend (thanks @syn)
* Add DigitalOcean spaces backup storage backend
* Add Cloudscale and Exoscale as supported VPS providers
* Display backup progress and status in the web interface
* Preliminary IPv6 support
* Add IP RBL status to web interface
* Add auto-update pattern `Every wednesday night`
* Update Haraka to 2.8.15. This fixes the issue where emails were bounced with the message 'Send MAIL FROM first'
* Do not overwrite existing subdomain when app's location is changed
* Add button to send test email
* Fix crash in carbon which made graphs disappear on some Cloudrons
* Fix issue where OAuth SSO did not work when alternate domain was used
* Changelog is now rendered in markdown format
[1.7.5]
* Expose a TLS relay port from mail container for Go applications
[1.7.6]
* Port bindings cannot be configured in update route anymore
* Implement LDAP group compare
* Pre-releases are now offered by appstore and not handled in box code anymore
* LDAP pagination support. This will fix the warnings in NextCloud and Rocket.Chat
* Check if directories can be created in the backup directory
* Do not set the HTTPS agent when using HTTP with minio backup backend
* Fix regression where a new domain config could not be set in the UI
* New mail container release that fixes email sending with SOGo
* Show 404 page for unknown domains
[1.7.7]
* Allow setting app memory till memory limit
* Make the dkim selector dynamic
* Fix issue where app update dialog did not close
* Fix LE cert renewal failures
* Send user and cert info in digest emails
* Send oom, app failures and other important mails to cloudron owner's alt mail
[1.8.0]
* Fix group email bounce when a group has users that have not signed up yet
* Do not restrict app memory limit to 4GB
* Fix display of the latest backup in the weekly digest
* Add UI to select users for access restriction
* Update docker to 17.09
* Update node to 6.11.5
* Display package version of installed apps in the info dialog
[1.8.1]
* Update node modules
* Allow a restore operation if app is already restoring
* Remove pre-install bundle support since it was hardly used
* Make the test email mail address configurable
* Allow admins to access all apps
* Send feedback via appstore API (instead of email)
* Show documentation URL in the app info dialog
* Update Let's Encrypt agreement URL (https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf)
+1 -1
View File
@@ -630,7 +630,7 @@ state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
box
Copyright (C) 2016 Cloudron UG
Copyright (C) 2016,2017 Cloudron UG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
+5 -5
View File
@@ -47,10 +47,10 @@ apt-get -y install \
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
echo "==> Installing node.js"
mkdir -p /usr/local/node-6.11.2
curl -sL https://nodejs.org/dist/v6.11.2/node-v6.11.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.2
ln -sf /usr/local/node-6.11.2/bin/node /usr/bin/node
ln -sf /usr/local/node-6.11.2/bin/npm /usr/bin/npm
mkdir -p /usr/local/node-6.11.5
curl -sL https://nodejs.org/dist/v6.11.5/node-v6.11.5-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.5
ln -sf /usr/local/node-6.11.5/bin/node /usr/bin/node
ln -sf /usr/local/node-6.11.5/bin/npm /usr/bin/npm
apt-get install -y python # Install python which is required for npm rebuild
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
@@ -61,7 +61,7 @@ echo "==> Installing Docker"
mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.03.1~ce-0~ubuntu-xenial_amd64.deb -o /tmp/docker.deb
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y /tmp/docker.deb
rm /tmp/docker.deb
+3 -2
View File
@@ -6,9 +6,9 @@ var argv = require('yargs').argv,
autoprefixer = require('gulp-autoprefixer'),
concat = require('gulp-concat'),
cssnano = require('gulp-cssnano'),
del = require('del'),
ejs = require('gulp-ejs'),
gulp = require('gulp'),
rimraf = require('rimraf'),
sass = require('gulp-sass'),
serve = require('gulp-serve'),
sourcemaps = require('gulp-sourcemaps'),
@@ -196,7 +196,8 @@ gulp.task('watch', ['default'], function () {
});
gulp.task('clean', function () {
del.sync(['webadmin/dist', 'setup/splash/website']);
rimraf.sync('webadmin/dist');
rimraf.sync('setup/splash/website');
});
gulp.task('default', ['clean', 'html', 'js', '3rdparty', 'images', 'css'], function () {});
@@ -0,0 +1,15 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE settings MODIFY value TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE settings MODIFY value VARCHAR(512)', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,25 @@
'use strict';
// ensure backupFolder and format are not empty
exports.up = function(db, callback) {
db.all('SELECT * FROM settings WHERE name=?', [ 'backup_config' ], function (error, result) {
if (error || result.length === 0) return callback(error);
var value = JSON.parse(result[0].value);
value.format = 'tgz'; // set the format
if (value.provider === 'filesystem' && !value.backupFolder) {
value.backupFolder = '/var/backups'; // set the backupFolder
}
db.runSql('UPDATE settings SET value = ? WHERE name = ?', [ JSON.stringify(value), 'backup_config' ], function (error) {
if (error) console.error('Error setting ownerid ' + JSON.stringify(u) + error);
callback();
});
});
};
exports.down = function(db, callback) {
callback();
};
@@ -0,0 +1,15 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE backups ADD COLUMN format VARCHAR(16) DEFAULT "tgz"', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE backups DROP COLUMN format', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,15 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN newConfigJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN newConfigJson', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,40 @@
'use strict';

var async = require('async');

// Migration: replace backups.restoreConfigJson (a full restore config blob)
// with backups.manifestJson, which keeps only the app manifest. The manifest
// is extracted from each existing app backup's restore config before the old
// column is dropped.
exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN manifestJson TEXT'),
        // NOTE(review): MySQL DDL statements auto-commit, so this transaction
        // likely only covers the UPDATEs below — confirm intent.
        db.runSql.bind(db, 'START TRANSACTION;'),
        // fill all the backups with restoreConfigs from current apps
        function addManifests(callback) {
            console.log('Importing manifests');
            // Only 'app' backups carry a restore config; 'box' backups are skipped.
            db.all('SELECT * FROM backups WHERE type="app"', function (error, backups) {
                if (error) return callback(error);
                // Sequential on purpose: one UPDATE at a time; first failure aborts.
                async.eachSeries(backups, function (backup, next) {
                    // restoreConfigJson may be NULL; keep NULL manifests as NULL.
                    var m = backup.restoreConfigJson ? JSON.parse(backup.restoreConfigJson) : null;
                    if (m) m = JSON.stringify(m.manifest);
                    db.runSql('UPDATE backups SET manifestJson=? WHERE id=?', [ m, backup.id ], next);
                }, callback);
            });
        },
        db.runSql.bind(db, 'COMMIT'),
        // remove the restoreConfig
        db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN restoreConfigJson')
    ], callback);
};

// Revert: drop manifestJson and re-create an (empty) restoreConfigJson
// column. The original restore configs are not reconstructed.
exports.down = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE backups DROP COLUMN manifestJson'),
        db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
    ], callback);
};
@@ -0,0 +1,15 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE newConfigJson updateConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE updateConfigJson newConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -0,0 +1,15 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE lastBackupId restoreConfigJson TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps CHANGE restoreConfigJson lastBackupId TEXT', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
+7 -5
View File
@@ -60,7 +60,7 @@ CREATE TABLE IF NOT EXISTS apps(
manifestJson TEXT,
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
location VARCHAR(128) NOT NULL UNIQUE,
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates (unused)
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
createdAt TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP,
memoryLimit BIGINT DEFAULT 0,
@@ -72,8 +72,9 @@ CREATE TABLE IF NOT EXISTS apps(
enableBackup BOOLEAN DEFAULT 1,
// the following fields do not belong here, they can be removed when we use a queue for apptask
lastBackupId VARCHAR(128), // used to pass backupId to restore from to apptask
oldConfigJson TEXT, // used to pass old config for apptask
restoreConfigJson VARCHAR(256), // used to pass backupId to restore from to apptask
oldConfigJson TEXT, // used to pass old config for apptask (configure, restore)
updateConfigJson TEXT, // used to pass new config for apptask (update)
PRIMARY KEY(id));
@@ -93,7 +94,7 @@ CREATE TABLE IF NOT EXISTS authcodes(
CREATE TABLE IF NOT EXISTS settings(
name VARCHAR(128) NOT NULL UNIQUE,
value VARCHAR(512),
value TEXT,
PRIMARY KEY(name));
CREATE TABLE IF NOT EXISTS appAddonConfigs(
@@ -110,7 +111,8 @@ CREATE TABLE IF NOT EXISTS backups(
type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
dependsOn TEXT, /* comma separate list of objects this backup depends on */
state VARCHAR(16) NOT NULL,
restoreConfigJson TEXT, /* JSON including the manifest of the backed up app */
manifestJson TEXT, /* to validate if the app can be installed in this version of box */
format VARCHAR(16) DEFAULT "tgz",
PRIMARY KEY (id));
+3257 -488
View File
File diff suppressed because it is too large Load Diff
+42 -39
View File
@@ -14,87 +14,90 @@
"node": ">=4.0.0 <=4.1.1"
},
"dependencies": {
"@google-cloud/dns": "^0.7.0",
"@sindresorhus/df": "^2.1.0",
"async": "^2.5.0",
"aws-sdk": "^2.97.0",
"body-parser": "^1.17.2",
"cloudron-manifestformat": "^2.9.0",
"async": "^2.6.0",
"aws-sdk": "^2.151.0",
"body-parser": "^1.18.2",
"cloudron-manifestformat": "^2.10.0",
"connect-ensure-login": "^0.1.1",
"connect-lastmile": "^0.1.0",
"connect-lastmile": "^1.0.2",
"connect-timeout": "^1.9.0",
"cookie-parser": "^1.3.5",
"cookie-session": "^1.1.0",
"cron": "^1.0.9",
"cookie-session": "^1.3.2",
"cron": "^1.3.0",
"csurf": "^1.6.6",
"db-migrate": "^0.10.0-beta.20",
"db-migrate": "^0.10.0-beta.24",
"db-migrate-mysql": "^1.1.10",
"debug": "^3.0.0",
"dockerode": "^2.4.3",
"debug": "^3.1.0",
"dockerode": "^2.5.3",
"ejs": "^2.5.7",
"ejs-cli": "^2.0.0",
"express": "^4.15.4",
"express-session": "^1.15.5",
"express": "^4.16.2",
"express-session": "^1.15.6",
"gulp-sass": "^3.0.0",
"hat": "0.0.3",
"hock": "https://registry.npmjs.org/hock/-/hock-1.3.2.tgz",
"json": "^9.0.3",
"ldapjs": "^1.0.0",
"mime": "^1.3.4",
"moment-timezone": "^0.5.5",
"morgan": "^1.7.0",
"lodash.chunk": "^4.2.0",
"mime": "^2.0.3",
"moment-timezone": "^0.5.14",
"morgan": "^1.9.0",
"multiparty": "^4.1.2",
"mysql": "^2.7.0",
"nodemailer": "^4.0.1",
"mysql": "^2.15.0",
"nodemailer": "^4.4.0",
"nodemailer-smtp-transport": "^2.7.4",
"oauth2orize": "^1.0.1",
"oauth2orize": "^1.11.0",
"once": "^1.3.2",
"parse-links": "^0.1.0",
"passport": "^0.2.2",
"passport-http": "^0.2.2",
"passport": "^0.4.0",
"passport-http": "^0.3.0",
"passport-http-bearer": "^1.0.1",
"passport-local": "^1.0.0",
"passport-oauth2-client-password": "^0.1.2",
"password-generator": "^2.0.2",
"password-generator": "^2.2.0",
"progress-stream": "^2.0.0",
"proxy-middleware": "^0.13.0",
"proxy-middleware": "^0.15.0",
"request": "^2.83.0",
"s3-block-read-stream": "^0.2.0",
"safetydance": "^0.2.0",
"semver": "^4.3.6",
"showdown": "^1.6.0",
"safetydance": "^0.7.1",
"semver": "^5.4.1",
"showdown": "^1.8.2",
"split": "^1.0.0",
"superagent": "^3.5.2",
"superagent": "^3.8.1",
"supererror": "^0.7.1",
"tar-fs": "^1.15.3",
"tldjs": "^1.6.2",
"tar-fs": "^1.16.0",
"tar-stream": "^1.5.5",
"tldjs": "^2.2.0",
"underscore": "^1.7.0",
"uuid": "^3.1.0",
"valid-url": "^1.0.9",
"validator": "^4.9.0",
"ws": "^2.3.1"
"validator": "^9.1.1",
"ws": "^3.3.1"
},
"devDependencies": {
"bootstrap-sass": "^3.3.3",
"del": "^1.1.1",
"expect.js": "*",
"gulp": "^3.9.1",
"gulp-autoprefixer": "^2.3.0",
"gulp-autoprefixer": "^4.0.0",
"gulp-concat": "^2.4.3",
"gulp-cssnano": "^2.1.0",
"gulp-ejs": "^1.0.0",
"gulp-ejs": "^3.1.0",
"gulp-sass": "^3.0.0",
"gulp-serve": "^1.0.0",
"gulp-sourcemaps": "^1.5.2",
"gulp-uglify": "^1.1.0",
"gulp-sourcemaps": "^2.6.1",
"gulp-uglify": "^3.0.0",
"hock": "~1.2.0",
"istanbul": "*",
"js2xmlparser": "^1.0.0",
"js2xmlparser": "^3.0.0",
"mocha": "*",
"mock-aws-s3": "^2.4.0",
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
"nock": "^9.0.14",
"node-sass": "^3.13.1",
"node-sass": "^4.6.1",
"readdirp": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz",
"request": "^2.65.0",
"yargs": "^3.15.0"
"yargs": "^10.0.3"
},
"scripts": {
"migrate_local": "DATABASE_URL=mysql://root:@localhost/box node_modules/.bin/db-migrate up",
+14 -18
View File
@@ -45,6 +45,7 @@ fi
initBaseImage="true"
# provisioning data
domain=""
adminLocation="my"
zoneName=""
provider=""
encryptionKey=""
@@ -63,13 +64,14 @@ baseDataDir=""
# TODO this is still there for the restore case, see other occasions below
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,data-dir:,provider:,encryption-key:,restore-url:,tls-provider:,version:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,data-dir:,provider:,encryption-key:,restore-url:,tls-provider:,version:,dns-provider:,env:,admin-location:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
case "$1" in
--domain) domain="$2"; shift 2;;
--help) echo "See https://cloudron.io/references/selfhosting.html on how to install Cloudron"; exit 0;;
--admin-location) adminLocation="$2"; shift 2;;
--help) echo "See https://cloudron.io/documentation/installation/ on how to install Cloudron"; exit 0;;
--provider) provider="$2"; shift 2;;
--encryption-key) encryptionKey="$2"; shift 2;;
--restore-url) restoreUrl="$2"; shift 2;;
@@ -105,13 +107,15 @@ done
# validate arguments in the absence of data
if [[ -z "${dataJson}" ]]; then
if [[ -z "${provider}" ]]; then
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic)"
echo "--provider is required (azure, cloudscale.ch, digitalocean, ec2, exoscale, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic)"
exit 1
elif [[ \
"${provider}" != "ami" && \
"${provider}" != "azure" && \
"${provider}" != "cloudscale.ch" && \
"${provider}" != "digitalocean" && \
"${provider}" != "ec2" && \
"${provider}" != "exoscale" && \
"${provider}" != "gce" && \
"${provider}" != "lightsail" && \
"${provider}" != "linode" && \
@@ -121,7 +125,7 @@ if [[ -z "${dataJson}" ]]; then
"${provider}" != "vultr" && \
"${provider}" != "generic" \
]]; then
echo "--provider must be one of: azure, digitalocean, ec2, gce, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
echo "--provider must be one of: azure, cloudscale.ch, digitalocean, ec2, exoscale, gce, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
exit 1
fi
@@ -195,6 +199,7 @@ if [[ -z "${dataJson}" ]]; then
{
"boxVersionsUrl": "${versionsUrl}",
"fqdn": "${domain}",
"adminLocation": "${adminLocation}",
"zoneName": "${zoneName}",
"provider": "${provider}",
"apiServerOrigin": "${apiServerOrigin}",
@@ -209,11 +214,9 @@ if [[ -z "${dataJson}" ]]; then
"provider": "filesystem",
"backupFolder": "/var/backups",
"key": "${encryptionKey}",
"format": "tgz",
"retentionSecs": 172800
},
"updateConfig": {
"prerelease": ${prerelease}
},
"version": "${version}"
}
EOF
@@ -223,6 +226,7 @@ EOF
{
"boxVersionsUrl": "${versionsUrl}",
"fqdn": "${domain}",
"adminLocation": "${adminLocation}",
"zoneName": "${zoneName}",
"provider": "${provider}",
"apiServerOrigin": "${apiServerOrigin}",
@@ -259,17 +263,9 @@ fi
echo "=> Installing version ${version} (this takes some time) ..."
echo "${data}" > "${DATA_FILE}"
# poor mans semver
if [[ ${version} == "0.10"* ]]; then
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
echo "Failed to install cloudron. See ${LOG_FILE} for details"
exit 1
fi
else
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" --data-dir "${baseDataDir}" &>> "${LOG_FILE}"; then
echo "Failed to install cloudron. See ${LOG_FILE} for details"
exit 1
fi
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" --data-dir "${baseDataDir}" &>> "${LOG_FILE}"; then
echo "Failed to install cloudron. See ${LOG_FILE} for details"
exit 1
fi
rm "${DATA_FILE}"
+2 -2
View File
@@ -31,8 +31,8 @@ if ! $(cd "${SOURCE_DIR}" && git diff --exit-code >/dev/null); then
exit 1
fi
if [[ "$(node --version)" != "v6.11.2" ]]; then
echo "This script requires node 6.11.2"
if [[ "$(node --version)" != "v6.11.5" ]]; then
echo "This script requires node 6.11.5"
exit 1
fi
+34 -9
View File
@@ -34,13 +34,41 @@ while true; do
esac
done
echo "==> installer: updating docker"
if [[ $(docker version --format {{.Client.Version}}) != "17.09.0-ce" ]]; then
$curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb -o /tmp/docker.deb
# https://download.docker.com/linux/ubuntu/dists/xenial/stable/binary-amd64/Packages
if [[ $(sha256sum /tmp/docker.deb | cut -d' ' -f1) != "d33f6eb134f0ab0876148bd96de95ea47d583d7f2cddfdc6757979453f9bd9bf" ]]; then
echo "docker binary download is corrupt"
exit 5
fi
echo "Waiting for all dpkg tasks to finish..."
while fuser /var/lib/dpkg/lock; do
sleep 1
done
while ! dpkg --force-confold --configure -a; do
echo "Failed to fix packages. Retry"
sleep 1
done
while ! apt install -y /tmp/docker.deb; do
echo "Failed to install docker. Retry"
sleep 1
done
rm /tmp/docker.deb
fi
echo "==> installer: updating node"
if [[ "$(node --version)" != "v6.11.2" ]]; then
mkdir -p /usr/local/node-6.11.2
$curl -sL https://nodejs.org/dist/v6.11.2/node-v6.11.2-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.2
ln -sf /usr/local/node-6.11.2/bin/node /usr/bin/node
ln -sf /usr/local/node-6.11.2/bin/npm /usr/bin/npm
rm -rf /usr/local/node-6.11.1
if [[ "$(node --version)" != "v6.11.5" ]]; then
mkdir -p /usr/local/node-6.11.5
$curl -sL https://nodejs.org/dist/v6.11.5/node-v6.11.5-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-6.11.5
ln -sf /usr/local/node-6.11.5/bin/node /usr/bin/node
ln -sf /usr/local/node-6.11.5/bin/npm /usr/bin/npm
rm -rf /usr/local/node-6.11.3
fi
for try in `seq 1 10`; do
@@ -81,9 +109,6 @@ fi
# ensure we are not inside the source directory, which we will remove now
cd /root
echo "==> installer: updating packages"
# add logic to update apt packages here
echo "==> installer: switching the box code"
rm -rf "${BOX_SRC_DIR}"
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
+5 -9
View File
@@ -6,6 +6,7 @@ json="${source_dir}/../node_modules/.bin/json"
# IMPORTANT: Fix cloudron.js:doUpdate if you add/remove any arg. keep these sorted for readability
arg_api_server_origin=""
arg_fqdn=""
arg_admin_location=""
arg_zone_name=""
arg_is_custom_domain="false"
arg_restore_key=""
@@ -20,9 +21,7 @@ arg_version=""
arg_web_server_origin=""
arg_backup_config=""
arg_dns_config=""
arg_update_config=""
arg_provider=""
arg_app_bundle=""
arg_is_demo="false"
args=$(getopt -o "" -l "data:,retire-reason:,retire-info:" -n "$0" -- "$@")
@@ -46,19 +45,19 @@ while true; do
arg_is_custom_domain=$(echo "$2" | $json isCustomDomain)
[[ "${arg_is_custom_domain}" == "" ]] && arg_is_custom_domain="true"
arg_admin_location=$(echo "$2" | $json adminLocation)
[[ "${arg_admin_location}" == "" ]] && arg_admin_location="my"
# only update/restore have this valid (but not migrate)
arg_api_server_origin=$(echo "$2" | $json apiServerOrigin)
[[ "${arg_api_server_origin}" == "" ]] && arg_api_server_origin="https://api.cloudron.io"
arg_web_server_origin=$(echo "$2" | $json webServerOrigin)
[[ "${arg_web_server_origin}" == "" ]] && arg_web_server_origin="https://cloudron.io"
# TODO check if an where this is used
# TODO check if and where this is used
arg_version=$(echo "$2" | $json version)
# read possibly empty parameters here
arg_app_bundle=$(echo "$2" | $json appBundle)
[[ "${arg_app_bundle}" == "" ]] && arg_app_bundle="[]"
arg_is_demo=$(echo "$2" | $json isDemo)
[[ "${arg_is_demo}" == "" ]] && arg_is_demo="false"
@@ -86,9 +85,6 @@ while true; do
arg_dns_config=$(echo "$2" | $json dnsConfig)
[[ "${arg_dns_config}" == "null" ]] && arg_dns_config=""
arg_update_config=$(echo "$2" | $json updateConfig)
[[ "${arg_update_config}" == "null" ]] && arg_update_config=""
shift 2
;;
--) break;;
+3 -4
View File
@@ -7,7 +7,6 @@ readonly SETUP_WEBSITE_DIR="/home/yellowtent/setup/website"
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly box_src_dir="$(realpath ${script_dir}/..)"
readonly PLATFORM_DATA_DIR="/home/yellowtent/platformdata"
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
echo "Setting up nginx update page"
@@ -19,7 +18,7 @@ fi
source "${script_dir}/argparser.sh" "$@" # this injects the arg_* variables used below
# keep this is sync with config.js appFqdn()
admin_fqdn=$([[ "${arg_is_custom_domain}" == "true" ]] && echo "${ADMIN_LOCATION}.${arg_fqdn}" || echo "${ADMIN_LOCATION}-${arg_fqdn}")
admin_fqdn=$([[ "${arg_is_custom_domain}" == "true" ]] && echo "${arg_admin_location}.${arg_fqdn}" || echo "${arg_admin_location}-${arg_fqdn}")
admin_origin="https://${admin_fqdn}"
# copy the website
@@ -34,11 +33,11 @@ if [[ "${arg_retire_reason}" != "" || "${existing_infra}" != "${current_infra}"
echo "Showing progress bar on all subdomains in retired mode or infra update. retire: ${arg_retire_reason} existing: ${existing_infra} current: ${current_infra}"
rm -f ${PLATFORM_DATA_DIR}/nginx/applications/*
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null, \"hasIPv6\": false }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
else
echo "Show progress bar only on admin domain for normal update"
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\", \"robotsTxtQuoted\": null, \"hasIPv6\": false }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
fi
if [[ "${arg_retire_reason}" == "migrate" ]]; then
+69 -81
View File
@@ -7,7 +7,6 @@ echo "==> Cloudron Start"
readonly USER="yellowtent"
readonly HOME_DIR="/home/${USER}"
readonly BOX_SRC_DIR="${HOME_DIR}/box"
readonly OLD_DATA_DIR="${HOME_DIR}/data";
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
@@ -73,54 +72,29 @@ fi
mkdir -p "${BOX_DATA_DIR}"
mkdir -p "${APPS_DATA_DIR}"
mkdir -p "${PLATFORM_DATA_DIR}"
# keep these in sync with paths.js
echo "==> Ensuring directories"
if [[ ! -d "${PLATFORM_DATA_DIR}/mail" ]]; then
if [[ -d "${OLD_DATA_DIR}/mail" ]]; then
echo "==> Migrate old mail data"
# Migrate mail data to new format
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
mkdir -p "${PLATFORM_DATA_DIR}/mail"
# we can't move the whole folder as it is a btrfs subvolume mount
mv -f "${OLD_DATA_DIR}/mail/"* "${PLATFORM_DATA_DIR}/mail/" # this used to be mail container's run directory
else
echo "==> Create new mail data dir"
mkdir -p "${PLATFORM_DATA_DIR}/mail"
fi
fi
mkdir -p "${PLATFORM_DATA_DIR}/graphite"
mkdir -p "${PLATFORM_DATA_DIR}/mail/dkim"
mkdir -p "${PLATFORM_DATA_DIR}/mysql"
mkdir -p "${PLATFORM_DATA_DIR}/postgresql"
mkdir -p "${PLATFORM_DATA_DIR}/mongodb"
mkdir -p "${PLATFORM_DATA_DIR}/snapshots"
mkdir -p "${PLATFORM_DATA_DIR}/addons/mail"
mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
mkdir -p "${PLATFORM_DATA_DIR}/logrotate.d"
mkdir -p "${PLATFORM_DATA_DIR}/acme"
mkdir -p "${PLATFORM_DATA_DIR}/backup"
mkdir -p "${BOX_DATA_DIR}/appicons"
mkdir -p "${BOX_DATA_DIR}/certs"
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
mkdir -p "${BOX_DATA_DIR}/mail/dkim"
# ensure backups folder exists and is writeable
mkdir -p /var/backups
chmod 777 /var/backups
echo "==> Check for old btrfs volumes"
if mountpoint -q "${OLD_DATA_DIR}"; then
echo "==> Cleanup btrfs volumes"
# First stop all container to be able to unmount
docker ps -q | xargs docker stop
umount "${OLD_DATA_DIR}"
rm -rf "/root/user_data.img"
else
echo "==> No btrfs volumes found";
fi
echo "==> Configuring journald"
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
-e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
@@ -146,7 +120,10 @@ echo "==> Setting up unbound"
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
echo -e "server:\n\tinterface: 0.0.0.0\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30\n\tcache-max-ttl: 300" > /etc/unbound/unbound.conf.d/cloudron-network.conf
# If IP6 is not enabled, dns queries seem to fail on some hosts
echo -e "server:\n\tinterface: 0.0.0.0\n\tdo-ip6: yes\n\taccess-control: 127.0.0.1 allow\n\taccess-control: 172.18.0.1/16 allow\n\tcache-max-negative-ttl: 30\n\tcache-max-ttl: 300\n\t#logfile: /var/log/unbound.log\n\t#verbosity: 10" > /etc/unbound/unbound.conf.d/cloudron-network.conf
# update the root anchor after a out-of-disk-space situation (see #269)
unbound-anchor -a /var/lib/unbound/root.key
echo "==> Adding systemd services"
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
@@ -209,7 +186,11 @@ if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf"
echo "Waiting for mysql jobs..."
sleep 1
done
systemctl restart mysql
while true; do
if systemctl restart mysql; then break; fi
echo "Restarting MySql again after sometime since this fails randomly"
sleep 1
done
else
systemctl start mysql
fi
@@ -221,20 +202,31 @@ mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
if [[ -n "${arg_restore_url}" ]]; then
set_progress "30" "Downloading restore data"
decrypt=""
if [[ "${arg_restore_url}" == *.tar.gz.enc || -n "${arg_restore_key}" ]]; then
echo "==> Downloading encrypted backup: ${arg_restore_url} and key: ${arg_restore_key}"
decrypt=(openssl aes-256-cbc -d -nosalt -pass "pass:${arg_restore_key}")
else
echo "==> Downloading backup: ${arg_restore_url}"
decrypt=(cat -)
fi
readonly restore_dir="${arg_restore_url#file://}"
while true; do
if $curl -L "${arg_restore_url}" | "${decrypt[@]}" \
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,platformdata/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
echo "Failed to download data, trying again"
done
if [[ -d "${restore_dir}" ]]; then # rsync backup
echo "==> Copying backup: ${restore_dir}"
if [[ $(stat -c "%d" "${BOX_DATA_DIR}") == $(stat -c "%d" "${restore_dir}") ]]; then
cp -rfl "${restore_dir}/." "${BOX_DATA_DIR}"
else
cp -rf "${restore_dir}/." "${BOX_DATA_DIR}"
fi
else # tgz backup
decrypt=""
if [[ "${arg_restore_url}" == *.tar.gz.enc || -n "${arg_restore_key}" ]]; then
echo "==> Downloading encrypted backup: ${arg_restore_url} and key: ${arg_restore_key}"
decrypt=(openssl aes-256-cbc -d -nosalt -pass "pass:${arg_restore_key}")
elif [[ "${arg_restore_url}" == *.tar.gz ]]; then
echo "==> Downloading backup: ${arg_restore_url}"
decrypt=(cat -)
fi
while true; do
if $curl -L "${arg_restore_url}" | "${decrypt[@]}" \
| tar -zxf - --overwrite -C "${BOX_DATA_DIR}"; then break; fi
echo "Failed to download data, trying again"
done
fi
set_progress "35" "Setting up MySQL"
if [[ -f "${BOX_DATA_DIR}/box.mysqldump" ]]; then
@@ -247,9 +239,27 @@ set_progress "40" "Migrating data"
sudo -u "${USER}" -H bash <<EOF
set -eu
cd "${BOX_SRC_DIR}"
BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@localhost/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up
BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up
EOF
echo "==> Adding automated configs"
mysql -u root -p${mysql_root_password} -e "REPLACE INTO settings (name, value) VALUES (\"domain\", '{ \"fqdn\": \"$arg_fqdn\", \"zoneName\": \"$arg_zone_name\", \"adminLocation\": \"$arg_admin_location\" }')" box
if [[ ! -z "${arg_backup_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"backup_config\", '$arg_backup_config')" box
fi
if [[ ! -z "${arg_dns_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"dns_config\", '$arg_dns_config')" box
fi
if [[ ! -z "${arg_tls_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"tls_config\", '$arg_tls_config')" box
fi
echo "==> Creating cloudron.conf"
cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
{
@@ -258,18 +268,11 @@ cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
"apiServerOrigin": "${arg_api_server_origin}",
"webServerOrigin": "${arg_web_server_origin}",
"fqdn": "${arg_fqdn}",
"adminLocation": "${arg_admin_location}",
"zoneName": "${arg_zone_name}",
"isCustomDomain": ${arg_is_custom_domain},
"provider": "${arg_provider}",
"isDemo": ${arg_is_demo},
"database": {
"hostname": "localhost",
"username": "root",
"password": "${mysql_root_password}",
"port": 3306,
"name": "box"
},
"appBundle": ${arg_app_bundle}
"isDemo": ${arg_is_demo}
}
CONF_END
# pass these out-of-band because they have new lines which interfere with json
@@ -285,39 +288,24 @@ cat > "${BOX_SRC_DIR}/webadmin/dist/config.json" <<CONF_END
}
CONF_END
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
echo "==> Generating dhparams (takes forever)"
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
else
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
fi
echo "==> Changing ownership"
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/logrotate.d" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme"
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/logrotate.d" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup"
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
echo "==> Adding automated configs"
if [[ ! -z "${arg_backup_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"backup_config\", '$arg_backup_config')" box
fi
if [[ ! -z "${arg_dns_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"dns_config\", '$arg_dns_config')" box
fi
if [[ ! -z "${arg_update_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"update_config\", '$arg_update_config')" box
fi
if [[ ! -z "${arg_tls_config}" ]]; then
mysql -u root -p${mysql_root_password} \
-e "REPLACE INTO settings (name, value) VALUES (\"tls_config\", '$arg_tls_config')" box
fi
echo "==> Generating dhparams (takes forever)"
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
fi
# do not chown the boxdata/mail directory; dovecot gets upset
chown "${USER}:${USER}" "${BOX_DATA_DIR}"
find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${BOX_DATA_DIR}/mail" -exec chown -R "${USER}:${USER}" {} \;
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
set_progress "60" "Starting Cloudron"
systemctl start cloudron.target
+6
View File
@@ -7,3 +7,9 @@ printf "Cloudron relies on and may break your installation. Ubuntu security upda
printf "are automatically installed on this server every night.\n"
printf "\n"
printf "Read more at https://cloudron.io/documentation/security/#os-updates\n"
if grep -q "^PasswordAuthentication yes" /etc/ssh/sshd_config; then
printf "\nPlease disable password based SSH access to secure your server. Read more at\n"
printf "https://cloudron.io/documentation/security/#securing-ssh-access\n"
fi
+45 -4
View File
@@ -4,13 +4,54 @@ map $http_upgrade $connection_upgrade {
'' close;
}
# http server
server {
<% if (vhost) { %>
listen 443 http2;
listen 80;
<% if (hasIPv6) { -%>
listen [::]:80;
<% } -%>
<% if (vhost) { -%>
server_name <%= vhost %>;
<% } else { %>
<% } else { -%>
# IP based access from collectd or initial cloudron setup. TODO: match the IPv6 address
server_name "~^\d+\.\d+\.\d+\.\d+$";
# collectd
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
<% } -%>
# acme challenges (for cert renewal where the vhost config exists)
location /.well-known/acme-challenge/ {
default_type text/plain;
alias /home/yellowtent/platformdata/acme/;
}
location / {
# redirect everything to HTTPS
return 301 https://$host$request_uri;
}
}
# https server
server {
<% if (vhost) { -%>
server_name <%= vhost %>;
listen 443 http2;
<% if (hasIPv6) { -%>
listen [::]:443 http2;
<% } -%>
<% } else { -%>
listen 443 http2 default_server;
<% } %>
<% if (hasIPv6) { -%>
listen [::]:443 http2 default_server;
<% } -%>
<% } -%>
ssl on;
# paths are relative to prefix and not to this file
+7 -13
View File
@@ -36,27 +36,21 @@ http {
# zones for rate limiting
limit_req_zone $binary_remote_addr zone=admin_login:10m rate=10r/s; # 10 request a second
# HTTP server
# default http server that returns 404 for any domain we are not listening on
server {
listen 80;
listen 80 default_server;
listen [::]:80 default_server;
server_name does_not_match_anything;
# collectd
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
# acme challenges
# acme challenges (for app installation and re-configure when the vhost config does not exist)
location /.well-known/acme-challenge/ {
default_type text/plain;
alias /home/yellowtent/platformdata/acme/;
}
location / {
# redirect everything to HTTPS
return 301 https://$host$request_uri;
return 404;
}
}
+6 -8
View File
@@ -13,8 +13,8 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
Defaults!/home/yellowtent/box/src/scripts/reloadcollectd.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadcollectd.sh
Defaults!/home/yellowtent/box/src/scripts/configurecollectd.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurecollectd.sh
Defaults!/home/yellowtent/box/src/scripts/collectlogs.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
@@ -28,11 +28,9 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
Defaults!/home/yellowtent/box/src/scripts/authorized_keys.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/authorized_keys.sh
Defaults!/home/yellowtent/box/src/scripts/node.sh env_keep="HOME BOX_ENV NODE_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/node.sh
Defaults!/home/yellowtent/box/src/scripts/configurelogrotate.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurelogrotate.sh
Defaults!/home/yellowtent/box/src/scripts/mvlogrotateconfig.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/mvlogrotateconfig.sh
Defaults!/home/yellowtent/box/src/backuptask.js env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/backuptask.js
Defaults!/home/yellowtent/box/src/scripts/rmlogrotateconfig.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmlogrotateconfig.sh
+5 -3
View File
@@ -20,7 +20,6 @@ var appdb = require('./appdb.js'),
async = require('async'),
clients = require('./clients.js'),
config = require('./config.js'),
constants = require('./constants.js'),
ClientsError = clients.ClientsError,
debug = require('debug')('box:addons'),
docker = require('./docker.js'),
@@ -249,7 +248,7 @@ function setupOauth(app, options, callback) {
if (!app.sso) return callback(null);
var appId = app.id;
var redirectURI = 'https://' + config.appFqdn(app.location);
var redirectURI = 'https://' + (app.altDomain || config.appFqdn(app.location));
var scope = 'profile';
clients.delByAppIdAndType(appId, clients.TYPE_OAUTH, function (error) { // remove existing creds
@@ -364,6 +363,7 @@ function setupSendMail(app, options, callback) {
var env = [
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
{ name: 'MAIL_SMTP_PORT', value: '2525' },
{ name: 'MAIL_SMTPS_PORT', value: '4650' },
{ name: 'MAIL_SMTP_USERNAME', value: mailbox.name },
{ name: 'MAIL_SMTP_PASSWORD', value: password },
{ name: 'MAIL_FROM', value: mailbox.name + '@' + config.fqdn() },
@@ -645,7 +645,9 @@ function setupRedis(app, options, callback) {
}
const tag = infra.images.redis.tag, redisName = 'redis-' + app.id;
// note that we do not add appId label because this interferes with the stop/start app logic
const cmd = `docker run --restart=always -d --name=${redisName} \
--label=location=${app.location} \
--net cloudron \
--net-alias ${redisName} \
-m ${memoryLimit/2} \
@@ -692,7 +694,7 @@ function teardownRedis(app, options, callback) {
safe.fs.unlinkSync(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh');
shell.sudo('teardownRedis', [ RMAPPDIR_CMD, app.id + '/redis' ], function (error, stdout, stderr) {
shell.sudo('teardownRedis', [ RMAPPDIR_CMD, app.id + '/redis', true /* delete directory */ ], function (error, stdout, stderr) {
if (error) return callback(new Error('Error removing redis data:' + error));
appdb.unsetAddonConfig(app.id, 'redis', callback);
+29 -17
View File
@@ -10,6 +10,7 @@ exports = module.exports = {
update: update,
getAll: getAll,
getPortBindings: getPortBindings,
delPortBinding: delPortBinding,
setAddonConfig: setAddonConfig,
getAddonConfig: getAddonConfig,
@@ -59,7 +60,7 @@ var assert = require('assert'),
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
'apps.accessRestrictionJson', 'apps.restoreConfigJson', 'apps.oldConfigJson', 'apps.updateConfigJson', 'apps.memoryLimit', 'apps.altDomain',
'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson', 'apps.robotsTxt', 'apps.enableBackup' ].join(',');
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'environmentVariable', 'appId' ].join(',');
@@ -75,6 +76,14 @@ function postProcess(result) {
result.oldConfig = safe.JSON.parse(result.oldConfigJson);
delete result.oldConfigJson;
assert(result.updateConfigJson === null || typeof result.updateConfigJson === 'string');
result.updateConfig = safe.JSON.parse(result.updateConfigJson);
delete result.updateConfigJson;
assert(result.restoreConfigJson === null || typeof result.restoreConfigJson === 'string');
result.restoreConfig = safe.JSON.parse(result.restoreConfigJson);
delete result.restoreConfigJson;
assert(result.hostPorts === null || typeof result.hostPorts === 'string');
assert(result.environmentVariables === null || typeof result.environmentVariables === 'string');
@@ -188,14 +197,14 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
var altDomain = data.altDomain || null;
var xFrameOptions = data.xFrameOptions || '';
var installationState = data.installationState || exports.ISTATE_PENDING_INSTALL;
var lastBackupId = data.lastBackupId || null; // used when cloning
var restoreConfigJson = data.restoreConfig ? JSON.stringify(data.restoreConfig) : null; // used when cloning
var sso = 'sso' in data ? data.sso : null;
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
var queries = [];
queries.push({
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson ]
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, restoreConfigJson, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, restoreConfigJson, sso, debugModeJson ]
});
Object.keys(portBindings).forEach(function (env) {
@@ -248,6 +257,18 @@ function getPortBindings(id, callback) {
});
}
function delPortBinding(hostPort, callback) {
assert.strictEqual(typeof hostPort, 'number');
assert.strictEqual(typeof callback, 'function');
database.query('DELETE FROM appPortBindings WHERE hostPort=?', [ hostPort ], function (error, result) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (result.affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
callback(null);
});
}
function del(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
@@ -305,17 +326,8 @@ function updateWithConstraints(id, app, constraints, callback) {
var fields = [ ], values = [ ];
for (var p in app) {
if (p === 'manifest') {
fields.push('manifestJson = ?');
values.push(JSON.stringify(app[p]));
} else if (p === 'oldConfig') {
fields.push('oldConfigJson = ?');
values.push(JSON.stringify(app[p]));
} else if (p === 'accessRestriction') {
fields.push('accessRestrictionJson = ?');
values.push(JSON.stringify(app[p]));
} else if (p === 'debugMode') {
fields.push('debugModeJson = ?');
if (p === 'manifest' || p === 'oldConfig' || p === 'updateConfig' || p === 'restoreConfig' || p === 'accessRestriction' || p === 'debugMode') {
fields.push(`${p}Json = ?`);
values.push(JSON.stringify(app[p]));
} else if (p !== 'portBindings') {
fields.push(p + ' = ?');
@@ -368,14 +380,14 @@ function setInstallationCommand(appId, installationState, values, callback) {
// Rules are:
// uninstall is allowed in any state
// force update is allowed in any state including pending_uninstall! (for better or worse)
// restore is allowed from installed or error state
// restore is allowed from installed or error state or currently restoring
// configure is allowed in installed state or currently configuring or in error state
// update and backup are allowed only in installed state
if (installationState === exports.ISTATE_PENDING_UNINSTALL || installationState === exports.ISTATE_PENDING_FORCE_UPDATE) {
updateWithConstraints(appId, values, '', callback);
} else if (installationState === exports.ISTATE_PENDING_RESTORE) {
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "error")', callback);
updateWithConstraints(appId, values, 'AND (installationState = "installed" OR installationState = "error" OR installationState = "pending_restore")', callback);
} else if (installationState === exports.ISTATE_PENDING_UPDATE || installationState === exports.ISTATE_PENDING_BACKUP) {
updateWithConstraints(appId, values, 'AND installationState = "installed"', callback);
} else if (installationState === exports.ISTATE_PENDING_CONFIGURE) {
+11 -11
View File
@@ -94,19 +94,20 @@ function checkAppHealth(app, callback) {
superagent
.get(healthCheckUrl)
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
.set('User-Agent', 'Mozilla') // required for some apps (e.g. minio)
.redirects(0)
.timeout(HEALTHCHECK_INTERVAL)
.end(function (error, res) {
if (error && !error.response) {
debugApp(app, 'not alive (network error): %s', error.message);
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
debugApp(app, 'not alive : %s', error || res.status);
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
} else {
setHealth(app, appdb.HEALTH_HEALTHY, callback);
}
});
if (error && !error.response) {
debugApp(app, 'not alive (network error): %s', error.message);
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
debugApp(app, 'not alive : %s', error || res.status);
setHealth(app, appdb.HEALTH_UNHEALTHY, callback);
} else {
setHealth(app, appdb.HEALTH_HEALTHY, callback);
}
});
});
}
@@ -156,7 +157,6 @@ function processDockerEvents() {
stream.setEncoding('utf8');
stream.on('data', function (data) {
var ev = JSON.parse(data);
debug('Container ' + ev.id + ' went OOM');
appdb.getByContainerId(ev.id, function (error, app) { // this can error for addons
var program = error || !app.appStoreId ? ev.id : app.appStoreId;
var context = JSON.stringify(ev);
+58 -50
View File
@@ -65,6 +65,7 @@ var addons = require('./addons.js'),
groups = require('./groups.js'),
mailboxdb = require('./mailboxdb.js'),
manifestFormat = require('cloudron-manifestformat'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance'),
@@ -118,7 +119,7 @@ AppsError.BAD_CERTIFICATE = 'Invalid certificate';
// https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
// We are validating the validity of the location-fqdn as host name
function validateHostname(location, fqdn) {
var RESERVED_LOCATIONS = [ constants.ADMIN_LOCATION, constants.API_LOCATION, constants.SMTP_LOCATION, constants.IMAP_LOCATION, constants.MAIL_LOCATION, constants.POSTMAN_LOCATION ];
var RESERVED_LOCATIONS = [ config.adminLocation(), constants.API_LOCATION, constants.SMTP_LOCATION, constants.IMAP_LOCATION, config.mailLocation(), constants.POSTMAN_LOCATION ];
if (RESERVED_LOCATIONS.indexOf(location) !== -1) return new AppsError(AppsError.BAD_FIELD, location + ' is reserved');
@@ -208,7 +209,7 @@ function validateMemoryLimit(manifest, memoryLimit) {
assert.strictEqual(typeof memoryLimit, 'number');
var min = manifest.memoryLimit || constants.DEFAULT_MEMORY_LIMIT;
var max = (4096 * 1024 * 1024);
var max = os.totalmem() * 2; // this will overallocate since we don't allocate equal swap always (#466)
// allow 0, which indicates that it is not set, the one from the manifest will be choosen but we don't commit any user value
// this is needed so an app update can change the value in the manifest, and if not set by the user, the new value should be used
@@ -258,6 +259,12 @@ function validateRobotsTxt(robotsTxt) {
return null;
}
function validateBackupFormat(format) {
if (format === 'tgz' || format == 'rsync') return null;
return new AppsError(AppsError.BAD_FIELD, 'Invalid backup format');
}
function getDuplicateErrorDetails(location, portBindings, error) {
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof portBindings, 'object');
@@ -308,12 +315,18 @@ function hasAccessTo(app, user, callback) {
if (app.accessRestriction.users.some(function (e) { return e === user.id; })) return callback(null, true);
// check group access
if (!app.accessRestriction.groups) return callback(null, false);
groups.getGroups(user.id, function (error, groupIds) {
if (error) return callback(null, false);
async.some(app.accessRestriction.groups, function (groupId, iteratorDone) {
groups.isMember(groupId, user.id, iteratorDone);
}, function (error, result) {
callback(null, !error && result);
const isAdmin = groupIds.indexOf(constants.ADMIN_GROUP_ID) !== -1;
if (isAdmin) return callback(null, true); // admins can always access any app
if (!app.accessRestriction.groups) return callback(null, false);
if (app.accessRestriction.groups.some(function (gid) { return groupIds.indexOf(gid) !== -1; })) return callback(null, true);
callback(null, false);
});
}
@@ -420,7 +433,8 @@ function install(data, auditSource, callback) {
debugMode = data.debugMode || null,
robotsTxt = data.robotsTxt || null,
enableBackup = 'enableBackup' in data ? data.enableBackup : true,
backupId = data.backupId || null;
backupId = data.backupId || null,
backupFormat = data.backupFormat || 'tgz';
assert(data.appStoreId || data.manifest); // atleast one of them is required
@@ -454,6 +468,9 @@ function install(data, auditSource, callback) {
error = validateRobotsTxt(robotsTxt);
if (error) return callback(error);
error = validateBackupFormat(backupFormat);
if (error) return callback(error);
if ('sso' in data && !('optionalSso' in manifest)) return callback(new AppsError(AppsError.BAD_FIELD, 'sso can only be specified for apps with optionalSso'));
// if sso was unspecified, enable it by default if possible
if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
@@ -489,8 +506,9 @@ function install(data, auditSource, callback) {
sso: sso,
debugMode: debugMode,
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app',
lastBackupId: backupId,
enableBackup: enableBackup
restoreConfig: backupId ? { backupId: backupId, backupFormat: backupFormat } : null,
enableBackup: enableBackup,
robotsTxt: robotsTxt
};
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
@@ -628,7 +646,7 @@ function update(appId, data, auditSource, callback) {
downloadManifest(data.appStoreId, data.manifest, function (error, appStoreId, manifest) {
if (error) return callback(error);
var values = { };
var updateConfig = { };
error = manifestFormat.parse(manifest);
if (error) return callback(new AppsError(AppsError.BAD_FIELD, 'Manifest error:' + error.message));
@@ -636,13 +654,7 @@ function update(appId, data, auditSource, callback) {
error = checkManifestConstraints(manifest);
if (error) return callback(error);
values.manifest = manifest;
if ('portBindings' in data) {
values.portBindings = data.portBindings;
error = validatePortBindings(data.portBindings, values.manifest.tcpPorts);
if (error) return callback(error);
}
updateConfig.manifest = manifest;
if ('icon' in data) {
if (data.icon) {
@@ -662,26 +674,23 @@ function update(appId, data, auditSource, callback) {
// prevent user from installing a app with different manifest id over an existing app
// this allows cloudron install -f --app <appid> for an app installed from the appStore
if (app.manifest.id !== values.manifest.id) {
if (app.manifest.id !== updateConfig.manifest.id) {
if (!data.force) return callback(new AppsError(AppsError.BAD_FIELD, 'manifest id does not match. force to override'));
// clear appStoreId so that this app does not get updates anymore
values.appStoreId = '';
updateConfig.appStoreId = '';
}
// do not update apps in debug mode
if (app.debugMode && !data.force) return callback(new AppsError(AppsError.BAD_STATE, 'debug mode enabled. force to override'));
// Ensure we update the memory limit in case the new app requires more memory as a minimum
// 0 and -1 are special values for memory limit indicating unset and unlimited
if (app.memoryLimit > 0 && values.manifest.memoryLimit && app.memoryLimit < values.manifest.memoryLimit) {
values.memoryLimit = values.manifest.memoryLimit;
// 0 and -1 are special updateConfig for memory limit indicating unset and unlimited
if (app.memoryLimit > 0 && updateConfig.manifest.memoryLimit && app.memoryLimit < updateConfig.manifest.memoryLimit) {
updateConfig.memoryLimit = updateConfig.manifest.memoryLimit;
}
values.oldConfig = getAppConfig(app);
appdb.setInstallationCommand(appId, data.force ? appdb.ISTATE_PENDING_FORCE_UPDATE : appdb.ISTATE_PENDING_UPDATE, values, function (error) {
appdb.setInstallationCommand(appId, data.force ? appdb.ISTATE_PENDING_FORCE_UPDATE : appdb.ISTATE_PENDING_UPDATE, { updateConfig: updateConfig }, function (error) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.BAD_STATE)); // might be a bad guess
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails('' /* location cannot conflict */, values.portBindings, error));
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
taskmanager.restartAppTask(appId);
@@ -762,22 +771,22 @@ function restore(appId, data, auditSource, callback) {
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
// for empty or null backupId, use existing manifest to mimic a reinstall
var func = data.backupId ? backups.getRestoreConfig.bind(null, data.backupId) : function (next) { return next(null, { manifest: app.manifest }); };
var func = data.backupId ? backups.get.bind(null, data.backupId) : function (next) { return next(null, { manifest: app.manifest }); };
func(function (error, restoreConfig) {
func(function (error, backupInfo) {
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
if (!backupInfo.manifest) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore manifest'));
// re-validate because this new box version may not accept old configs
error = checkManifestConstraints(restoreConfig.manifest);
error = checkManifestConstraints(backupInfo.manifest);
if (error) return callback(error);
var values = {
lastBackupId: data.backupId || null, // when null, apptask simply reinstalls
manifest: restoreConfig.manifest,
restoreConfig: data.backupId ? { backupId: data.backupId, backupFormat: backupInfo.format } : null, // when null, apptask simply reinstalls
manifest: backupInfo.manifest,
oldConfig: getAppConfig(app)
};
@@ -816,24 +825,24 @@ function clone(appId, data, auditSource, callback) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
backups.getRestoreConfig(backupId, function (error, restoreConfig) {
backups.get(backupId, function (error, backupInfo) {
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
if (!backupInfo.manifest) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
// re-validate because this new box version may not accept old configs
error = checkManifestConstraints(restoreConfig.manifest);
error = checkManifestConstraints(backupInfo.manifest);
if (error) return callback(error);
error = validateHostname(location, config.fqdn());
if (error) return callback(error);
error = validatePortBindings(portBindings, restoreConfig.manifest.tcpPorts);
error = validatePortBindings(portBindings, backupInfo.manifest.tcpPorts);
if (error) return callback(error);
var newAppId = uuid.v4(), appStoreId = app.appStoreId, manifest = restoreConfig.manifest;
var newAppId = uuid.v4(), appStoreId = app.appStoreId, manifest = backupInfo.manifest;
appstore.purchase(newAppId, appStoreId, function (error) {
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
@@ -846,7 +855,7 @@ function clone(appId, data, auditSource, callback) {
memoryLimit: app.memoryLimit,
accessRestriction: app.accessRestriction,
xFrameOptions: app.xFrameOptions,
lastBackupId: backupId,
restoreConfig: { backupId: backupId, backupFormat: backupInfo.format },
sso: !!app.sso,
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
};
@@ -1012,14 +1021,9 @@ function autoupdateApps(updateInfo, auditSource, callback) { // updateInfo is {
if ((semver.major(app.manifest.version) !== 0) && (semver.major(app.manifest.version) !== semver.major(newManifest.version))) return new Error('Major version change'); // major changes are blocking
var newTcpPorts = newManifest.tcpPorts || { };
var oldTcpPorts = app.manifest.tcpPorts || { };
var portBindings = app.portBindings; // this is never null
for (var env in newTcpPorts) {
if (!(env in oldTcpPorts)) return new Error(env + ' is required from user');
}
for (env in portBindings) {
for (var env in portBindings) {
if (!(env in newTcpPorts)) return new Error(env + ' was in use but new update removes it');
}
@@ -1034,7 +1038,7 @@ function autoupdateApps(updateInfo, auditSource, callback) { // updateInfo is {
if (error) {
debug('Cannot autoupdate app %s : %s', appId, error.message);
return iteratorDone();
}
}
error = canAutoupdateApp(app, updateInfo[appId].manifest);
if (error) {
@@ -1104,10 +1108,14 @@ function restoreInstalledApps(callback) {
async.map(apps, function (app, iteratorDone) {
debug('marking %s for restore', app.location || app.id);
appdb.setInstallationCommand(app.id, appdb.ISTATE_PENDING_RESTORE, { oldConfig: null }, function (error) {
if (error) debug('did not mark %s for restore', app.location || app.id, error);
backups.getByAppIdPaged(1, 1, app.id, function (error, results) {
var restoreConfig = !error && results.length ? { backupId: results[0].id, backupFormat: results[0].format } : null;
iteratorDone(); // always succeed
appdb.setInstallationCommand(app.id, appdb.ISTATE_PENDING_RESTORE, { restoreConfig: restoreConfig, oldConfig: null }, function (error) {
if (error) debug('did not mark %s for restore', app.location || app.id, error);
iteratorDone(); // always succeed
});
});
}, callback);
});
@@ -1205,7 +1213,7 @@ function uploadFile(appId, sourceFilePath, destFilePath, callback) {
if (error) return callback(error);
var readFile = fs.createReadStream(sourceFilePath);
readFile.on('error', console.error);
readFile.on('error', callback);
readFile.pipe(stream);
+54 -7
View File
@@ -11,6 +11,10 @@ exports = module.exports = {
getAppUpdate: getAppUpdate,
getBoxUpdate: getBoxUpdate,
getAccount: getAccount,
sendFeedback: sendFeedback,
AppstoreError: AppstoreError
};
@@ -162,16 +166,17 @@ function sendAliveStatus(data, callback) {
provider: result[settings.TLS_CONFIG_KEY].provider
},
backupConfig: {
provider: result[settings.BACKUP_CONFIG_KEY].provider
provider: result[settings.BACKUP_CONFIG_KEY].provider,
hardlinks: !result[settings.BACKUP_CONFIG_KEY].noHardlinks
},
mailConfig: {
enabled: result[settings.MAIL_CONFIG_KEY].enabled
},
mailRelay: {
provider: result[settings.MAIL_RELAY_KEY].provider
},
mailCatchAll: {
count: result[settings.CATCH_ALL_ADDRESS_KEY].length
},
mailRelay: {
provider: result[settings.MAIL_RELAY_KEY].provider
},
mailCatchAll: {
count: result[settings.CATCH_ALL_ADDRESS_KEY].length
},
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
timeZone: result[settings.TIME_ZONE_KEY],
@@ -180,6 +185,7 @@ function sendAliveStatus(data, callback) {
var data = {
domain: config.fqdn(),
version: config.version(),
adminFqdn: config.adminFqdn(),
provider: config.provider(),
backendSettings: backendSettings,
machine: {
@@ -245,3 +251,44 @@ function getAppUpdate(app, callback) {
});
});
}
function getAccount(callback) {
assert.strictEqual(typeof callback, 'function');
getAppstoreConfig(function (error, appstoreConfig) {
if (error) return callback(error);
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId;
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
// { profile: { id, email, groupId, billing, firstName, lastName, company, street, city, zip, state, country } }
callback(null, result.body.profile);
});
});
}
function sendFeedback(info, callback) {
assert.strictEqual(typeof info, 'object');
assert.strictEqual(typeof info.email, 'string');
assert.strictEqual(typeof info.displayName, 'string');
assert.strictEqual(typeof info.type, 'string');
assert.strictEqual(typeof info.subject, 'string');
assert.strictEqual(typeof info.description, 'string');
assert.strictEqual(typeof callback, 'function');
getAppstoreConfig(function (error, appstoreConfig) {
if (error) return callback(error);
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/feedback';
superagent.post(url).query({ accessToken: appstoreConfig.token }).send(info).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
callback(null);
});
});
}
+83 -51
View File
@@ -35,6 +35,7 @@ var addons = require('./addons.js'),
certificates = require('./certificates.js'),
config = require('./config.js'),
database = require('./database.js'),
DatabaseError = require('./databaseerror.js'),
debug = require('debug')('box:apptask'),
docker = require('./docker.js'),
ejs = require('ejs'),
@@ -56,10 +57,9 @@ var addons = require('./addons.js'),
_ = require('underscore');
var COLLECTD_CONFIG_EJS = fs.readFileSync(__dirname + '/collectd.config.ejs', { encoding: 'utf8' }),
RELOAD_COLLECTD_CMD = path.join(__dirname, 'scripts/reloadcollectd.sh'),
CONFIGURE_COLLECTD_CMD = path.join(__dirname, 'scripts/configurecollectd.sh'),
LOGROTATE_CONFIG_EJS = fs.readFileSync(__dirname + '/logrotate.ejs', { encoding: 'utf8' }),
MV_LOGROTATE_CONFIG_CMD = path.join(__dirname, 'scripts/mvlogrotateconfig.sh'),
RM_LOGROTATE_CONFIG_CMD = path.join(__dirname, 'scripts/rmlogrotateconfig.sh'),
CONFIGURE_LOGROTATE_CMD = path.join(__dirname, 'scripts/configurelogrotate.sh'),
RMAPPDIR_CMD = path.join(__dirname, 'scripts/rmappdir.sh'),
CREATEAPPDIR_CMD = path.join(__dirname, 'scripts/createappdir.sh');
@@ -131,7 +131,7 @@ function deleteContainers(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
debugApp(app, 'deleting containers');
debugApp(app, 'deleting app containers (app, scheduler)');
docker.deleteContainers(app.id, function (error) {
if (error) return callback(new Error('Error deleting container: ' + error));
@@ -147,11 +147,12 @@ function createVolume(app, callback) {
shell.sudo('createVolume', [ CREATEAPPDIR_CMD, app.id ], callback);
}
function deleteVolume(app, callback) {
function deleteVolume(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
shell.sudo('deleteVolume', [ RMAPPDIR_CMD, app.id ], callback);
shell.sudo('deleteVolume', [ RMAPPDIR_CMD, app.id, !!options.removeDirectory ], callback);
}
function addCollectdProfile(app, callback) {
@@ -161,7 +162,7 @@ function addCollectdProfile(app, callback) {
var collectdConf = ejs.render(COLLECTD_CONFIG_EJS, { appId: app.id, containerId: app.containerId });
fs.writeFile(path.join(paths.COLLECTD_APPCONFIG_DIR, app.id + '.conf'), collectdConf, function (error) {
if (error) return callback(error);
shell.sudo('addCollectdProfile', [ RELOAD_COLLECTD_CMD ], callback);
shell.sudo('addCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'add', app.id ], callback);
});
}
@@ -171,7 +172,7 @@ function removeCollectdProfile(app, callback) {
fs.unlink(path.join(paths.COLLECTD_APPCONFIG_DIR, app.id + '.conf'), function (error) {
if (error && error.code !== 'ENOENT') debugApp(app, 'Error removing collectd profile', error);
shell.sudo('removeCollectdProfile', [ RELOAD_COLLECTD_CMD ], callback);
shell.sudo('removeCollectdProfile', [ CONFIGURE_COLLECTD_CMD, 'remove', app.id ], callback);
});
}
@@ -185,11 +186,12 @@ function addLogrotateConfig(app, callback) {
var runVolume = result.Mounts.find(function (mount) { return mount.Destination === '/run'; });
if (!runVolume) return callback(new Error('App does not have /run mounted'));
// logrotate configs can have arbitrary commands, so the config files must be owned by root
var logrotateConf = ejs.render(LOGROTATE_CONFIG_EJS, { volumePath: runVolume.Source });
var tmpFilePath = path.join(os.tmpdir(), app.id + '.logrotate');
fs.writeFile(tmpFilePath, logrotateConf, function (error) {
if (error) return callback(error);
shell.sudo('addLogrotateConfig', [ MV_LOGROTATE_CONFIG_CMD, tmpFilePath, app.id ], callback);
shell.sudo('addLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'add', app.id, tmpFilePath ], callback);
});
});
}
@@ -198,16 +200,13 @@ function removeLogrotateConfig(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
shell.sudo('removeLogrotateConfig', [ RM_LOGROTATE_CONFIG_CMD, app.id ], callback);
shell.sudo('removeLogrotateConfig', [ CONFIGURE_LOGROTATE_CMD, 'remove', app.id ], callback);
}
function verifyManifest(app, callback) {
assert.strictEqual(typeof app, 'object');
function verifyManifest(manifest, callback) {
assert.strictEqual(typeof manifest, 'object');
assert.strictEqual(typeof callback, 'function');
debugApp(app, 'Verifying manifest');
var manifest = app.manifest;
var error = manifestFormat.parse(manifest);
if (error) return callback(new Error(util.format('Manifest error: %s', error.message)));
@@ -240,7 +239,7 @@ function downloadIcon(app, callback) {
if (!safe.fs.writeFileSync(path.join(paths.APP_ICONS_DIR, app.id + '.png'), res.body)) return retryCallback(new Error('Error saving icon:' + safe.error.message));
retryCallback(null);
});
});
}, callback);
}
@@ -272,6 +271,7 @@ function registerSubdomain(app, overwrite, callback) {
}, function (error, result) {
if (error || result instanceof Error) return callback(error || result);
// dnsRecordId tracks whether we created this DNS record so that we can unregister later
updateApp(app, { dnsRecordId: result }, callback);
});
});
@@ -288,6 +288,11 @@ function unregisterSubdomain(app, location, callback) {
return callback(null);
}
if (!app.dnsRecordId) {
debugApp(app, 'Skip unregister of record not created by cloudron');
return callback(null);
}
sysinfo.getPublicIp(function (error, ip) {
if (error) return callback(error);
@@ -379,14 +384,17 @@ function updateApp(app, values, callback) {
// - setup addons (requires the above volume)
// - setup the container (requires image, volumes, addons)
// - setup collectd (requires container id)
// restore is also handled here since restore is just an install with some oldConfig to clean up
function install(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
const backupId = app.lastBackupId, isRestoring = app.installationState === appdb.ISTATE_PENDING_RESTORE;
const restoreConfig = app.restoreConfig, isRestoring = app.installationState === appdb.ISTATE_PENDING_RESTORE;
async.series([
verifyManifest.bind(null, app),
// this protects against the theoretical possibility of an app being marked for install/restore from
// a previous version of box code
verifyManifest.bind(null, app.manifest),
// teardown for re-installs
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
@@ -397,13 +405,13 @@ function install(app, callback) {
deleteContainers.bind(null, app),
// oldConfig can be null during upgrades
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : app.manifest.addons),
deleteVolume.bind(null, app),
deleteVolume.bind(null, app, { removeDirectory: false }), // do not remove any symlinked volume
// for restore case
function deleteImageIfChanged(done) {
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
docker.deleteImage(app.oldConfig.manifest, done);
docker.deleteImage(app.oldConfig.manifest, done);
},
reserveHttpPort.bind(null, app),
@@ -421,7 +429,7 @@ function install(app, callback) {
createVolume.bind(null, app),
function restoreFromBackup(next) {
if (!backupId) {
if (!restoreConfig) {
async.series([
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
addons.setupAddons.bind(null, app, app.manifest.addons),
@@ -429,7 +437,7 @@ function install(app, callback) {
} else {
async.series([
updateApp.bind(null, app, { installationProgress: '60, Download backup and restoring addons' }),
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
backups.restoreApp.bind(null, app, app.manifest.addons, restoreConfig),
], next);
}
},
@@ -449,7 +457,7 @@ function install(app, callback) {
exports._waitForDnsPropagation.bind(null, app),
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain setup' }),
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !lastBackupId
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !restoreConfig
updateApp.bind(null, app, { installationProgress: '95, Configure nginx' }),
configureNginx.bind(null, app),
@@ -472,11 +480,9 @@ function backup(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
var prefix = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
async.series([
updateApp.bind(null, app, { installationProgress: '10, Backing up' }),
backups.backupApp.bind(null, app, app.manifest, prefix),
backups.backupApp.bind(null, app, app.manifest),
// done!
function (callback) {
@@ -497,6 +503,9 @@ function configure(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
// oldConfig can be null during an infra update
var locationChanged = app.oldConfig && app.oldConfig.location !== app.location;
async.series([
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
unconfigureNginx.bind(null, app),
@@ -505,8 +514,7 @@ function configure(app, callback) {
stopApp.bind(null, app),
deleteContainers.bind(null, app),
function (next) {
// oldConfig can be null during an infra update
if (!app.oldConfig || app.oldConfig.location === app.location) return next();
if (!locationChanged) return next();
unregisterSubdomain(app, app.oldConfig.location, next);
},
@@ -516,7 +524,7 @@ function configure(app, callback) {
downloadIcon.bind(null, app),
updateApp.bind(null, app, { installationProgress: '35, Registering subdomain' }),
registerSubdomain.bind(null, app, true /* overwrite */),
registerSubdomain.bind(null, app, !locationChanged /* overwrite */), // if location changed, do not overwrite to detect conflicts
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
docker.downloadImage.bind(null, app.manifest),
@@ -567,48 +575,72 @@ function update(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
debugApp(app, 'Updating to %s', safe.query(app, 'manifest.version'));
debugApp(app, `Updating to ${app.updateConfig.manifest.version}`);
// app does not want these addons anymore
// FIXME: this does not handle option changes (like multipleDatabases)
var unusedAddons = _.omit(app.oldConfig.manifest.addons, Object.keys(app.manifest.addons));
var unusedAddons = _.omit(app.manifest.addons, Object.keys(app.updateConfig.manifest.addons));
async.series([
// this protects against the theoretical possibility of an app being marked for update from
// a previous version of box code
updateApp.bind(null, app, { installationProgress: '0, Verify manifest' }),
verifyManifest.bind(null, app),
verifyManifest.bind(null, app.updateConfig.manifest),
function (next) {
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
async.series([
updateApp.bind(null, app, { installationProgress: '15, Backing up app' }),
backups.backupApp.bind(null, app, app.manifest)
], next);
},
// download new image before app is stopped. this is so we can reduce downtime
// and also not remove the 'common' layers when the old image is deleted
updateApp.bind(null, app, { installationProgress: '15, Downloading image' }),
docker.downloadImage.bind(null, app.manifest),
updateApp.bind(null, app, { installationProgress: '25, Downloading image' }),
docker.downloadImage.bind(null, app.updateConfig.manifest),
// note: we cleanup first and then backup. this is done so that the app is not running should backup fail
// we cannot easily 'recover' from backup failures because we have to revert manfest and portBindings
updateApp.bind(null, app, { installationProgress: '25, Cleaning up old install' }),
updateApp.bind(null, app, { installationProgress: '35, Cleaning up old install' }),
removeCollectdProfile.bind(null, app),
removeLogrotateConfig.bind(null, app),
stopApp.bind(null, app),
deleteContainers.bind(null, app),
function deleteImageIfChanged(done) {
if (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage) return done();
if (app.manifest.dockerImage === app.updateConfig.manifest.dockerImage) return done();
docker.deleteImage(app.oldConfig.manifest, done);
},
function (next) {
if (app.installationState === appdb.ISTATE_PENDING_FORCE_UPDATE) return next(null);
var prefix = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
async.series([
updateApp.bind(null, app, { installationProgress: '30, Backing up app' }),
backups.backupApp.bind(null, app, app.oldConfig.manifest, prefix)
], next);
docker.deleteImage(app.manifest, done);
},
// only delete unused addons after backup
addons.teardownAddons.bind(null, app, unusedAddons),
// free unused ports
function (next) {
// make sure we always have objects
var currentPorts = app.portBindings || {};
var newPorts = app.updateConfig.manifest.tcpPorts || {};
async.each(Object.keys(currentPorts), function (portName, callback) {
if (newPorts[portName]) return callback(); // port still in use
appdb.delPortBinding(currentPorts[portName], function (error) {
if (error && error.reason === DatabaseError.NOT_FOUND) console.error('Portbinding does not exist in database.');
else if (error) return next(error);
// also delete from app object for further processing (the db is updated in the next step)
delete app.portBindings[portName];
callback();
});
}, next);
},
// switch over to the new config. manifest, memoryLimit, portBindings, appstoreId are updated here
updateApp.bind(null, app, app.updateConfig),
updateApp.bind(null, app, { installationProgress: '45, Downloading icon' }),
downloadIcon.bind(null, app),
@@ -629,7 +661,7 @@ function update(app, callback) {
// done!
function (callback) {
debugApp(app, 'updated');
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null }, callback);
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null, updateConfig: null }, callback);
}
], function seriesDone(error) {
if (error) {
@@ -663,7 +695,7 @@ function uninstall(app, callback) {
addons.teardownAddons.bind(null, app, app.manifest.addons),
updateApp.bind(null, app, { installationProgress: '40, Deleting volume' }),
deleteVolume.bind(null, app),
deleteVolume.bind(null, app, { removeDirectory: true }),
updateApp.bind(null, app, { installationProgress: '50, Deleting image' }),
docker.deleteImage.bind(null, app.manifest),
+32 -42
View File
@@ -6,7 +6,7 @@ var assert = require('assert'),
safe = require('safetydance'),
util = require('util');
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'restoreConfigJson' ];
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'manifestJson', 'format' ];
exports = module.exports = {
add: add,
@@ -34,8 +34,8 @@ function postProcess(result) {
result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];
result.restoreConfig = result.restoreConfigJson ? safe.JSON.parse(result.restoreConfigJson) : null;
delete result.restoreConfigJson;
result.manifest = result.manifestJson ? safe.JSON.parse(result.manifestJson) : null;
delete result.manifestJson;
}
function getByTypeAndStatePaged(type, state, page, perPage, callback) {
@@ -47,12 +47,12 @@ function getByTypeAndStatePaged(type, state, page, perPage, callback) {
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? ORDER BY creationTime DESC LIMIT ?,?',
[ type, state, (page-1)*perPage, perPage ], function (error, results) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results.forEach(function (result) { postProcess(result); });
results.forEach(function (result) { postProcess(result); });
callback(null, results);
});
callback(null, results);
});
}
function getByTypePaged(type, page, perPage, callback) {
@@ -63,12 +63,12 @@ function getByTypePaged(type, page, perPage, callback) {
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? ORDER BY creationTime DESC LIMIT ?,?',
[ type, (page-1)*perPage, perPage ], function (error, results) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results.forEach(function (result) { postProcess(result); });
results.forEach(function (result) { postProcess(result); });
callback(null, results);
});
callback(null, results);
});
}
function getByAppIdPaged(page, perPage, appId, callback) {
@@ -80,12 +80,12 @@ function getByAppIdPaged(page, perPage, appId, callback) {
// box versions (0.93.x and below) used to use appbackup_ prefix
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? AND id LIKE ? ORDER BY creationTime DESC LIMIT ?,?',
[ exports.BACKUP_TYPE_APP, exports.BACKUP_STATE_NORMAL, '%app%\\_' + appId + '\\_%', (page-1)*perPage, perPage ], function (error, results) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results.forEach(function (result) { postProcess(result); });
results.forEach(function (result) { postProcess(result); });
callback(null, results);
});
callback(null, results);
});
}
function get(id, callback) {
@@ -94,13 +94,13 @@ function get(id, callback) {
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE id = ? ORDER BY creationTime DESC',
[ id ], function (error, result) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (result.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (result.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
postProcess(result[0]);
postProcess(result[0]);
callback(null, result[0]);
});
callback(null, result[0]);
});
}
function add(backup, callback) {
@@ -109,20 +109,21 @@ function add(backup, callback) {
assert.strictEqual(typeof backup.version, 'string');
assert(backup.type === exports.BACKUP_TYPE_APP || backup.type === exports.BACKUP_TYPE_BOX);
assert(util.isArray(backup.dependsOn));
assert.strictEqual(typeof backup.restoreConfig, 'object');
assert.strictEqual(typeof backup.manifest, 'object');
assert.strictEqual(typeof backup.format, 'string');
assert.strictEqual(typeof callback, 'function');
var creationTime = backup.creationTime || new Date(); // allow tests to set the time
var restoreConfig = backup.restoreConfig ? JSON.stringify(backup.restoreConfig) : '';
var manifestJson = JSON.stringify(backup.manifest);
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, restoreConfigJson) VALUES (?, ?, ?, ?, ?, ?, ?)',
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), restoreConfig ],
database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, manifestJson, format) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
[ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), manifestJson, backup.format ],
function (error) {
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
callback(null);
});
callback(null);
});
}
function update(id, backup, callback) {
@@ -158,19 +159,8 @@ function del(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
get(id, function (error, result) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback();
if (error) return callback(error);
var whereClause = [ 'id=?' ], whereArgs = [ result.id ];
result.dependsOn.forEach(function (id) {
whereClause.push('id=?');
whereArgs.push(id);
});
database.query('DELETE FROM backups WHERE ' + whereClause.join(' OR '), whereArgs, function (error) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
callback(null);
});
database.query('DELETE FROM backups WHERE id=?', [ id ], function (error) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
callback(null);
});
}
+613 -193
View File
File diff suppressed because it is too large Load Diff
+15 -69
View File
@@ -1,7 +1,12 @@
#!/usr/bin/env node
#!/bin/bash
':' //# comment; exec /usr/bin/env node --max_old_space_size=300 "$0" "$@"
// to understand the above hack read http://sambal.org/2014/02/passing-options-node-shebang-line/
'use strict';
if (process.argv[2] === '--check') return console.log('OK');
require('supererror')({ splatchError: true });
// remove timestamp from debug() based output
@@ -10,29 +15,11 @@ require('debug').formatArgs = function formatArgs(args) {
};
var assert = require('assert'),
BackupsError = require('./backups.js').BackupsError,
caas = require('./storage/caas.js'),
backups = require('./backups.js'),
database = require('./database.js'),
debug = require('debug')('box:backuptask'),
filesystem = require('./storage/filesystem.js'),
noop = require('./storage/noop.js'),
path = require('path'),
paths = require('./paths.js'),
s3 = require('./storage/s3.js'),
safe = require('safetydance'),
settings = require('./settings.js');
function api(provider) {
switch (provider) {
case 'caas': return caas;
case 's3': return s3;
case 'filesystem': return filesystem;
case 'minio': return s3;
case 'exoscale-sos': return s3;
case 'noop': return noop;
default: return null;
}
}
safe = require('safetydance');
function initialize(callback) {
assert.strictEqual(typeof callback, 'function');
@@ -40,52 +27,12 @@ function initialize(callback) {
database.initialize(callback);
}
function backupApp(backupId, appId, callback) {
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof callback, 'function');
debug('Start app backup with id %s for %s', backupId, appId);
settings.getBackupConfig(function (error, backupConfig) {
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
var backupMapping = [{
source: path.join(paths.APPS_DATA_DIR, appId),
destination: '.'
}];
api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
});
}
function backupBox(backupId, callback) {
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof callback, 'function');
debug('Start box backup with id %s', backupId);
settings.getBackupConfig(function (error, backupConfig) {
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
var backupMapping = [{
source: paths.BOX_DATA_DIR,
destination: 'box'
}, {
source: path.join(paths.PLATFORM_DATA_DIR, 'mail'),
destination: 'mail'
}];
api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
});
}
// Main process starts here
var backupId = process.argv[2];
var appId = process.argv[3];
var format = process.argv[3];
var dataDir = process.argv[4];
if (appId) debug('Backuptask for the app %s with id %s', appId, backupId);
else debug('Backuptask for the whole Cloudron with id %s', backupId);
debug(`Backing up ${dataDir} to ${backupId}`);
process.on('SIGTERM', function () {
process.exit(0);
@@ -94,7 +41,9 @@ process.on('SIGTERM', function () {
initialize(function (error) {
if (error) throw error;
function resultHandler(error) {
safe.fs.writeFileSync(paths.BACKUP_RESULT_FILE, '');
backups.upload(backupId, format, dataDir, function resultHandler(error) {
if (error) debug('completed with error', error);
debug('completed');
@@ -104,8 +53,5 @@ initialize(function (error) {
// https://nodejs.org/api/process.html are exit codes used by node. apps.js uses the value below
// to check apptask crashes
process.exit(error ? 50 : 0);
}
if (appId) backupApp(backupId, appId, resultHandler);
else backupBox(backupId, resultHandler);
});
});
+1 -1
View File
@@ -16,7 +16,7 @@ var assert = require('assert'),
var CA_PROD = 'https://acme-v01.api.letsencrypt.org',
CA_STAGING = 'https://acme-staging.api.letsencrypt.org',
LE_AGREEMENT = 'https://letsencrypt.org/documents/LE-SA-v1.1.1-August-1-2016.pdf';
LE_AGREEMENT = 'https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf';
exports = module.exports = {
getCertificate: getCertificate,
+2 -2
View File
@@ -177,7 +177,7 @@ function renewAll(auditSource, callback) {
apps.getAll(function (error, allApps) {
if (error) return callback(error);
allApps.push({ location: constants.ADMIN_LOCATION }); // inject fake webadmin app
allApps.push({ location: config.adminLocation() }); // inject fake webadmin app
var expiringApps = [ ];
for (var i = 0; i < allApps.length; i++) {
@@ -239,7 +239,7 @@ function renewAll(auditSource, callback) {
}
// reconfigure and reload nginx. this is required for the case where we got a renewed cert after fallback
var configureFunc = app.location === constants.ADMIN_LOCATION ?
var configureFunc = app.location === config.adminLocation() ?
nginx.configureAdmin.bind(null, certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn())
: nginx.configureApp.bind(null, app, certFilePath, keyFilePath);
+54 -73
View File
@@ -12,7 +12,7 @@ exports = module.exports = {
dnsSetup: dnsSetup,
getLogs: getLogs,
sendHeartbeat: sendHeartbeat,
sendCaasHeartbeat: sendCaasHeartbeat,
updateToLatest: updateToLatest,
reboot: reboot,
@@ -63,7 +63,6 @@ var appdb = require('./appdb.js'),
updateChecker = require('./updatechecker.js'),
user = require('./user.js'),
UserError = user.UserError,
user = require('./user.js'),
util = require('util'),
_ = require('underscore');
@@ -127,7 +126,6 @@ function initialize(callback) {
async.series([
certificates.initialize,
settings.initialize,
installAppBundle,
configureDefaultServer,
onDomainConfigured
], function (error) {
@@ -236,24 +234,32 @@ function configureWebadmin(callback) {
callback(error);
}
sysinfo.getPublicIp(function (error, ip) {
if (error) return done(error);
function configureNginx(error) {
debug('configureNginx: dns update:%j', error);
addDnsRecords(ip, function (error) {
certificates.ensureCertificate({ location: config.adminLocation() }, function (error, certFilePath, keyFilePath) {
if (error) return done(error);
gWebadminStatus.tls = true;
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), done);
});
}
// update the DNS. configure nginx regardless of whether it succeeded so that
// box is accessible even if dns creds are invalid
sysinfo.getPublicIp(function (error, ip) {
if (error) return configureNginx(error);
addDnsRecords(ip, function (error) {
if (error) return configureNginx(error);
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
if (error) return done(error);
if (error) return configureNginx(error);
gWebadminStatus.dns = true;
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
if (error) return done(error);
gWebadminStatus.tls = true;
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), done);
});
configureNginx();
});
});
});
@@ -410,9 +416,12 @@ function getConfig(callback) {
apiServerOrigin: config.apiServerOrigin(),
webServerOrigin: config.webServerOrigin(),
fqdn: config.fqdn(),
adminLocation: config.adminLocation(),
adminFqdn: config.adminFqdn(),
mailFqdn: config.mailFqdn(),
version: config.version(),
update: updateChecker.getUpdateInfo(),
progress: progress.get(),
progress: progress.getAll(),
isCustomDomain: config.isCustomDomain(),
isDemo: config.isDemo(),
developerMode: developerMode,
@@ -430,8 +439,8 @@ function getConfig(callback) {
});
}
function sendHeartbeat() {
if (config.provider() !== 'caas') return;
function sendCaasHeartbeat() {
assert(config.provider() === 'caas', 'Heartbeat is only sent for managed cloudrons');
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/heartbeat';
superagent.post(url).query({ token: config.token(), version: config.version() }).timeout(30 * 1000).end(function (error, result) {
@@ -531,9 +540,9 @@ function addDnsRecords(ip, callback) {
var dkimKey = readDkimPublicKeySync();
if (!dkimKey) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, new Error('Failed to read dkim public key')));
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
var webadminRecord = { subdomain: config.adminLocation(), type: 'A', values: [ ip ] };
// t=s limits the domainkey to this domain and not it's subdomains
var dkimRecord = { subdomain: constants.DKIM_SELECTOR + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
var dkimRecord = { subdomain: config.dkimSelector() + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
var records = [ ];
if (config.isCustomDomain()) {
@@ -568,7 +577,8 @@ function addDnsRecords(ip, callback) {
});
});
}, function (error) {
debug('addDnsRecords: done updating records with error:', error);
if (error) debug('addDnsRecords: done updating records with error:', error);
else debug('addDnsRecords: done');
callback(error);
});
@@ -662,19 +672,19 @@ function doUpgrade(boxUpdateInfo, callback) {
if (error) return upgradeError(error);
superagent.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/upgrade')
.query({ token: config.token() })
.send({ version: boxUpdateInfo.version })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return upgradeError(new Error('Network error making upgrade request: ' + error));
if (result.statusCode !== 202) return upgradeError(new Error(util.format('Server not ready to upgrade. statusCode: %s body: %j', result.status, result.body)));
.query({ token: config.token() })
.send({ version: boxUpdateInfo.version })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return upgradeError(new Error('Network error making upgrade request: ' + error));
if (result.statusCode !== 202) return upgradeError(new Error(util.format('Server not ready to upgrade. statusCode: %s body: %j', result.status, result.body)));
progress.set(progress.UPDATE, 10, 'Updating base system');
progress.set(progress.UPDATE, 10, 'Updating base system');
// no need to unlock since this is the last thing we ever do on this box
callback();
retire('upgrade');
});
// no need to unlock since this is the last thing we ever do on this box
callback();
retire('upgrade');
});
});
}
@@ -698,6 +708,7 @@ function doUpdate(boxUpdateInfo, callback) {
apiServerOrigin: config.apiServerOrigin(),
webServerOrigin: config.webServerOrigin(),
fqdn: config.fqdn(),
adminLocation: config.adminLocation(),
tlsCert: config.tlsCert(),
tlsKey: config.tlsKey(),
isCustomDomain: config.isCustomDomain(),
@@ -729,36 +740,6 @@ function doUpdate(boxUpdateInfo, callback) {
});
}
function installAppBundle(callback) {
assert.strictEqual(typeof callback, 'function');
if (fs.existsSync(paths.FIRST_RUN_FILE)) return callback();
var bundle = config.get('appBundle');
debug('initialize: installing app bundle on first run: %j', bundle);
if (!bundle || bundle.length === 0) return callback();
async.eachSeries(bundle, function (appInfo, iteratorCallback) {
debug('autoInstall: installing %s at %s', appInfo.appstoreId, appInfo.location);
var data = {
appStoreId: appInfo.appstoreId,
location: appInfo.location,
portBindings: appInfo.portBindings || null,
accessRestriction: appInfo.accessRestriction || null,
};
apps.install(data, { userId: null, username: 'autoinstaller' }, iteratorCallback);
}, function (error) {
if (error) debug('autoInstallApps: ', error);
fs.writeFileSync(paths.FIRST_RUN_FILE, 'been there, done that', 'utf8');
callback();
});
}
function checkDiskSpace(callback) {
callback = callback || NOOP_CALLBACK;
@@ -836,20 +817,20 @@ function doMigrate(options, callback) {
debug('migrate: domain: %s size %s region %s', options.domain, options.size, options.region);
superagent
.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/migrate')
.query({ token: config.token() })
.send(options)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return unlock(error); // network error
if (result.statusCode === 409) return unlock(new CloudronError(CloudronError.BAD_STATE));
if (result.statusCode === 404) return unlock(new CloudronError(CloudronError.NOT_FOUND));
if (result.statusCode !== 202) return unlock(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('%s %j', result.status, result.body)));
.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/migrate')
.query({ token: config.token() })
.send(options)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return unlock(error); // network error
if (result.statusCode === 409) return unlock(new CloudronError(CloudronError.BAD_STATE));
if (result.statusCode === 404) return unlock(new CloudronError(CloudronError.NOT_FOUND));
if (result.statusCode !== 202) return unlock(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('%s %j', result.status, result.body)));
progress.set(progress.MIGRATE, 10, 'Migrating');
progress.set(progress.MIGRATE, 10, 'Migrating');
retire('migrate', _.pick(options, 'domain', 'size', 'region'));
});
retire('migrate', _.pick(options, 'domain', 'size', 'region'));
});
});
callback(null);
+74 -34
View File
@@ -17,6 +17,7 @@ exports = module.exports = {
apiServerOrigin: apiServerOrigin,
webServerOrigin: webServerOrigin,
fqdn: fqdn,
zoneName: zoneName,
setFqdn: setFqdn,
token: token,
version: version,
@@ -28,11 +29,14 @@ exports = module.exports = {
adminOrigin: adminOrigin,
internalAdminOrigin: internalAdminOrigin,
sysadminOrigin: sysadminOrigin, // caas routes
adminLocation: adminLocation,
adminFqdn: adminFqdn,
mailLocation: mailLocation,
mailFqdn: mailFqdn,
appFqdn: appFqdn,
zoneName: zoneName,
setZoneName: setZoneName,
hasIPv6: hasIPv6,
dkimSelector: dkimSelector,
isDemo: isDemo,
@@ -44,13 +48,17 @@ exports = module.exports = {
};
var assert = require('assert'),
constants = require('./constants.js'),
debug = require('debug')('box:config.js'),
fs = require('fs'),
path = require('path'),
safe = require('safetydance'),
tld = require('tldjs'),
_ = require('underscore');
// assert on unknown environment can't proceed
assert(exports.CLOUDRON || exports.TEST, 'Unknown environment. This should not happen!');
var homeDir = process.env.HOME || process.env.HOMEPATH || process.env.USERPROFILE;
var data = { };
@@ -62,8 +70,25 @@ function baseDir() {
var cloudronConfigFileName = path.join(baseDir(), 'configs/cloudron.conf');
// only tests can run without a config file on disk, they use the defaults with runtime overrides
if (exports.CLOUDRON) assert(fs.existsSync(cloudronConfigFileName), 'No cloudron.conf found, cannot proceed');
function saveSync() {
fs.writeFileSync(cloudronConfigFileName, JSON.stringify(data, null, 4)); // functions are ignored by JSON.stringify
// only save values we want to have in the cloudron.conf, see start.sh
var conf = {
version: data.version,
token: data.token,
apiServerOrigin: data.apiServerOrigin,
webServerOrigin: data.webServerOrigin,
fqdn: data.fqdn,
zoneName: data.zoneName,
adminLocation: data.adminLocation,
isCustomDomain: data.isCustomDomain,
provider: data.provider,
isDemo: data.isDemo
};
fs.writeFileSync(cloudronConfigFileName, JSON.stringify(conf, null, 4)); // functions are ignored by JSON.stringify
}
function _reset(callback) {
@@ -76,45 +101,41 @@ function _reset(callback) {
function initConfig() {
// setup defaults
data.fqdn = 'localhost';
data.fqdn = '';
data.zoneName = '';
data.adminLocation = 'my';
data.port = 3000;
data.token = null;
data.version = null;
data.isCustomDomain = true;
data.apiServerOrigin = null;
data.webServerOrigin = null;
data.smtpPort = 2525; // // this value comes from mail container
data.provider = 'caas';
data.smtpPort = 2525; // this value comes from mail container
data.sysadminPort = 3001;
data.ldapPort = 3002;
data.provider = 'caas';
data.appBundle = [ ];
if (exports.CLOUDRON) {
data.port = 3000;
data.apiServerOrigin = null;
data.database = null;
} else if (exports.TEST) {
// keep in sync with start.sh
data.database = {
hostname: '127.0.0.1',
username: 'root',
password: 'password',
port: 3306,
name: 'box'
};
// overrides for local testings
if (exports.TEST) {
data.port = 5454;
data.apiServerOrigin = 'http://localhost:6060'; // hock doesn't support https
data.database = {
hostname: 'localhost',
username: 'root',
password: '',
port: 3306,
name: 'boxtest'
};
data.token = 'APPSTORE_TOKEN';
} else {
assert(false, 'Unknown environment. This should not happen!');
data.apiServerOrigin = 'http://localhost:6060'; // hock doesn't support https
data.database.password = '';
data.database.name = 'boxtest';
}
if (safe.fs.existsSync(cloudronConfigFileName)) {
var existingData = safe.JSON.parse(safe.fs.readFileSync(cloudronConfigFileName, 'utf8'));
_.extend(data, existingData); // overwrite defaults with saved config
return;
}
saveSync();
// overwrite defaults with saved config
var existingData = safe.JSON.parse(safe.fs.readFileSync(cloudronConfigFileName, 'utf8'));
_.extend(data, existingData);
}
initConfig();
@@ -175,16 +196,24 @@ function appFqdn(location) {
return isCustomDomain() ? location + '.' + fqdn() : location + '-' + fqdn();
}
function adminFqdn() {
return appFqdn(constants.ADMIN_LOCATION);
function mailLocation() {
return get('adminLocation'); // not a typo! should be same as admin location until we figure out certificates
}
function mailFqdn() {
return appFqdn(constants.MAIL_LOCATION);
return appFqdn(mailLocation());
}
function adminLocation() {
return get('adminLocation');
}
function adminFqdn() {
return appFqdn(adminLocation());
}
function adminOrigin() {
return 'https://' + appFqdn(constants.ADMIN_LOCATION);
return 'https://' + appFqdn(adminLocation());
}
function internalAdminOrigin() {
@@ -232,3 +261,14 @@ function tlsKey() {
var keyFile = path.join(baseDir(), 'configs/host.key');
return safe.fs.readFileSync(keyFile, 'utf8');
}
function hasIPv6() {
const IPV6_PROC_FILE = '/proc/net/if_inet6';
return fs.existsSync(IPV6_PROC_FILE);
}
function dkimSelector() {
var loc = adminLocation();
return loc === 'my' ? 'cloudron' : `cloudron-${loc.replace(/\./g, '')}`;
}
-5
View File
@@ -1,12 +1,9 @@
'use strict';
// default admin installation location. keep in sync with ADMIN_LOCATION in setup/start.sh and BOX_ADMIN_LOCATION in appstore constants.js
exports = module.exports = {
ADMIN_LOCATION: 'my',
API_LOCATION: 'api', // this is unused but reserved for future use (#403)
SMTP_LOCATION: 'smtp',
IMAP_LOCATION: 'imap',
MAIL_LOCATION: 'my', // not a typo! should be same as admin location until we figure out certificates
POSTMAN_LOCATION: 'postman', // used in dovecot bounces
// These are combined into one array because users and groups become mailboxes
@@ -36,8 +33,6 @@ exports = module.exports = {
DEMO_USERNAME: 'cloudron',
DKIM_SELECTOR: 'cloudron',
AUTOUPDATE_PATTERN_NEVER: 'never'
};
+18 -16
View File
@@ -35,7 +35,7 @@ var gAliveJob = null, // send periodic stats
gCleanupTokensJob = null,
gDockerVolumeCleanerJob = null,
gDynamicDNSJob = null,
gHeartbeatJob = null, // for CaaS health check
gCaasHeartbeatJob = null, // for CaaS health check
gSchedulerSyncJob = null,
gDigestEmailJob = null;
@@ -53,18 +53,20 @@ var AUDIT_SOURCE = { userId: null, username: 'cron' };
function initialize(callback) {
assert.strictEqual(typeof callback, 'function');
gHeartbeatJob = new CronJob({
cronTime: '00 */1 * * * *', // every minute
onTick: cloudron.sendHeartbeat,
start: false
});
// hack: send the first heartbeat only after we are running for 60 seconds
// required as we end up sending a heartbeat and then cloudron-setup reboots the server
setTimeout(function () {
if (!gHeartbeatJob) return; // already uninitalized
gHeartbeatJob.start();
cloudron.sendHeartbeat();
}, 1000 * 60);
if (config.provider() === 'caas') {
gCaasHeartbeatJob = new CronJob({
cronTime: '00 */1 * * * *', // every minute
onTick: cloudron.sendCaasHeartbeat,
start: false
});
// hack: send the first heartbeat only after we are running for 60 seconds
// required as we end up sending a heartbeat and then cloudron-setup reboots the server
setTimeout(function () {
if (!gCaasHeartbeatJob) return; // already uninitalized
gCaasHeartbeatJob.start();
cloudron.sendCaasHeartbeat();
}, 1000 * 60);
}
var randomHourMinute = Math.floor(60*Math.random());
gAliveJob = new CronJob({
@@ -139,7 +141,7 @@ function recreateJobs(tz) {
if (gCleanupBackupsJob) gCleanupBackupsJob.stop();
gCleanupBackupsJob = new CronJob({
cronTime: '00 45 */6 * * *', // every 6 hours. try not to overlap with ensureBackup job
onTick: backups.cleanup,
onTick: backups.cleanup.bind(null, AUDIT_SOURCE, NOOP_CALLBACK),
start: true,
timeZone: tz
});
@@ -252,8 +254,8 @@ function uninitialize(callback) {
if (gAppUpdateCheckerJob) gAppUpdateCheckerJob.stop();
gAppUpdateCheckerJob = null;
if (gHeartbeatJob) gHeartbeatJob.stop();
gHeartbeatJob = null;
if (gCaasHeartbeatJob) gCaasHeartbeatJob.stop();
gCaasHeartbeatJob = null;
if (gAliveJob) gAliveJob.stop();
gAliveJob = null;
+1 -21
View File
@@ -7,19 +7,15 @@ exports = module.exports = {
isEnabled: isEnabled,
setEnabled: setEnabled,
issueDeveloperToken: issueDeveloperToken,
getNonApprovedApps: getNonApprovedApps
issueDeveloperToken: issueDeveloperToken
};
var assert = require('assert'),
clients = require('./clients.js'),
config = require('./config.js'),
constants = require('./constants.js'),
debug = require('debug')('box:developer'),
eventlog = require('./eventlog.js'),
tokendb = require('./tokendb.js'),
settings = require('./settings.js'),
superagent = require('superagent'),
util = require('util');
function DeveloperError(reason, errorOrMessage) {
@@ -84,19 +80,3 @@ function issueDeveloperToken(user, auditSource, callback) {
callback(null, { token: token, expiresAt: new Date(expiresAt).toISOString() });
});
}
function getNonApprovedApps(callback) {
assert.strictEqual(typeof callback, 'function');
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/apps';
superagent.get(url).query({ token: config.token(), boxVersion: config.version() }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new DeveloperError(DeveloperError.EXTERNAL_ERROR, error));
if (result.statusCode === 401 || result.statusCode === 403) {
debug('Failed to list apps in development. Appstore token invalid or missing. Returning empty list.', result.body);
return callback(null, []);
}
if (result.statusCode !== 200) return callback(new DeveloperError(DeveloperError.EXTERNAL_ERROR, util.format('App listing failed. %s %j', result.status, result.body)));
callback(null, result.body.apps || []);
});
}
+25 -18
View File
@@ -33,31 +33,38 @@ function maybeSend(callback) {
var hasSubscription = result && result.plan.id !== 'free' && result.plan.id !== 'undecided';
eventlog.getByActionLastWeek(eventlog.ACTION_APP_UPDATE, function (error, appUpdates) {
eventlog.getByCreationTime(new Date(new Date() - 7*86400000), function (error, events) {
if (error) return callback(error);
eventlog.getByActionLastWeek(eventlog.ACTION_UPDATE, function (error, boxUpdates) {
if (error) return callback(error);
var appUpdates = events.filter(function (e) { return e.action === eventlog.ACTION_APP_UPDATE; }).map(function (e) { return e.data; });
var boxUpdates = events.filter(function (e) { return e.action === eventlog.ACTION_UPDATE; }).map(function (e) { return e.data; });
var certRenewals = events.filter(function (e) { return e.action === eventlog.ACTION_CERTIFICATE_RENEWAL; }).map(function (e) { return e.data; });
var usersAdded = events.filter(function (e) { return e.action === eventlog.ACTION_USER_ADD; }).map(function (e) { return e.data; });
var usersRemoved = events.filter(function (e) { return e.action === eventlog.ACTION_USER_REMOVE; }).map(function (e) { return e.data; });
var finishedBackups = events.filter(function (e) { return e.action === eventlog.ACTION_BACKUP_FINISH && !e.errorMessage; }).map(function (e) { return e.data; });
var info = {
hasSubscription: hasSubscription,
if (error) return callback(error);
pendingAppUpdates: pendingAppUpdates,
pendingBoxUpdate: updateInfo.box || null,
var info = {
hasSubscription: hasSubscription,
finishedAppUpdates: (appUpdates || []).map(function (e) { return e.data; }),
finishedBoxUpdates: (boxUpdates || []).map(function (e) { return e.data; })
};
pendingAppUpdates: pendingAppUpdates,
pendingBoxUpdate: updateInfo.box || null,
if (info.pendingAppUpdates.length || info.pendingBoxUpdate || info.finishedAppUpdates.length || info.finishedBoxUpdates.length) {
debug('maybeSend: sending digest email', info);
mailer.sendDigest(info);
} else {
debug('maybeSend: nothing happened, NOT sending digest email');
}
finishedAppUpdates: appUpdates,
finishedBoxUpdates: boxUpdates,
callback();
});
certRenewals: certRenewals,
finishedBackups: finishedBackups, // only the successful backups
usersAdded: usersAdded,
usersRemoved: usersRemoved // unused because we don't have username to work with
};
// always send digest for backup failure notification
debug('maybeSend: sending digest email', info);
mailer.sendDigest(info);
callback();
});
});
});
+3 -3
View File
@@ -38,7 +38,7 @@ function add(dnsConfig, zoneName, subdomain, type, values, callback) {
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 400) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode === 420) return callback(new SubdomainError(SubdomainError.STILL_BUSY));
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
@@ -63,7 +63,7 @@ function get(dnsConfig, zoneName, subdomain, type, callback) {
.query({ token: dnsConfig.token, type: type })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
return callback(null, result.body.values);
@@ -102,7 +102,7 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 400) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode === 420) return callback(new SubdomainError(SubdomainError.STILL_BUSY));
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND));
+12 -4
View File
@@ -66,18 +66,18 @@ function getDNSRecordsByZoneId(dnsConfig, zoneId, zoneName, subdomain, type, cal
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof callback, 'function');
var fqdn = subdomain === '' ? zoneName : subdomain + '.' + zoneName;
superagent.get(CLOUDFLARE_ENDPOINT + '/zones/' + zoneId + '/dns_records')
.set('X-Auth-Key',dnsConfig.token)
.set('X-Auth-Email',dnsConfig.email)
.query({ type: type, name: fqdn })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode !== 200 || result.body.success !== true) return translateRequestError(result, callback);
var fqdn = subdomain === '' ? zoneName : subdomain + '.' + zoneName;
var tmp = result.body.result.filter(function (record) {
return (record.type === type && record.name === fqdn);
});
var tmp = result.body.result;
return callback(null, tmp);
});
@@ -109,10 +109,18 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
var i = 0;
async.eachSeries(values, function (value, callback) {
var priority = null;
if (type === 'MX') {
priority = value.split(' ')[0];
value = value.split(' ')[1];
}
var data = {
type: type,
name: fqdn,
content: value,
priority: priority,
ttl: 120 // 1 means "automatic" (meaning 300ms) and 120 is the lowest supported
};
+66 -49
View File
@@ -10,9 +10,10 @@ exports = module.exports = {
var assert = require('assert'),
async = require('async'),
constants = require('../constants.js'),
config = require('../config.js'),
debug = require('debug')('box:dns/digitalocean'),
dns = require('dns'),
safe = require('safetydance'),
SubdomainError = require('../subdomains.js').SubdomainError,
superagent = require('superagent'),
util = require('util');
@@ -30,22 +31,34 @@ function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof callback, 'function');
superagent.get(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records')
.set('Authorization', 'Bearer ' + dnsConfig.token)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, formatError(result)));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
var nextPage = null, matchingRecords = [];
var tmp = result.body.domain_records.filter(function (record) {
return (record.type === type && record.name === subdomain);
});
async.doWhilst(function (iteratorDone) {
var url = nextPage ? nextPage : DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records';
debug('getInternal: %j', tmp);
superagent.get(url)
.set('Authorization', 'Bearer ' + dnsConfig.token)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, formatError(result)));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
return callback(null, tmp);
matchingRecords = matchingRecords.concat(result.body.domain_records.filter(function (record) {
return (record.type === type && record.name === subdomain);
}));
nextPage = (result.body.links && result.body.links.pages) ? result.body.links.pages.next : null;
iteratorDone();
});
}, function () { return !!nextPage; }, function (error) {
if (error) return callback(error);
debug('getInternal: %j', matchingRecords);
return callback(null, matchingRecords);
});
}
@@ -65,7 +78,7 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
if (error) return callback(error);
// used to track available records to update instead of create
var i = 0;
var i = 0, recordIds = [];
async.eachSeries(values, function (value, callback) {
var priority = null;
@@ -85,38 +98,42 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
if (i >= result.length) {
superagent.post(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records')
.set('Authorization', 'Bearer ' + dnsConfig.token)
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
.set('Authorization', 'Bearer ' + dnsConfig.token)
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
return callback(null);
});
recordIds.push(safe.query(result.body, 'domain_record.id'));
return callback(null);
});
} else {
superagent.put(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + result[i].id)
.set('Authorization', 'Bearer ' + dnsConfig.token)
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
.set('Authorization', 'Bearer ' + dnsConfig.token)
.send(data)
.timeout(30 * 1000)
.end(function (error, result) {
// increment, as we have consumed the record
++i;
++i;
if (error && !error.response) return callback(error);
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
return callback(null);
});
recordIds.push(safe.query(result.body, 'domain_record.id'));
return callback(null);
});
}
}, function (error) {
}, function (error, id) {
if (error) return callback(error);
callback(null, 'unused');
callback(null, '' + recordIds[0]); // DO ids are integers
});
});
}
@@ -166,18 +183,18 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
// FIXME we only handle the first one currently
superagent.del(DIGITALOCEAN_ENDPOINT + '/v2/domains/' + zoneName + '/records/' + tmp[0].id)
.set('Authorization', 'Bearer ' + dnsConfig.token)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode === 404) return callback(null);
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
.set('Authorization', 'Bearer ' + dnsConfig.token)
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('Network error %s', error.message)));
if (result.statusCode === 404) return callback(null);
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
debug('del: done');
debug('del: done');
return callback(null);
});
return callback(null);
});
});
}
@@ -204,7 +221,7 @@ function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
}
const name = constants.ADMIN_LOCATION + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
const name = config.adminLocation() + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
upsert(credentials, zoneName, name, 'A', [ ip ], function (error, changeId) {
if (error) return callback(error);
+201
View File
@@ -0,0 +1,201 @@
'use strict';
exports = module.exports = {
upsert: upsert,
get: get,
del: del,
waitForDns: require('./waitfordns.js'),
verifyDnsConfig: verifyDnsConfig
};
var assert = require('assert'),
config = require('../config.js'),
debug = require('debug')('box:dns/gcdns'),
dns = require('dns'),
GCDNS = require('@google-cloud/dns'),
SubdomainError = require('../subdomains.js').SubdomainError,
util = require('util'),
_ = require('underscore');
// Extracts only the fields the DNS client and this module need from the raw
// dnsConfig object, so unrelated fields are never forwarded to the SDK.
// Returns { provider, projectId, keyFilename, email [, credentials] }.
function getDnsCredentials(dnsConfig) {
    assert.strictEqual(typeof dnsConfig, 'object');

    // named 'credentials' (not 'config') to avoid shadowing the module-level config require
    var credentials = {
        provider: dnsConfig.provider,
        projectId: dnsConfig.projectId,
        keyFilename: dnsConfig.keyFilename,
        email: dnsConfig.email
    };

    if (dnsConfig.credentials) {
        // whitelist only the two fields the google client uses for inline credentials
        credentials.credentials = {
            client_email: dnsConfig.credentials.client_email,
            private_key: dnsConfig.credentials.private_key
        };
    }

    return credentials;
}
// Looks up the managed zone whose dnsName matches zoneName.
// callback(error, zone) where zone.metadata ~= { name, dnsName, nameServers: [] }
function getZoneByName(dnsConfig, zoneName, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof callback, 'function');

    var gcdns = GCDNS(getDnsCredentials(dnsConfig));

    gcdns.getZones(function (error, zones) {
        if (error) {
            // order matters: the message/reason checks are more specific than the code checks
            if (error.message === 'invalid_grant') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, 'The key was probably revoked'));
            if (error.reason === 'No such domain') return callback(new SubdomainError(SubdomainError.NOT_FOUND, error.message));
            if (error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
            if (error.code === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, error.message));

            debug('gcdns.getZones', error);
            return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error));
        }

        // the stored dnsName is fully qualified (trailing '.'), strip it before comparing
        var matching = null;
        zones.forEach(function (z) {
            if (!matching && z.metadata.dnsName.slice(0, -1) === zoneName) matching = z;
        });

        if (!matching) return callback(new SubdomainError(SubdomainError.NOT_FOUND, 'no such zone'));

        callback(null, matching); //zone.metadata ~= {name="", dnsName="", nameServers:[]}
    });
}
// Creates or replaces the record set for subdomain/type with the given values.
// callback(error, changeId)
function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(util.isArray(values));
    assert.strictEqual(typeof callback, 'function');

    debug('add: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);

    getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
        if (error) return callback(error);

        // fully-qualified record name; google requires the trailing dot
        var fqdn = (subdomain ? subdomain + '.' : '') + zoneName + '.';

        zone.getRecords({ type: type, name: fqdn }, function (error, existingRecords) {
            if (error && error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
            if (error) {
                debug('upsert->zone.getRecords', error);
                return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
            }

            var replacement = zone.record(type, {
                name: fqdn,
                data: values,
                ttl: 1
            });

            // atomically swap any existing record set for the replacement
            zone.createChange({ delete: existingRecords, add: replacement }, function (error, change) {
                if (error && error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
                if (error && error.code === 412) return callback(new SubdomainError(SubdomainError.STILL_BUSY, error.message));
                if (error) {
                    debug('upsert->zone.createChange', error);
                    return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
                }

                callback(null, change.id);
            });
        });
    });
}
// Fetches the values of the record set for subdomain/type.
// callback(error, values) - values is [] when no record set exists.
function get(dnsConfig, zoneName, subdomain, type, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof type, 'string');
    assert.strictEqual(typeof callback, 'function');

    getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
        if (error) return callback(error);

        var fqdn = (subdomain ? subdomain + '.' : '') + zoneName + '.';

        zone.getRecords({ name: fqdn, type: type }, function (error, records) {
            if (error && error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
            if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error));

            if (records.length === 0) return callback(null, [ ]);

            // a single record set carries all values for this name/type pair
            return callback(null, records[0].data);
        });
    });
}
// Deletes the whole record set for subdomain/type. The values argument is
// unused here because Google Cloud DNS deletes record sets, not single values.
// callback(error, changeId)
function del(dnsConfig, zoneName, subdomain, type, values, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof subdomain, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(util.isArray(values));
    assert.strictEqual(typeof callback, 'function');

    getZoneByName(getDnsCredentials(dnsConfig), zoneName, function (error, zone) {
        if (error) return callback(error);

        var domain = (subdomain ? subdomain + '.' : '') + zoneName + '.';

        zone.getRecords({ type: type, name: domain }, function(error, oldRecords) {
            if (error && error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
            if (error) {
                debug('del->zone.getRecords', error);
                return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
            }

            zone.deleteRecords(oldRecords, function (error, change) {
                if (error && error.code === 403) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
                if (error && error.code === 412) return callback(new SubdomainError(SubdomainError.STILL_BUSY, error.message));
                if (error) {
                    // fixed: label was 'del->zone.createChange' but the failing call is deleteRecords
                    debug('del->zone.deleteRecords', error);
                    return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
                }

                callback(null, change.id);
            });
        });
    });
}
// Verifies that the domain delegates to Google Cloud DNS and that the supplied
// credentials can modify the zone (by upserting the admin A record).
// callback(error, credentials)
function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof fqdn, 'string');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof ip, 'string');
    assert.strictEqual(typeof callback, 'function');

    var credentials = getDnsCredentials(dnsConfig);

    if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here

    dns.resolveNs(zoneName, function (error, nameservers) {
        if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
        if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));

        getZoneByName(credentials, zoneName, function (error, zone) {
            if (error) return callback(error);

            // the zone lists fully-qualified nameservers; strip the trailing dot and sort both sides before comparing
            var zoneNameservers = zone.metadata.nameServers.sort().map(function (ns) { return ns.replace(/\.$/, ''); });

            if (!_.isEqual(zoneNameservers, nameservers.sort())) {
                debug('verifyDnsConfig: %j and %j do not match', nameservers, zoneNameservers);
                return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Google Cloud DNS'));
            }

            const name = config.adminLocation() + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));

            // writing the admin A record proves the credentials allow changes
            upsert(credentials, zoneName, name, 'A', [ ip ], function (error, changeId) {
                if (error) return callback(error);

                debug('verifyDnsConfig: A record added with change id %s', changeId);

                callback(null, credentials);
            });
        });
    });
}
+2 -2
View File
@@ -10,7 +10,7 @@ exports = module.exports = {
var assert = require('assert'),
async = require('async'),
constants = require('../constants.js'),
config = require('../config.js'),
debug = require('debug')('box:dns/manual'),
dig = require('../dig.js'),
dns = require('dns'),
@@ -58,7 +58,7 @@ function verifyDnsConfig(dnsConfig, domain, zoneName, ip, callback) {
assert.strictEqual(typeof ip, 'string');
assert.strictEqual(typeof callback, 'function');
var adminDomain = constants.ADMIN_LOCATION + '.' + domain;
var adminDomain = config.adminLocation() + '.' + domain;
dns.resolveNs(zoneName, function (error, nameservers) {
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));
+2 -2
View File
@@ -13,7 +13,7 @@ exports = module.exports = {
var assert = require('assert'),
AWS = require('aws-sdk'),
constants = require('../constants.js'),
config = require('../config.js'),
debug = require('debug')('box:dns/route53'),
dns = require('dns'),
SubdomainError = require('../subdomains.js').SubdomainError,
@@ -247,7 +247,7 @@ function verifyDnsConfig(dnsConfig, fqdn, zoneName, ip, callback) {
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Route53'));
}
const name = constants.ADMIN_LOCATION + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
const name = config.adminLocation() + (fqdn === zoneName ? '' : '.' + fqdn.slice(0, - zoneName.length - 1));
upsert(credentials, zoneName, name, 'A', [ ip ], function (error, changeId) {
if (error) return callback(error);
+11 -21
View File
@@ -186,9 +186,9 @@ function createSubcontainer(app, name, cmd, options, callback) {
'/run': {}
},
Labels: {
"location": app.location,
"appId": app.id,
"isSubcontainer": String(!isAppContainer)
'location': app.location,
'appId': app.id,
'isSubcontainer': String(!isAppContainer)
},
HostConfig: {
Binds: addons.getBindsSync(app, app.manifest.addons),
@@ -198,15 +198,15 @@ function createSubcontainer(app, name, cmd, options, callback) {
PublishAllPorts: false,
ReadonlyRootfs: app.debugMode ? !!app.debugMode.readonlyRootfs : true,
RestartPolicy: {
"Name": isAppContainer ? "always" : "no",
"MaximumRetryCount": 0
'Name': isAppContainer ? 'always' : 'no',
'MaximumRetryCount': 0
},
CpuShares: 512, // relative to 1024 for system processes
VolumesFrom: isAppContainer ? null : [ app.containerId + ":rw" ],
VolumesFrom: isAppContainer ? null : [ app.containerId + ':rw' ],
NetworkMode: 'cloudron',
Dns: ['172.18.0.1'], // use internal dns
DnsSearch: ['.'], // use internal dns
SecurityOpt: enableSecurityOpt ? [ "apparmor=docker-cloudron-app" ] : null // profile available only on cloudron
SecurityOpt: enableSecurityOpt ? [ 'apparmor=docker-cloudron-app' ] : null // profile available only on cloudron
}
};
@@ -219,7 +219,7 @@ function createSubcontainer(app, name, cmd, options, callback) {
containerOptions = _.extend(containerOptions, options);
debugApp(app, 'Creating container for %s with options %j', app.manifest.dockerImage, containerOptions);
debugApp(app, 'Creating container for %s', app.manifest.dockerImage);
docker.createContainer(containerOptions, callback);
});
@@ -367,31 +367,21 @@ function getContainerIdByIp(ip, callback) {
assert.strictEqual(typeof ip, 'string');
assert.strictEqual(typeof callback, 'function');
debug('get container by ip %s', ip);
var docker = exports.connection;
docker.listNetworks({}, function (error, result) {
docker.getNetwork('cloudron').inspect(function (error, bridge) {
if (error && error.statusCode === 404) return callback(new Error('Unable to find the cloudron network'));
if (error) return callback(error);
var bridge;
result.forEach(function (n) {
if (n.Name === 'cloudron') bridge = n;
});
if (!bridge) return callback(new Error('Unable to find the cloudron network'));
var containerId;
for (var id in bridge.Containers) {
if (bridge.Containers[id].IPv4Address.indexOf(ip) === 0) {
if (bridge.Containers[id].IPv4Address.indexOf(ip + '/16') === 0) {
containerId = id;
break;
}
}
if (!containerId) return callback(new Error('No container with that ip'));
debug('found container %s with ip %s', containerId, ip);
callback(null, containerId);
});
}
+101 -2
View File
@@ -3,6 +3,7 @@
exports = module.exports = {
verifyRelay: verifyRelay,
getStatus: getStatus,
checkRblStatus: checkRblStatus,
EmailError: EmailError
};
@@ -14,6 +15,7 @@ var assert = require('assert'),
constants = require('./constants.js'),
debug = require('debug')('box:email'),
dig = require('./dig.js'),
mailer = require('./mailer.js'),
net = require('net'),
nodemailer = require('nodemailer'),
safe = require('safetydance'),
@@ -23,6 +25,8 @@ var assert = require('assert'),
util = require('util'),
_ = require('underscore');
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
const digOptions = { server: '127.0.0.1', port: 53, timeout: 5000 };
function EmailError(reason, errorOrMessage) {
@@ -128,7 +132,7 @@ function verifyRelay(relay, callback) {
function checkDkim(callback) {
var dkim = {
domain: constants.DKIM_SELECTOR + '._domainkey.' + config.fqdn(),
domain: config.dkimSelector() + '._domainkey.' + config.fqdn(),
type: 'TXT',
expected: null,
value: null,
@@ -259,6 +263,100 @@ function checkPtr(callback) {
});
}
// https://raw.githubusercontent.com/jawsome/node-dnsbl/master/list.json
// DNS blacklists (RBLs) checked by checkRblStatus. 'dns' is the RBL zone
// queried via <reversed-ip>.<dns>; 'site' documents the delisting procedure.
const RBL_LIST = [
    {
        "name": "Barracuda",
        "dns": "b.barracudacentral.org",
        "site": "http://www.barracudacentral.org/rbl/removal-request"
    },
    {
        "name": "SpamCop",
        "dns": "bl.spamcop.net",
        "site": "http://spamcop.net"
    },
    {
        "name": "Sorbs Aggregate Zone",
        "dns": "dnsbl.sorbs.net",
        "site": "http://dnsbl.sorbs.net/"
    },
    {
        "name": "Sorbs spam.dnsbl Zone",
        "dns": "spam.dnsbl.sorbs.net",
        "site": "http://sorbs.net"
    },
    {
        "name": "Composite Blocking List",
        "dns": "cbl.abuseat.org",
        "site": "http://www.abuseat.org"
    },
    {
        "name": "SpamHaus Zen",
        "dns": "zen.spamhaus.org",
        "site": "http://spamhaus.org"
    },
    {
        "name": "Multi SURBL",
        "dns": "multi.surbl.org",
        "site": "http://www.surbl.org"
    },
    {
        "name": "Spam Cannibal",
        "dns": "bl.spamcannibal.org",
        "site": "http://www.spamcannibal.org/cannibal.cgi"
    },
    {
        "name": "dnsbl.abuse.ch",
        "dns": "spam.abuse.ch",
        "site": "http://dnsbl.abuse.ch/"
    },
    {
        "name": "The Unsubscribe Blacklist(UBL)",
        "dns": "ubl.unsubscore.com",
        "site": "http://www.lashback.com/blacklist/"
    },
    {
        "name": "UCEPROTECT Network",
        "dns": "dnsbl-1.uceprotect.net",
        "site": "http://www.uceprotect.net/en"
    }
];
// Checks whether this server's public IP appears in any of the RBL_LIST blacklists.
// callback(error, { status: <true when not listed anywhere>, ip, servers: [ blacklist entries with txtRecords ] })
function checkRblStatus(callback) {
    assert.strictEqual(typeof callback, 'function');

    sysinfo.getPublicIp(function (error, ip) {
        if (error) return callback(error, ip);

        // https://tools.ietf.org/html/rfc5782
        // query <reversed-dotted-quad>.<rbl-zone>; an A answer means "listed"
        var reversedIp = ip.split('.').reverse().join('.');

        async.map(RBL_LIST, function (rbl, iteratorDone) {
            dig.resolve(reversedIp + '.' + rbl.dns, 'A', digOptions, function (error, records) {
                if (error || !records) return iteratorDone(null, null); // not listed

                debug('checkRblStatus: %s (ip: %s) is in the blacklist of %j', config.fqdn(), reversedIp, rbl);

                var entry = _.extend({ }, rbl);

                // the TXT record, when present, explains why this ip was listed
                dig.resolve(reversedIp + '.' + rbl.dns, 'TXT', digOptions, function (error, txtRecords) {
                    entry.txtRecords = error || !txtRecords ? 'No txt record' : txtRecords;

                    debug('checkRblStatus: %s (error: %s) (txtRecords: %j)', config.fqdn(), error, txtRecords);

                    return iteratorDone(null, entry);
                });
            });
        }, function (ignoredError, maybeListed) {
            var listed = maybeListed.filter(function (entry) { return entry !== null; });

            debug('checkRblStatus: %s (ip: %s) servers: %j', config.fqdn(), ip, listed);

            return callback(null, { status: listed.length === 0, ip: ip, servers: listed });
        });
    });
}
function getStatus(callback) {
assert.strictEqual(typeof callback, 'function');
@@ -290,7 +388,8 @@ function getStatus(callback) {
recordResult('dns.spf', checkSpf),
recordResult('dns.dkim', checkDkim),
recordResult('dns.ptr', checkPtr),
recordResult('relay', checkOutboundPort25)
recordResult('relay', checkOutboundPort25),
recordResult('rbl', checkRblStatus)
);
} else {
checks.push(recordResult('relay', checkSmtpRelay.bind(null, relay)));
+9 -8
View File
@@ -6,7 +6,7 @@ exports = module.exports = {
add: add,
get: get,
getAllPaged: getAllPaged,
getByActionLastWeek: getByActionLastWeek,
getByCreationTime: getByCreationTime,
cleanup: cleanup,
// keep in sync with webadmin index.js filter and CLI tool
@@ -20,6 +20,7 @@ exports = module.exports = {
ACTION_APP_LOGIN: 'app.login',
ACTION_BACKUP_FINISH: 'backup.finish',
ACTION_BACKUP_START: 'backup.start',
ACTION_BACKUP_CLEANUP: 'backup.cleanup',
ACTION_CERTIFICATE_RENEWAL: 'certificate.renew',
ACTION_CLI_MODE: 'settings.climode',
ACTION_START: 'cloudron.start',
@@ -97,21 +98,21 @@ function getAllPaged(action, search, page, perPage, callback) {
assert.strictEqual(typeof perPage, 'number');
assert.strictEqual(typeof callback, 'function');
eventlogdb.getAllPaged(action, search, page, perPage, function (error, boxes) {
eventlogdb.getAllPaged(action, search, page, perPage, function (error, events) {
if (error) return callback(new EventLogError(EventLogError.INTERNAL_ERROR, error));
callback(null, boxes);
callback(null, events);
});
}
function getByActionLastWeek(action, callback) {
assert(typeof action === 'string' || action === null);
function getByCreationTime(creationTime, callback) {
assert(util.isDate(creationTime));
assert.strictEqual(typeof callback, 'function');
eventlogdb.getByActionLastWeek(action, function (error, boxes) {
eventlogdb.getByCreationTime(creationTime, function (error, events) {
if (error) return callback(new EventLogError(EventLogError.INTERNAL_ERROR, error));
callback(null, boxes);
callback(null, events);
});
}
@@ -119,7 +120,7 @@ function cleanup(callback) {
callback = callback || NOOP_CALLBACK;
var d = new Date();
d.setDate(d.getDate() - 7); // 7 days ago
d.setDate(d.getDate() - 10); // 10 days ago
// only cleanup high frequency events
var actions = [
+5 -5
View File
@@ -3,7 +3,7 @@
exports = module.exports = {
get: get,
getAllPaged: getAllPaged,
getByActionLastWeek: getByActionLastWeek,
getByCreationTime: getByCreationTime,
add: add,
count: count,
delByCreationTime: delByCreationTime,
@@ -73,12 +73,12 @@ function getAllPaged(action, search, page, perPage, callback) {
});
}
function getByActionLastWeek(action, callback) {
assert(typeof action === 'string' || action === null);
function getByCreationTime(creationTime, callback) {
assert(util.isDate(creationTime));
assert.strictEqual(typeof callback, 'function');
var query = 'SELECT ' + EVENTLOGS_FIELDS + ' FROM eventlog WHERE action=? AND creationTime >= DATE_SUB(NOW(), INTERVAL 1 WEEK) ORDER BY creationTime DESC';
database.query(query, [ action ], function (error, results) {
var query = 'SELECT ' + EVENTLOGS_FIELDS + ' FROM eventlog WHERE creationTime >= ? ORDER BY creationTime DESC';
database.query(query, [ creationTime ], function (error, results) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results.forEach(postProcess);
+5 -5
View File
@@ -5,9 +5,9 @@
// Do not require anything here!
exports = module.exports = {
// a major version makes all apps restore from backup
// a major version makes all apps restore from backup. #451 must be fixed before we do this.
// a minor version makes all apps re-configure themselves
'version': '48.5.0',
'version': '48.8.0',
'baseImages': [ 'cloudron/base:0.10.0' ],
@@ -15,10 +15,10 @@ exports = module.exports = {
// This is because we upgrade using dumps instead of mysql_upgrade, pg_upgrade etc
'images': {
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.18.0' },
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.17.0' },
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.17.1' },
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.13.0' },
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.11.0' },
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.36.3' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.11.0' }
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.39.0' },
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.12.0' }
}
};
+123 -15
View File
@@ -61,12 +61,74 @@ function getUsersWithAccessToApp(req, callback) {
});
}
// helper function to deal with pagination
// Sends LDAP search results honoring the paged-results control when the client
// requested one; otherwise all results are sent at once. The paging cookie
// encodes the index of the next result to send. Always ends the response and
// calls next().
function finalSend(results, req, res, next) {
    var min = 0;
    var max = results.length;
    var cookie = null;
    var pageSize = 0;

    // check if this is a paging request, if so get the cookie for session info
    req.controls.forEach(function (control) {
        if (control.type === ldap.PagedResultsControl.OID) {
            pageSize = control.value.size;
            cookie = control.value.cookie;
        }
    });

    // sends results[start..end) clamped to valid bounds; returns the index after the last one sent
    function sendPagedResults(start, end) {
        start = (start < min) ? min : start;
        end = (end > max || end < min) ? max : end;
        var i;

        for (i = start; i < end; i++) {
            res.send(results[i]);
        }

        return i;
    }

    if (cookie && Buffer.isBuffer(cookie)) {
        // we have pagination
        var first = min;
        if (cookie.length !== 0) {
            first = parseInt(cookie.toString(), 10);
        }

        var last = sendPagedResults(first, first + pageSize);

        var resultCookie;
        if (last < max) { // more pages follow; the cookie holds the next start index
            resultCookie = Buffer.from(last.toString()); // Buffer.from() instead of deprecated new Buffer()
        } else { // done; an empty cookie tells the client to stop paging
            resultCookie = Buffer.from('');
        }

        res.controls.push(new ldap.PagedResultsControl({
            value: {
                size: pageSize, // correctness not required here
                cookie: resultCookie
            }
        }));
    } else {
        // no pagination simply send all
        results.forEach(function (result) {
            res.send(result);
        });
    }

    // all done
    res.end();
    next();
}
function userSearch(req, res, next) {
debug('user search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
getUsersWithAccessToApp(req, function (error, result) {
if (error) return next(error);
var results = [];
// send user objects
result.forEach(function (entry) {
// skip entries with empty username. Some apps like owncloud can't deal with this
@@ -109,11 +171,11 @@ function userSearch(req, res, next) {
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
res.send(obj);
results.push(obj);
}
});
res.end();
finalSend(results, req, res, next);
});
}
@@ -123,6 +185,8 @@ function groupSearch(req, res, next) {
getUsersWithAccessToApp(req, function (error, result) {
if (error) return next(error);
var results = [];
var groups = [{
name: 'users',
admin: false
@@ -149,11 +213,43 @@ function groupSearch(req, res, next) {
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if ((req.dn.equals(dn) || req.dn.parentOf(dn)) && lowerCaseFilter.matches(obj.attributes)) {
res.send(obj);
results.push(obj);
}
});
res.end();
finalSend(results, req, res, next);
});
}
// LDAP compare on cn=users: true when the given memberuid has access to the app.
function groupUsersCompare(req, res, next) {
    debug('group users compare: dn %s, attribute %s, value %s (from %s)', req.dn.toString(), req.attribute, req.value, req.connection.ldap.id);

    getUsersWithAccessToApp(req, function (error, result) {
        if (error) return next(error);

        // we only support memberuid here, if we add new group attributes later add them here
        if (req.attribute !== 'memberuid') return res.end(false);

        var matches = result.some(function (u) { return u.id === req.value; });

        res.end(matches);
    });
}
// LDAP compare on cn=admins: true when the given memberuid is an admin with access to the app.
function groupAdminsCompare(req, res, next) {
    debug('group admins compare: dn %s, attribute %s, value %s (from %s)', req.dn.toString(), req.attribute, req.value, req.connection.ldap.id);

    getUsersWithAccessToApp(req, function (error, result) {
        if (error) return next(error);

        // we only support memberuid here, if we add new group attributes later add them here
        if (req.attribute !== 'memberuid') return res.end(false);

        var match = result.find(function (u) { return u.id === req.value; });

        res.end(!!(match && match.admin));
    });
}
@@ -161,6 +257,7 @@ function mailboxSearch(req, res, next) {
debug('mailbox search: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
if (!req.dn.rdns[0].attrs.cn) return next(new ldap.NoSuchObjectError(req.dn.toString()));
var name = req.dn.rdns[0].attrs.cn.value.toLowerCase();
// allow login via email
var parts = name.split('@');
@@ -188,9 +285,11 @@ function mailboxSearch(req, res, next) {
var lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if (lowerCaseFilter.matches(obj.attributes)) res.send(obj);
res.end();
if (lowerCaseFilter.matches(obj.attributes)) {
finalSend([ obj ], req, res, next);
} else {
res.end();
}
});
}
@@ -198,6 +297,7 @@ function mailAliasSearch(req, res, next) {
debug('mail alias get: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
if (!req.dn.rdns[0].attrs.cn) return next(new ldap.NoSuchObjectError(req.dn.toString()));
mailboxdb.getAlias(req.dn.rdns[0].attrs.cn.value.toLowerCase(), function (error, alias) {
if (error && error.reason === DatabaseError.NOT_FOUND) return next(new ldap.NoSuchObjectError(req.dn.toString()));
if (error) return next(new ldap.OperationsError(error.toString()));
@@ -218,9 +318,11 @@ function mailAliasSearch(req, res, next) {
var lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if (lowerCaseFilter.matches(obj.attributes)) res.send(obj);
res.end();
if (lowerCaseFilter.matches(obj.attributes)) {
finalSend([ obj ], req, res, next);
} else {
res.end();
}
});
}
@@ -228,6 +330,7 @@ function mailingListSearch(req, res, next) {
debug('mailing list get: dn %s, scope %s, filter %s (from %s)', req.dn.toString(), req.scope, req.filter.toString(), req.connection.ldap.id);
if (!req.dn.rdns[0].attrs.cn) return next(new ldap.NoSuchObjectError(req.dn.toString()));
mailboxdb.getGroup(req.dn.rdns[0].attrs.cn.value.toLowerCase(), function (error, group) {
if (error && error.reason === DatabaseError.NOT_FOUND) return next(new ldap.NoSuchObjectError(req.dn.toString()));
if (error) return next(new ldap.OperationsError(error.toString()));
@@ -248,9 +351,11 @@ function mailingListSearch(req, res, next) {
var lowerCaseFilter = safe(function () { return ldap.parseFilter(req.filter.toString().toLowerCase()); }, null);
if (!lowerCaseFilter) return next(new ldap.OperationsError(safe.error.toString()));
if (lowerCaseFilter.matches(obj.attributes)) res.send(obj);
res.end();
if (lowerCaseFilter.matches(obj.attributes)) {
finalSend([ obj ], req, res, next);
} else {
res.end();
}
});
}
@@ -370,14 +475,17 @@ function start(callback) {
gServer.bind('ou=recvmail,dc=cloudron', authenticateMailbox);
gServer.bind('ou=sendmail,dc=cloudron', authenticateMailbox);
gServer.compare('cn=users,ou=groups,dc=cloudron', groupUsersCompare);
gServer.compare('cn=admins,ou=groups,dc=cloudron', groupAdminsCompare);
// this is the bind for addons (after bind, they might search and authenticate)
gServer.bind('ou=addons,dc=cloudron', function(req, res, next) {
gServer.bind('ou=addons,dc=cloudron', function(req, res /*, next */) {
debug('addons bind: %s', req.dn.toString()); // note: cn can be email or id
res.end();
});
// this is the bind for apps (after bind, they might search and authenticate user)
gServer.bind('ou=apps,dc=cloudron', function(req, res, next) {
gServer.bind('ou=apps,dc=cloudron', function(req, res /*, next */) {
// TODO: validate password
debug('application bind: %s', req.dn.toString());
res.end();
+35 -3
View File
@@ -2,13 +2,14 @@
Dear Cloudron Admin,
a new version <%= updateInfo.manifest.version %> of the app '<%= app.manifest.title %>' installed at <%= app.fqdn %> is available!
The app will update automatically tonight. Alternately, update immediately at <%= webadminUrl %>.
A new version <%= updateInfo.manifest.version %> of the app '<%= app.manifest.title %>' installed at <%= app.fqdn %> is available!
Changes:
<%= updateInfo.manifest.changelog %>
<% if (!hasSubscription) { -%>
*Keep your Cloudron automatically up-to-date and secure by upgrading to a paid plan at* <%= webadminUrl %>/#/settings
<% } -%>
Powered by https://cloudron.io
@@ -16,4 +17,35 @@ Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<center>
<img src="<%= cloudronAvatarUrl %>" width="128px" height="128px"/>
<h3>Dear <%= cloudronName %> Admin,</h3>
<div style="width: 650px; text-align: left;">
<p>
A new version <%= updateInfo.manifest.version %> of the app '<%= app.manifest.title %>' installed at <%= app.fqdn %> is available!
</p>
<h5>Changelog:</h5>
<%- changelogHTML %>
<br/>
<% if (!hasSubscription) { %>
<p>Keep your Cloudron automatically up-to-date and secure by upgrading to a <a href="<%= webadminUrl %>/#/settings">paid plan</a>.</p>
<% } %>
<br/>
</div>
<div style="font-size: 10px; color: #333333; background: #ffffff;">
Powered by <a href="https://cloudron.io">Cloudron</a>.
</div>
</center>
<img src="https://analytics.cloudron.io/piwik.php?idsite=2&rec=1&e_c=CloudronEmail&e_a=update" style="border:0" alt="" />
<% } %>
+12 -9
View File
@@ -4,15 +4,18 @@ Dear <%= cloudronName %> Admin,
Version <%= newBoxVersion %> for Cloudron <%= fqdn %> is now available!
Your Cloudron will update automatically tonight. Alternately, update immediately at <%= webadminUrl %>.
Changelog:
<% for (var i = 0; i < changelog.length; i++) { %>
* <%- changelog[i] %>
<% } %>
Thank you,
your Cloudron
<% if (!hasSubscription) { -%>
*Keep your Cloudron automatically up-to-date and secure by upgrading to a paid plan at* <%= webadminUrl %>/#/settings
<% } -%>
Powered by https://cloudron.io
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
@@ -27,11 +30,6 @@ your Cloudron
Version <b><%= newBoxVersion %></b> for Cloudron <%= fqdn %> is now available!
</p>
<p>
Your Cloudron will update automatically tonight.<br/>
Alternately, update immediately <a href="<%= webadminUrl %>">here</a>.
</p>
<h5>Changelog:</h5>
<ul>
<% for (var i = 0; i < changelogHTML.length; i++) { %>
@@ -40,6 +38,11 @@ your Cloudron
</ul>
<br/>
<% if (!hasSubscription) { %>
<p>Keep your Cloudron automatically up-to-date and secure by upgrading to a <a href="<%= webadminUrl %>/#/settings">paid plan</a>.</p>
<% } %>
<br/>
</div>
+47 -5
View File
@@ -2,7 +2,19 @@
Dear <%= cloudronName %> Admin,
This is the weekly summary of activities on your Cloudron <%= fqdn %>.
This is a summary of the activities on your Cloudron <%= fqdn %>.
<% if (info.usersAdded.length) { -%>
The following users were added:
<% for (var i = 0; i < info.usersAdded.length; i++) { -%>
* <%- info.usersAdded[i].email %>
<% }} -%>
<% if (info.certRenewals.length) { -%>
The certificates of the following apps was renewed:
<% for (var i = 0; i < info.certRenewals.length; i++) { -%>
* <%- info.certRenewals[i].domain %> - <%- info.certRenewals[i].errorMessage || 'Success' %>
<% }} -%>
<% if (info.pendingBoxUpdate) { -%>
Cloudron v<%- info.pendingBoxUpdate.version %> is available:
@@ -33,6 +45,14 @@ The following apps were updated:
<% for (var j = 0; j < info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n').length; j++) { -%>
<%= info.finishedAppUpdates[i].toManifest.changelog.trim().split('\n')[j] %>
<% }}} -%>
<% if (info.finishedBackups.length) { -%>
Last successful backup: <%- info.finishedBackups[0].backupId || info.finishedBackups[0].filename %>
<% } else { -%>
This Cloudron did **not** backup successfully in the last week!
<% } -%>
<% if (!info.hasSubscription) { -%>
*Keep your Cloudron automatically up-to-date and secure by upgrading to a paid plan at* <%= webadminUrl %>/#/settings
@@ -52,9 +72,25 @@ Sent at: <%= new Date().toUTCString() %>
<br/>
<p>Weekly summary of activities on your Cloudron <a href="<%= webadminUrl %>"><%= cloudronName %></a>:</p>
<p>This is a summary of the activities on your Cloudron <a href="<%= webadminUrl %>"><%= cloudronName %></a> last week.</p>
<br/>
<% if (info.usersAdded.length) { -%>
<p><b>The following users were added:</b></p>
<ul>
<% for (var i = 0; i < info.usersAdded.length; i++) { %>
<li><%- info.usersAdded[i].email %></li>
<% } %>
</ul>
<% } %>
<% if (info.certRenewals.length) { -%>
<p><b>The certificates of the following apps were renewed:</b></p>
<ul>
<% for (var i = 0; i < info.certRenewals.length; i++) { %>
<li><%- info.certRenewals[i].domain %> - <%- info.certRenewals[i].errorMessage || 'Success' %></li>
<% } %>
</ul>
<% } %>
<% if (info.pendingBoxUpdate) { -%>
<p><b>Cloudron v<%- info.pendingBoxUpdate.version %> is available:</b></p>
@@ -113,6 +149,12 @@ Sent at: <%= new Date().toUTCString() %>
</ul>
<% } %>
<% if (info.finishedBackups.length) { %>
<p><b>Last successful backup : </b> <%= info.finishedBackups[0].backupId || info.finishedBackups[0].filename %> </p>
<% } else { %>
<p><b>This Cloudron did not backup successfully in the last week!</b></p>
<% } %>
<br/>
<% if (!info.hasSubscription) { %>
@@ -123,12 +165,12 @@ Sent at: <%= new Date().toUTCString() %>
<br/>
<br/>
<p style="text-align: right;">
<center>
<small>
Powered by <a href="https://cloudron.io">Cloudron</a><br/>
Sent on <%= new Date().toUTCString() %>
</small>
</p>
</center>
</div>
</center>
-14
View File
@@ -1,14 +0,0 @@
<%if (format === 'text') { %>
New <%= type %> from <%= fqdn %>.
Sender: <%= user.email %>
Sent at: <%= new Date().toUTCString() %>
Subject: <%= subject %>
-----------------------------------------------------------
<%= description %>
<% } else { %>
<% } %>
+11
View File
@@ -0,0 +1,11 @@
<%if (format === 'text') { %>
Test email from <%= fqdn %>,
If you can read this, your Cloudron email settings are good.
Sent at: <%= new Date().toUTCString() %>
<% } else { %>
<% } %>
+2 -1
View File
@@ -134,7 +134,8 @@ function getGroup(name, callback) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
database.query('SELECT users.username FROM groupMembers INNER JOIN users ON groupMembers.userId = users.id WHERE groupMembers.groupId = ?', [ results[0].ownerId ], function (error, memberList) {
// username can be null if the user has not signed up with the invite yet
database.query('SELECT users.username FROM groupMembers INNER JOIN users ON groupMembers.userId = users.id WHERE groupMembers.groupId = ? AND users.username IS NOT NULL', [ results[0].ownerId ], function (error, memberList) {
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
results[0].members = memberList.map(function (m) { return m.username; });
+57 -40
View File
@@ -23,12 +23,7 @@ exports = module.exports = {
certificateRenewalError: certificateRenewalError,
FEEDBACK_TYPE_FEEDBACK: 'feedback',
FEEDBACK_TYPE_TICKET: 'ticket',
FEEDBACK_TYPE_APP_MISSING: 'app_missing',
FEEDBACK_TYPE_APP_ERROR: 'app_error',
FEEDBACK_TYPE_UPGRADE_REQUEST: 'upgrade_request',
sendFeedback: sendFeedback,
sendTestMail: sendTestMail,
_getMailQueue: _getMailQueue,
_clearMailQueue: _clearMailQueue
@@ -167,6 +162,7 @@ function getAdminEmails(callback) {
if (admins.length === 0) return callback(new Error('No admins on this cloudron')); // box not activated yet
var adminEmails = [ ];
if (admins[0].alternateEmail) adminEmails.push(admins[0].alternateEmail);
admins.forEach(function (admin) { adminEmails.push(admin.email); });
callback(null, adminEmails);
@@ -240,7 +236,7 @@ function userAdded(user, inviteSent) {
debug('Sending mail for userAdded %s including invite link', inviteSent ? 'not' : '');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
adminEmails = _.difference(adminEmails, [ user.email ]);
@@ -337,7 +333,7 @@ function appDied(app) {
debug('Sending mail for app %s @ %s died', app.id, app.fqdn);
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
@@ -350,12 +346,13 @@ function appDied(app) {
});
}
function boxUpdateAvailable(newBoxVersion, changelog) {
function boxUpdateAvailable(hasSubscription, newBoxVersion, changelog) {
assert.strictEqual(typeof hasSubscription, 'boolean');
assert.strictEqual(typeof newBoxVersion, 'string');
assert(util.isArray(changelog));
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
settings.getCloudronName(function (error, cloudronName) {
if (error) {
@@ -369,6 +366,7 @@ function boxUpdateAvailable(newBoxVersion, changelog) {
fqdn: config.fqdn(),
webadminUrl: config.adminOrigin(),
newBoxVersion: newBoxVersion,
hasSubscription: hasSubscription,
changelog: changelog,
changelogHTML: changelog.map(function (e) { return converter.makeHtml(e); }),
cloudronName: cloudronName,
@@ -381,7 +379,7 @@ function boxUpdateAvailable(newBoxVersion, changelog) {
var templateDataHTML = JSON.parse(JSON.stringify(templateData));
templateDataHTML.format = 'html';
var mailOptions = {
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('%s has a new update available', config.fqdn()),
@@ -394,21 +392,49 @@ function boxUpdateAvailable(newBoxVersion, changelog) {
});
}
function appUpdateAvailable(app, updateInfo) {
function appUpdateAvailable(app, hasSubscription, info) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof updateInfo, 'object');
assert.strictEqual(typeof hasSubscription, 'boolean');
assert.strictEqual(typeof info, 'object');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('[%s] Update available for %s', config.fqdn(), app.fqdn),
text: render('app_update_available.ejs', { fqdn: config.fqdn(), webadminUrl: config.adminOrigin(), app: app, updateInfo: updateInfo, format: 'text' })
};
settings.getCloudronName(function (error, cloudronName) {
if (error) {
debug(error);
cloudronName = 'Cloudron';
}
enqueue(mailOptions);
var converter = new showdown.Converter();
var templateData = {
fqdn: config.fqdn(),
webadminUrl: config.adminOrigin(),
hasSubscription: hasSubscription,
app: app,
updateInfo: info,
changelogHTML: converter.makeHtml(info.manifest.changelog),
cloudronName: cloudronName,
cloudronAvatarUrl: config.adminOrigin() + '/api/v1/cloudron/avatar'
};
var templateDataText = JSON.parse(JSON.stringify(templateData));
templateDataText.format = 'text';
var templateDataHTML = JSON.parse(JSON.stringify(templateData));
templateDataHTML.format = 'html';
var mailOptions = {
from: mailConfig().from,
to: adminEmails.join(', '),
subject: util.format('App %s has a new update available', app.fqdn),
text: render('app_update_available.ejs', templateDataText),
html: render('app_update_available.ejs', templateDataHTML)
};
enqueue(mailOptions);
});
});
}
@@ -416,7 +442,7 @@ function sendDigest(info) {
assert.strictEqual(typeof info, 'object');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
settings.getCloudronName(function (error, cloudronName) {
if (error) {
@@ -455,7 +481,7 @@ function outOfDiskSpace(message) {
assert.strictEqual(typeof message, 'string');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
@@ -472,7 +498,7 @@ function backupFailed(error) {
var message = splatchError(error);
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
@@ -490,7 +516,7 @@ function certificateRenewalError(domain, message) {
assert.strictEqual(typeof message, 'string');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
@@ -508,7 +534,7 @@ function oomEvent(program, context) {
assert.strictEqual(typeof context, 'string');
getAdminEmails(function (error, adminEmails) {
if (error) return console.log('Error getting admins', error);
if (error) return debug('Error getting admins', error);
var mailOptions = {
from: mailConfig().from,
@@ -540,23 +566,14 @@ function unexpectedExit(program, context, callback) {
sendMails([ mailOptions ], callback);
}
function sendFeedback(user, type, subject, description) {
assert.strictEqual(typeof user, 'object');
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof subject, 'string');
assert.strictEqual(typeof description, 'string');
assert(type === exports.FEEDBACK_TYPE_TICKET ||
type === exports.FEEDBACK_TYPE_FEEDBACK ||
type === exports.FEEDBACK_TYPE_APP_MISSING ||
type === exports.FEEDBACK_TYPE_UPGRADE_REQUEST ||
type === exports.FEEDBACK_TYPE_APP_ERROR);
function sendTestMail(email) {
assert.strictEqual(typeof email, 'string');
var mailOptions = {
from: mailConfig().from,
to: 'support@cloudron.io',
subject: util.format('[%s] %s - %s', type, config.fqdn(), subject),
text: render('feedback.ejs', { fqdn: config.fqdn(), type: type, user: user, subject: subject, description: description, format: 'text'})
to: email,
subject: util.format('Test Email from %s', config.fqdn()),
text: render('test.ejs', { fqdn: config.fqdn(), format: 'text'})
};
enqueue(mailOptions);
+2
View File
@@ -32,6 +32,7 @@ function configureAdmin(certFilePath, keyFilePath, configFileName, vhost, callba
sourceDir: path.resolve(__dirname, '..'),
adminOrigin: config.adminOrigin(),
vhost: vhost, // if vhost is empty it will become the default_server
hasIPv6: config.hasIPv6(),
endpoint: 'admin',
certFilePath: certFilePath,
keyFilePath: keyFilePath,
@@ -60,6 +61,7 @@ function configureApp(app, certFilePath, keyFilePath, callback) {
sourceDir: sourceDir,
adminOrigin: config.adminOrigin(),
vhost: vhost,
hasIPv6: config.hasIPv6(),
port: app.httpPort,
endpoint: endpoint,
certFilePath: certFilePath,
+6 -4
View File
@@ -7,7 +7,8 @@ var config = require('./config.js'),
exports = module.exports = {
CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'),
INFRA_VERSION_FILE: path.join(config.baseDir(), 'platformdata/INFRA_VERSION'),
BACKUP_RESULT_FILE: path.join(config.baseDir(), 'platformdata/backupresult'),
BACKUP_RESULT_FILE: path.join(config.baseDir(), 'platformdata/backup/result.txt'),
BACKUP_LOG_FILE: path.join(config.baseDir(), 'platformdata/backup/logs.txt'),
OLD_DATA_DIR: path.join(config.baseDir(), 'data'),
PLATFORM_DATA_DIR: path.join(config.baseDir(), 'platformdata'),
@@ -18,16 +19,17 @@ exports = module.exports = {
ADDON_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/addons'),
COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/collectd/collectd.conf.d'),
LOGROTATE_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/logrotate.d'),
MAIL_DATA_DIR: path.join(config.baseDir(), 'platformdata/mail'),
NGINX_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx'),
NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx/applications'),
NGINX_CERT_DIR: path.join(config.baseDir(), 'platformdata/nginx/cert'),
BACKUP_INFO_DIR: path.join(config.baseDir(), 'platformdata/backup'),
SNAPSHOT_INFO_FILE: path.join(config.baseDir(), 'platformdata/backup/snapshot-info.json'),
// this is not part of appdata because an icon may be set before install
ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'),
APP_ICONS_DIR: path.join(config.baseDir(), 'boxdata/appicons'),
MAIL_DATA_DIR: path.join(config.baseDir(), 'boxdata/mail'),
ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'),
APP_CERTS_DIR: path.join(config.baseDir(), 'boxdata/certs'),
CLOUDRON_AVATAR_FILE: path.join(config.baseDir(), 'boxdata/avatar.png'),
FIRST_RUN_FILE: path.join(config.baseDir(), 'boxdata/first_run'),
UPDATE_CHECKER_FILE: path.join(config.baseDir(), 'boxdata/updatechecker.json')
};
+12 -10
View File
@@ -162,7 +162,7 @@ function startMysql(callback) {
const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256;
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mysql_vars.sh',
'MYSQL_ROOT_PASSWORD=' + rootPassword +'\nMYSQL_ROOT_HOST=172.18.0.1', 'utf8')) {
'MYSQL_ROOT_PASSWORD=' + rootPassword +'\nMYSQL_ROOT_HOST=172.18.0.1', 'utf8')) {
return callback(new Error('Could not create mysql var file:' + safe.error.message));
}
@@ -254,17 +254,17 @@ function createMailConfig(callback) {
var mailFromValidation = result[settings.MAIL_FROM_VALIDATION_KEY];
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/mail.ini',
`mail_domain=${fqdn}\nmail_server_name=${mailFqdn}\nalerts_from=${alertsFrom}\nalerts_to=${alertsTo}\ncatch_all=${catchAll}\nmail_from_validation=${mailFromValidation}\n`, 'utf8')) {
`mail_domain=${fqdn}\nmail_server_name=${mailFqdn}\nalerts_from=${alertsFrom}\nalerts_to=${alertsTo}\ncatch_all=${catchAll}\nmail_from_validation=${mailFromValidation}\ndkim_selector=${config.dkimSelector()}\n`, 'utf8')) {
return callback(new Error('Could not create mail var file:' + safe.error.message));
}
var relay = result[settings.MAIL_RELAY_KEY];
const enabled = relay.provider !== 'cloudron-smtp' ? true : false,
host = relay.host || '',
port = relay.port || 25,
username = relay.username || '',
password = relay.password || '';
host = relay.host || '',
port = relay.port || 25,
username = relay.username || '',
password = relay.password || '';
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/smtp_forward.ini',
`enable_outbound=${enabled}\nhost=${host}\nport=${port}\nenable_tls=true\nauth_type=plain\nauth_user=${username}\nauth_pass=${password}`, 'utf8')) {
@@ -283,13 +283,13 @@ function startMail(callback) {
// mail container uses /app/data for backed up data and /run for restart-able data
const tag = infra.images.mail.tag;
const dataDir = paths.PLATFORM_DATA_DIR;
const memoryLimit = Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256);
// admin and mail share the same certificate
certificates.getAdminCertificate(function (error, cert, key) {
if (error) return callback(error);
// the setup script copies dhparams.pem to /addons/mail
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message));
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + safe.error.message));
@@ -311,8 +311,8 @@ function startMail(callback) {
--dns 172.18.0.1 \
--dns-search=. \
--env ENABLE_MDA=${mailConfig.enabled} \
-v "${dataDir}/mail:/app/data" \
-v "${dataDir}/addons/mail:/etc/mail" \
-v "${paths.MAIL_DATA_DIR}:/app/data" \
-v "${paths.PLATFORM_DATA_DIR}/addons/mail:/etc/mail" \
${ports} \
--read-only -v /run -v /tmp ${tag}`;
@@ -329,7 +329,9 @@ function startMail(callback) {
async.mapSeries(records, function (record, iteratorCallback) {
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
}, callback);
}, NOOP_CALLBACK); // do not crash if DNS creds do not work in startup sequence
callback();
});
});
});
+14 -3
View File
@@ -2,8 +2,9 @@
exports = module.exports = {
set: set,
setDetail: setDetail,
clear: clear,
get: get,
getAll: getAll,
UPDATE: 'update',
BACKUP: 'backup',
@@ -29,12 +30,22 @@ function set(tag, percent, message) {
progress[tag] = {
percent: percent,
message: message
message: message,
detail: ''
};
debug('%s: %s %s', tag, percent, message);
}
function setDetail(tag, detail) {
assert.strictEqual(typeof tag, 'string');
assert.strictEqual(typeof detail, 'string');
if (!progress[tag]) return debug('unable to set detail %s', detail);
progress[tag].detail = detail;
}
function clear(tag) {
assert.strictEqual(typeof tag, 'string');
@@ -43,6 +54,6 @@ function clear(tag) {
debug('clearing %s', tag);
}
function get() {
function getAll() {
return progress;
}
+2 -4
View File
@@ -52,7 +52,6 @@ function removeInternalAppFields(app) {
health: app.health,
location: app.location,
accessRestriction: app.accessRestriction,
lastBackupId: app.lastBackupId,
manifest: app.manifest,
portBindings: app.portBindings,
iconUrl: app.iconUrl,
@@ -121,6 +120,7 @@ function installApp(req, res, next) {
if ('icon' in data && typeof data.icon !== 'string') return next(new HttpError(400, 'icon is not a string'));
if (data.backupId && typeof data.backupId !== 'string') return next(new HttpError(400, 'backupId must be string or null'));
if (data.backupFormat && typeof data.backupFormat !== 'string') return next(new HttpError(400, 'backupFormat must be string or null'));
// falsy values in cert and key unset the cert
if (data.key && typeof data.cert !== 'string') return next(new HttpError(400, 'cert must be a string'));
@@ -313,17 +313,15 @@ function updateApp(req, res, next) {
if ('appStoreId' in data && typeof data.appStoreId !== 'string') return next(new HttpError(400, 'appStoreId must be a string'));
if (!data.manifest && !data.appStoreId) return next(new HttpError(400, 'appStoreId or manifest is required'));
if ('portBindings' in data && typeof data.portBindings !== 'object') return next(new HttpError(400, 'portBindings must be an object'));
if ('icon' in data && typeof data.icon !== 'string') return next(new HttpError(400, 'icon is not a string'));
if ('force' in data && typeof data.force !== 'boolean') return next(new HttpError(400, 'force must be a boolean'));
debug('Update app id:%s to manifest:%j with portBindings:%j', req.params.id, data.manifest, data.portBindings);
debug('Update app id:%s to manifest:%j', req.params.id, data.manifest);
apps.update(req.params.id, req.body, auditSource(req), function (error) {
if (error && error.reason === AppsError.NOT_FOUND) return next(new HttpError(404, 'No such app'));
if (error && error.reason === AppsError.BAD_FIELD) return next(new HttpError(400, error.message));
if (error && error.reason === AppsError.BAD_STATE) return next(new HttpError(409, error.message));
if (error && error.reason === AppsError.PORT_CONFLICT) return next(new HttpError(409, 'Port ' + error.message + ' is already in use.'));
if (error) return next(new HttpError(500, error));
next(new HttpSuccess(202, { }));
+44 -24
View File
@@ -15,10 +15,13 @@ exports = module.exports = {
feedback: feedback,
checkForUpdates: checkForUpdates,
getLogs: getLogs,
getLogStream: getLogStream
getLogStream: getLogStream,
sendTestMail: sendTestMail
};
var assert = require('assert'),
var appstore = require('../appstore.js'),
AppstoreError = require('../appstore.js').AppstoreError,
assert = require('assert'),
async = require('async'),
cloudron = require('../cloudron.js'),
CloudronError = cloudron.CloudronError,
@@ -65,13 +68,13 @@ function activate(req, res, next) {
superagent.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/setup/done').query({ setupToken: req.query.setupToken })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return next(new HttpError(500, error));
if (result.statusCode === 403) return next(new HttpError(403, 'Invalid token'));
if (result.statusCode === 409) return next(new HttpError(409, 'Already setup'));
if (result.statusCode !== 201) return next(new HttpError(500, result.text || 'Internal error'));
if (error && !error.response) return next(new HttpError(500, error));
if (result.statusCode === 403) return next(new HttpError(403, 'Invalid token'));
if (result.statusCode === 409) return next(new HttpError(409, 'Already setup'));
if (result.statusCode !== 201) return next(new HttpError(500, result.text || 'Internal error'));
next(new HttpSuccess(201, info));
});
next(new HttpSuccess(201, info));
});
});
}
@@ -99,15 +102,15 @@ function setupTokenAuth(req, res, next) {
if (typeof req.query.setupToken !== 'string' || !req.query.setupToken) return next(new HttpError(400, 'setupToken must be a non empty string'));
superagent.get(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/setup/verify').query({ setupToken:req.query.setupToken })
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return next(new HttpError(500, error));
if (result.statusCode === 403) return next(new HttpError(403, 'Invalid token'));
if (result.statusCode === 409) return next(new HttpError(409, 'Already setup'));
if (result.statusCode !== 200) return next(new HttpError(500, result.text || 'Internal error'));
.timeout(30 * 1000)
.end(function (error, result) {
if (error && !error.response) return next(new HttpError(500, error));
if (result.statusCode === 403) return next(new HttpError(403, 'Invalid token'));
if (result.statusCode === 409) return next(new HttpError(409, 'Already setup'));
if (result.statusCode !== 200) return next(new HttpError(500, result.text || 'Internal error'));
next();
});
next();
});
} else {
next();
}
@@ -141,7 +144,7 @@ function getStatus(req, res, next) {
}
function getProgress(req, res, next) {
return next(new HttpSuccess(200, progress.get()));
return next(new HttpSuccess(200, progress.getAll()));
}
function reboot(req, res, next) {
@@ -184,6 +187,10 @@ function getConfig(req, res, next) {
cloudron.getConfig(function (error, cloudronConfig) {
if (error) return next(new HttpError(500, error));
if (!req.user.admin) {
cloudronConfig = _.pick(cloudronConfig, 'apiServerOrigin', 'webServerOrigin', 'fqdn', 'version', 'progress', 'isCustomDomain', 'isDemo', 'cloudronName', 'provider');
}
next(new HttpSuccess(200, cloudronConfig));
});
}
@@ -219,17 +226,20 @@ function checkForUpdates(req, res, next) {
function feedback(req, res, next) {
assert.strictEqual(typeof req.user, 'object');
if (req.body.type !== mailer.FEEDBACK_TYPE_FEEDBACK &&
req.body.type !== mailer.FEEDBACK_TYPE_TICKET &&
req.body.type !== mailer.FEEDBACK_TYPE_APP_MISSING &&
req.body.type !== mailer.FEEDBACK_TYPE_UPGRADE_REQUEST &&
req.body.type !== mailer.FEEDBACK_TYPE_APP_ERROR) return next(new HttpError(400, 'type must be either "ticket", "feedback", "app_missing", "app_error" or "upgrade_request"'));
const VALID_TYPES = [ 'feedback', 'ticket', 'app_missing', 'app_error', 'upgrade_request' ];
if (typeof req.body.type !== 'string' || !req.body.type) return next(new HttpError(400, 'type must be string'));
if (VALID_TYPES.indexOf(req.body.type) === -1) return next(new HttpError(400, 'unknown type'));
if (typeof req.body.subject !== 'string' || !req.body.subject) return next(new HttpError(400, 'subject must be string'));
if (typeof req.body.description !== 'string' || !req.body.description) return next(new HttpError(400, 'description must be string'));
mailer.sendFeedback(req.user, req.body.type, req.body.subject, req.body.description);
appstore.sendFeedback(_.extend(req.body, { email: req.user.alternateEmail || req.user.email, displayName: req.user.displayName }), function (error) {
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return next(new HttpError(402, 'Login to App Store to create support tickets. You can also email support@cloudron.io'));
if (error) return next(new HttpError(503, 'Error contacting cloudron.io. Please email support@cloudron.io'));
next(new HttpSuccess(201, {}));
});
next(new HttpSuccess(201, {}));
}
function getLogs(req, res, next) {
@@ -297,3 +307,13 @@ function getLogStream(req, res, next) {
logStream.on('error', res.end.bind(res, null));
});
}
function sendTestMail(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
if (!req.body.email || typeof req.body.email !== 'string') return next(new HttpError(400, 'email must be a non-empty string'));
mailer.sendTestMail(req.body.email);
next(new HttpSuccess(202));
}
+1 -8
View File
@@ -4,8 +4,7 @@ exports = module.exports = {
enabled: enabled,
setEnabled: setEnabled,
status: status,
login: login,
apps: apps
login: login
};
var developer = require('../developer.js'),
@@ -52,9 +51,3 @@ function login(req, res, next) {
})(req, res, next);
}
function apps(req, res, next) {
developer.getNonApprovedApps(function (error, result) {
if (error) return next(new HttpError(500, error));
next(new HttpSuccess(200, { apps: result }));
});
}
+2
View File
@@ -274,6 +274,8 @@ function setBackupConfig(req, res, next) {
if (typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
if (typeof req.body.retentionSecs !== 'number') return next(new HttpError(400, 'retentionSecs is required'));
if ('key' in req.body && typeof req.body.key !== 'string') return next(new HttpError(400, 'key must be a string'));
if (typeof req.body.format !== 'string') return next(new HttpError(400, 'format must be a string'));
if ('acceptSelfSignedCerts' in req.body && typeof req.body.acceptSelfSignedCerts !== 'boolean') return next(new HttpError(400, 'format must be a boolean'));
settings.setBackupConfig(req.body, function (error) {
if (error && error.reason === SettingsError.BAD_FIELD) return next(new HttpError(400, error.message));
+29 -30
View File
@@ -22,7 +22,7 @@ var appdb = require('../../appdb.js'),
hock = require('hock'),
http = require('http'),
https = require('https'),
js2xml = require('js2xmlparser'),
js2xml = require('js2xmlparser').parse,
ldap = require('../../ldap.js'),
net = require('net'),
nock = require('nock'),
@@ -269,7 +269,6 @@ describe('App API', function () {
it('app install fails - missing manifest', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ password: PASSWORD })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('appStoreId or manifest is required');
@@ -280,7 +279,7 @@ describe('App API', function () {
it('app install fails - null manifest', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: null, password: PASSWORD })
.send({ manifest: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('appStoreId or manifest is required');
@@ -291,7 +290,7 @@ describe('App API', function () {
it('app install fails - bad manifest format', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: 'epic', password: PASSWORD })
.send({ manifest: 'epic' })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('manifest must be an object');
@@ -302,7 +301,7 @@ describe('App API', function () {
it('app install fails - empty appStoreId format', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: null, appStoreId: '', password: PASSWORD })
.send({ manifest: null, appStoreId: '' })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('appStoreId or manifest is required');
@@ -323,7 +322,7 @@ describe('App API', function () {
it('app install fails - invalid location', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: '!awesome', accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: '!awesome', accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('Hostname can only contain alphanumerics and hyphen');
@@ -334,7 +333,7 @@ describe('App API', function () {
it('app install fails - invalid location type', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: 42, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: 42, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('location is required');
@@ -345,10 +344,10 @@ describe('App API', function () {
it('app install fails - reserved admin location', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: constants.ADMIN_LOCATION, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: 'my', accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql(constants.ADMIN_LOCATION + ' is reserved');
expect(res.body.message).to.eql('my is reserved');
done();
});
});
@@ -356,7 +355,7 @@ describe('App API', function () {
it('app install fails - reserved api location', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: constants.API_LOCATION, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: constants.API_LOCATION, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql(constants.API_LOCATION + ' is reserved');
@@ -367,7 +366,7 @@ describe('App API', function () {
it('app install fails - portBindings must be object', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: APP_LOCATION, portBindings: 23, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: APP_LOCATION, portBindings: 23, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('portBindings must be an object');
@@ -378,7 +377,7 @@ describe('App API', function () {
it('app install fails - accessRestriction is required', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: APP_LOCATION, portBindings: {} })
.send({ manifest: APP_MANIFEST, location: APP_LOCATION, portBindings: {} })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('accessRestriction is required');
@@ -389,7 +388,7 @@ describe('App API', function () {
it('app install fails - accessRestriction type is wrong', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: APP_LOCATION, portBindings: {}, accessRestriction: '' })
.send({ manifest: APP_MANIFEST, location: APP_LOCATION, portBindings: {}, accessRestriction: '' })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(res.body.message).to.eql('accessRestriction is required');
@@ -400,7 +399,7 @@ describe('App API', function () {
it('app install fails for non admin', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token_1 })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(403);
done();
@@ -412,7 +411,7 @@ describe('App API', function () {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ appStoreId: APP_STORE_ID, password: PASSWORD, location: APP_LOCATION, portBindings: null, accessRestriction: { users: [ 'someuser' ], groups: [] } })
.send({ appStoreId: APP_STORE_ID, location: APP_LOCATION, portBindings: null, accessRestriction: { users: [ 'someuser' ], groups: [] } })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
expect(fake.isDone()).to.be.ok();
@@ -426,7 +425,7 @@ describe('App API', function () {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ appStoreId: APP_STORE_ID, password: PASSWORD, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.send({ appStoreId: APP_STORE_ID, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(503);
expect(fake1.isDone()).to.be.ok();
@@ -442,7 +441,7 @@ describe('App API', function () {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ appStoreId: APP_STORE_ID, password: PASSWORD, location: APP_LOCATION, portBindings: null, accessRestriction: { users: [ 'someuser' ], groups: [] } })
.send({ appStoreId: APP_STORE_ID, location: APP_LOCATION, portBindings: null, accessRestriction: { users: [ 'someuser' ], groups: [] } })
.end(function (err, res) {
expect(res.statusCode).to.equal(202);
expect(res.body.id).to.be.a('string');
@@ -455,7 +454,7 @@ describe('App API', function () {
it('app install fails because of conflicting location', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ manifest: APP_MANIFEST, password: PASSWORD, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.send({ manifest: APP_MANIFEST, location: APP_LOCATION, portBindings: null, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(409);
done();
@@ -565,7 +564,7 @@ describe('App API', function () {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ appStoreId: APP_STORE_ID, password: PASSWORD, location: APP_LOCATION_2, portBindings: null, accessRestriction: null })
.send({ appStoreId: APP_STORE_ID, location: APP_LOCATION_2, portBindings: null, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(202);
expect(res.body.id).to.be.a('string');
@@ -649,7 +648,7 @@ describe('App installation', function () {
awsHockInstance
.get('/2013-04-01/hostedzone')
.max(Infinity)
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { arrayMap: { HostedZones: 'HostedZone'} }), { 'Content-Type': 'application/xml' })
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { wrapHandlers: { HostedZones: () => 'HostedZone'} }), { 'Content-Type': 'application/xml' })
.filteringPathRegEx(/name=[^&]*/, 'name=location')
.get('/2013-04-01/hostedzone/ZONEID/rrset?maxitems=1&name=location&type=A')
.max(Infinity)
@@ -695,7 +694,7 @@ describe('App installation', function () {
superagent.post(SERVER_URL + '/api/v1/apps/install')
.query({ access_token: token })
.send({ appStoreId: APP_STORE_ID, password: PASSWORD, location: APP_LOCATION, portBindings: { ECHO_SERVER_PORT: 7171 }, accessRestriction: null })
.send({ appStoreId: APP_STORE_ID, location: APP_LOCATION, portBindings: { ECHO_SERVER_PORT: 7171 }, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(202);
expect(fake1.isDone()).to.be.ok();
@@ -986,7 +985,7 @@ describe('App installation', function () {
it('cannot reconfigure app with bad location', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: 1234, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null })
.send({ location: 1234, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -996,7 +995,7 @@ describe('App installation', function () {
it('cannot reconfigure app with bad accessRestriction', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: false })
.send({ portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: false })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -1006,7 +1005,7 @@ describe('App installation', function () {
it('cannot reconfigure app with only the cert, no key', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: validCert1 })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: validCert1 })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -1016,7 +1015,7 @@ describe('App installation', function () {
it('cannot reconfigure app with only the key, no cert', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, key: validKey1 })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, key: validKey1 })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -1026,7 +1025,7 @@ describe('App installation', function () {
it('cannot reconfigure app with cert not being a string', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: 1234, key: validKey1 })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: 1234, key: validKey1 })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -1036,7 +1035,7 @@ describe('App installation', function () {
it('cannot reconfigure app with key not being a string', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, cert: validCert1, key: 1234 })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, cert: validCert1, key: 1234 })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
@@ -1046,7 +1045,7 @@ describe('App installation', function () {
it('non admin cannot reconfigure app', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token_1 })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null })
.end(function (err, res) {
expect(res.statusCode).to.equal(403);
done();
@@ -1056,7 +1055,7 @@ describe('App installation', function () {
it('can reconfigure app', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 } })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 } })
.end(function (err, res) {
expect(res.statusCode).to.equal(202);
checkConfigureStatus(0, done);
@@ -1098,7 +1097,7 @@ describe('App installation', function () {
it('can reconfigure app with custom certificate', function (done) {
superagent.post(SERVER_URL + '/api/v1/apps/' + APP_ID + '/configure')
.query({ access_token: token })
.send({ password: PASSWORD, location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: validCert1, key: validKey1 })
.send({ location: APP_LOCATION_NEW, portBindings: { ECHO_SERVER_PORT: 7172 }, accessRestriction: null, cert: validCert1, key: validKey1 })
.end(function (err, res) {
expect(res.statusCode).to.equal(202);
checkConfigureStatus(0, done);
+32 -45
View File
@@ -10,20 +10,16 @@ var appdb = require('../../appdb.js'),
config = require('../../config.js'),
database = require('../../database.js'),
expect = require('expect.js'),
hock = require('hock'),
http = require('http'),
nock = require('nock'),
superagent = require('superagent'),
server = require('../../server.js'),
settings = require('../../settings.js'),
url = require('url');
settings = require('../../settings.js');
var SERVER_URL = 'http://localhost:' + config.get('port');
var USERNAME = 'superadmin', PASSWORD = 'Foobar?1337', EMAIL ='silly@me.com';
var token = null;
var server;
function setup(done) {
nock.cleanAll();
config._reset();
@@ -40,19 +36,19 @@ function setup(done) {
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(result.statusCode).to.eql(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(result.statusCode).to.eql(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
},
function addApp(callback) {
@@ -61,7 +57,7 @@ function setup(done) {
},
function createSettings(callback) {
settings.setBackupConfig({ provider: 'caas', token: 'BACKUP_TOKEN', bucket: 'Bucket', prefix: 'Prefix' }, callback);
settings.setBackupConfig({ provider: 'filesystem', backupFolder: '/tmp', format: 'tgz' }, callback);
}
], done);
}
@@ -75,19 +71,12 @@ function cleanup(done) {
}
describe('Backups API', function () {
var apiHockInstance = hock.createHock({ throwOnUnmatched: false }), apiHockServer;
var scope1 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } }, { 'Content-Type': 'application/json' });
before(setup);
before(function (done) {
apiHockInstance
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } }, { 'Content-Type': 'application/json' });
var port = parseInt(url.parse(config.apiServerOrigin()).port, 10);
apiHockServer = http.createServer(apiHockInstance.handler).listen(port, done);
});
after(function (done) {
apiHockServer.close();
done();
});
after(cleanup);
@@ -95,37 +84,35 @@ describe('Backups API', function () {
describe('create', function () {
it('fails due to mising token', function (done) {
superagent.post(SERVER_URL + '/api/v1/backups')
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
});
it('fails due to wrong token', function (done) {
superagent.post(SERVER_URL + '/api/v1/backups')
.query({ access_token: token.toUpperCase() })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
.query({ access_token: token.toUpperCase() })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
});
it('succeeds', function (done) {
superagent.post(SERVER_URL + '/api/v1/backups')
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
function checkAppstoreServerCalled() {
apiHockInstance.done(function (error) {
if (!error) return done();
function checkAppstoreServerCalled() {
if (scope1.isDone()) return done();
setTimeout(checkAppstoreServerCalled, 100);
});
}
}
checkAppstoreServerCalled();
});
checkAppstoreServerCalled();
});
});
});
});
+350 -308
View File
@@ -16,14 +16,15 @@ var async = require('async'),
superagent = require('superagent'),
server = require('../../server.js'),
settings = require('../../settings.js'),
shell = require('../../shell.js');
shell = require('../../shell.js'),
tokendb = require('../../tokendb.js');
var SERVER_URL = 'http://localhost:' + config.get('port');
var USERNAME = 'superadmin', PASSWORD = 'Foobar?1337', EMAIL ='silly@me.com';
var token = null; // authentication token
var USERNAME_1 = 'userTheFirst', EMAIL_1 = 'taO@zen.mac', userId_1, token_1;
var server;
function setup(done) {
nock.cleanAll();
config._reset();
@@ -32,7 +33,7 @@ function setup(done) {
server.start(function (error) {
if (error) return done(error);
settings.setBackupConfig({ provider: 'caas', token: 'BACKUP_TOKEN', bucket: 'Bucket', prefix: 'Prefix' }, done);
settings.setBackupConfig({ provider: 'filesystem', backupFolder: '/tmp', format: 'tgz' }, done);
});
}
@@ -65,89 +66,89 @@ describe('Cloudron', function () {
it('fails due to missing setupToken', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.send({ username: '', password: 'somepassword', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ username: '', password: 'somepassword', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails due to internal server error on appstore side', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(500, { message: 'this is wrong' });
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'strong#A3asdf', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(500);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'strong#A3asdf', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(500);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('fails due to empty username', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: '', password: 'ADSFsdf$%436', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: '', password: 'ADSFsdf$%436', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('fails due to empty password', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: '', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: '', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('fails due to empty email', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: '' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: '' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('fails due to wrong displayName type', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF?#asd546', email: 'admin@foo.bar', displayName: 1234 })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF?#asd546', email: 'admin@foo.bar', displayName: 1234 })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('fails due to invalid email', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'invalidemail' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'invalidemail' })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(scope.isDone()).to.be.ok();
done();
});
});
it('succeeds', function (done) {
@@ -155,27 +156,27 @@ describe('Cloudron', function () {
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'admin@foo.bar', displayName: 'tester' })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'admin@foo.bar', displayName: 'tester' })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
done();
});
});
it('fails the second time', function (done) {
var scope = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(409);
expect(scope.isDone()).to.be.ok();
done();
});
.query({ setupToken: 'somesetuptoken' })
.send({ username: 'someuser', password: 'ADSF#asd546', email: 'admin@foo.bar' })
.end(function (error, result) {
expect(result.statusCode).to.equal(409);
expect(scope.isDone()).to.be.ok();
done();
});
});
});
@@ -188,22 +189,36 @@ describe('Cloudron', function () {
var scope1 = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
config._reset();
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
},
function (callback) {
superagent.post(SERVER_URL + '/api/v1/users')
.query({ access_token: token })
.send({ username: USERNAME_1, email: EMAIL_1, invite: false })
.end(function (error, result) {
expect(result).to.be.ok();
expect(result.statusCode).to.eql(201);
token_1 = tokendb.generateToken();
userId_1 = result.body.id;
// HACK to get a token for second user (passwords are generated and the user should have gotten a password setup link...)
tokendb.add(token_1, userId_1, 'test-client-id', Date.now() + 100000, '*', callback);
});
}
], done);
});
@@ -211,60 +226,85 @@ describe('Cloudron', function () {
it('cannot get without token', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/config')
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
});
it('succeeds without appstore', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/config')
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
expect(result.body.webServerOrigin).to.eql(null);
expect(result.body.fqdn).to.eql(config.fqdn());
expect(result.body.isCustomDomain).to.eql(true);
expect(result.body.progress).to.be.an('object');
expect(result.body.update).to.be.an('object');
expect(result.body.version).to.eql(config.version());
expect(result.body.developerMode).to.be.a('boolean');
expect(result.body.size).to.eql(null);
expect(result.body.region).to.eql(null);
expect(result.body.memory).to.eql(os.totalmem());
expect(result.body.cloudronName).to.be.a('string');
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
expect(result.body.webServerOrigin).to.eql(null);
expect(result.body.fqdn).to.eql(config.fqdn());
expect(result.body.isCustomDomain).to.eql(true);
expect(result.body.progress).to.be.an('object');
expect(result.body.update).to.be.an('object');
expect(result.body.version).to.eql(config.version());
expect(result.body.developerMode).to.be.a('boolean');
expect(result.body.size).to.eql(null);
expect(result.body.region).to.eql(null);
expect(result.body.memory).to.eql(os.totalmem());
expect(result.body.cloudronName).to.be.a('string');
done();
});
done();
});
});
it('succeeds', function (done) {
it('succeeds (admin)', function (done) {
var scope = nock(config.apiServerOrigin())
.get('/api/v1/boxes/localhost?token=' + config.token())
.reply(200, { box: { region: 'sfo', size: '1gb' }, user: { }});
.get('/api/v1/boxes/localhost?token=' + config.token())
.reply(200, { box: { region: 'sfo', size: '1gb' }, user: { }});
superagent.get(SERVER_URL + '/api/v1/cloudron/config')
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
expect(result.body.webServerOrigin).to.eql(null);
expect(result.body.fqdn).to.eql(config.fqdn());
expect(result.body.isCustomDomain).to.eql(true);
expect(result.body.progress).to.be.an('object');
expect(result.body.update).to.be.an('object');
expect(result.body.version).to.eql(config.version());
expect(result.body.developerMode).to.be.a('boolean');
expect(result.body.size).to.eql('1gb');
expect(result.body.region).to.eql('sfo');
expect(result.body.memory).to.eql(os.totalmem());
expect(result.body.cloudronName).to.be.a('string');
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
expect(result.body.webServerOrigin).to.eql(null);
expect(result.body.fqdn).to.eql(config.fqdn());
expect(result.body.isCustomDomain).to.eql(true);
expect(result.body.progress).to.be.an('object');
expect(result.body.update).to.be.an('object');
expect(result.body.version).to.eql(config.version());
expect(result.body.developerMode).to.be.a('boolean');
expect(result.body.size).to.eql('1gb');
expect(result.body.region).to.eql('sfo');
expect(result.body.memory).to.eql(os.totalmem());
expect(result.body.cloudronName).to.be.a('string');
expect(result.body.provider).to.be.a('string');
expect(scope.isDone()).to.be.ok();
expect(scope.isDone()).to.be.ok();
done();
});
done();
});
});
it('succeeds (non-admin)', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/config')
.query({ access_token: token_1 })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
expect(result.body.webServerOrigin).to.eql(null);
expect(result.body.fqdn).to.eql(config.fqdn());
expect(result.body.isCustomDomain).to.eql(true);
expect(result.body.progress).to.be.an('object');
expect(result.body.version).to.eql(config.version());
expect(result.body.cloudronName).to.be.a('string');
expect(result.body.provider).to.be.a('string');
expect(result.body.update).to.be(undefined);
expect(result.body.size).to.be(undefined);
expect(result.body.region).to.be(undefined);
expect(result.body.memory).to.be(undefined);
done();
});
});
});
@@ -279,18 +319,18 @@ describe('Cloudron', function () {
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
}
], done);
});
@@ -302,73 +342,73 @@ describe('Cloudron', function () {
it('fails without token', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', region: 'sfo'})
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
.send({ size: 'small', region: 'sfo'})
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
});
it('fails without password', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', region: 'sfo'})
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ size: 'small', region: 'sfo'})
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('succeeds without size', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
done();
});
.send({ region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
done();
});
});
it('fails with wrong size type', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 4, region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ size: 4, region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('succeeds without region', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
done();
});
.send({ size: 'small', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
done();
});
});
it('fails with wrong region type', function (done) {
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', region: 4, password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ size: 'small', region: 4, password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails when in wrong state', function (done) {
var scope2 = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey', SessionToken: 'sessionToken' } });
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey', SessionToken: 'sessionToken' } });
var scope3 = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/backupDone?token=APPSTORE_TOKEN', function (body) {
return body.boxVersion && body.restoreKey && !body.appId && !body.appVersion && body.appBackupIds.length === 0;
})
.reply(200, { id: 'someid' });
.post('/api/v1/boxes/' + config.fqdn() + '/backupDone?token=APPSTORE_TOKEN', function (body) {
return body.boxVersion && body.restoreKey && !body.appId && !body.appVersion && body.appBackupIds.length === 0;
})
.reply(200, { id: 'someid' });
var scope1 = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/migrate?token=APPSTORE_TOKEN', function (body) {
@@ -378,22 +418,22 @@ describe('Cloudron', function () {
injectShellMock();
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
.send({ size: 'small', region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
function checkAppstoreServerCalled() {
if (scope1.isDone() && scope2.isDone() && scope3.isDone()) {
restoreShellMock();
return done();
function checkAppstoreServerCalled() {
if (scope1.isDone() && scope2.isDone() && scope3.isDone()) {
restoreShellMock();
return done();
}
setTimeout(checkAppstoreServerCalled, 100);
}
setTimeout(checkAppstoreServerCalled, 100);
}
checkAppstoreServerCalled();
});
checkAppstoreServerCalled();
});
});
it('succeeds', function (done) {
@@ -402,34 +442,34 @@ describe('Cloudron', function () {
}).reply(202, {});
var scope2 = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/backupDone?token=APPSTORE_TOKEN', function (body) {
return body.boxVersion && body.restoreKey && !body.appId && !body.appVersion && body.appBackupIds.length === 0;
})
.reply(200, { id: 'someid' });
.post('/api/v1/boxes/' + config.fqdn() + '/backupDone?token=APPSTORE_TOKEN', function (body) {
return body.boxVersion && body.restoreKey && !body.appId && !body.appVersion && body.appBackupIds.length === 0;
})
.reply(200, { id: 'someid' });
var scope3 = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey', SessionToken: 'sessionToken' } });
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey', SessionToken: 'sessionToken' } });
injectShellMock();
superagent.post(SERVER_URL + '/api/v1/cloudron/migrate')
.send({ size: 'small', region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
.send({ size: 'small', region: 'sfo', password: PASSWORD })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
function checkAppstoreServerCalled() {
if (scope1.isDone() && scope2.isDone() && scope3.isDone()) {
restoreShellMock();
return done();
function checkAppstoreServerCalled() {
if (scope1.isDone() && scope2.isDone() && scope3.isDone()) {
restoreShellMock();
return done();
}
setTimeout(checkAppstoreServerCalled, 100);
}
setTimeout(checkAppstoreServerCalled, 100);
}
checkAppstoreServerCalled();
});
checkAppstoreServerCalled();
});
});
});
@@ -442,21 +482,19 @@ describe('Cloudron', function () {
var scope1 = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
config._reset();
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
},
], done);
});
@@ -465,111 +503,117 @@ describe('Cloudron', function () {
it('fails without token', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
done();
});
});
it('fails without type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails with empty type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: '', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ type: '', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails with unknown type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'foobar', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('succeeds with ticket type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
done();
});
});
it('succeeds with app type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'app_missing', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
done();
});
.send({ type: 'foobar', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails without description', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: 'some subject' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ type: 'ticket', subject: 'some subject' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails with empty subject', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: '', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ type: 'ticket', subject: '', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails with empty description', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: 'some subject', description: '' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('succeeds with feedback type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'feedback', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
done();
});
.send({ type: 'ticket', subject: 'some subject', description: '' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('fails without subject', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
.send({ type: 'ticket', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
done();
});
});
it('succeeds with ticket type', function (done) {
var scope1 = nock(config.apiServerOrigin()).post('/api/v1/exchangeBoxTokenWithUserToken?token=APPSTORE_TOKEN').reply(201, { userId: 'USER_ID', cloudronId: 'CLOUDRON_ID', token: 'ACCESS_TOKEN' });
var scope2 = nock(config.apiServerOrigin())
.filteringRequestBody(function (/* unusedBody */) { return ''; }) // strip out body
.post('/api/v1/users/USER_ID/cloudrons/CLOUDRON_ID/feedback?accessToken=ACCESS_TOKEN')
.reply(201, { });
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
done();
});
});
it('succeeds with app type', function (done) {
var scope1 = nock(config.apiServerOrigin()).post('/api/v1/exchangeBoxTokenWithUserToken?token=APPSTORE_TOKEN').reply(201, { userId: 'USER_ID', cloudronId: 'CLOUDRON_ID', token: 'ACCESS_TOKEN' });
var scope2 = nock(config.apiServerOrigin())
.filteringRequestBody(function (/* unusedBody */) { return ''; }) // strip out body
.post('/api/v1/users/USER_ID/cloudrons/CLOUDRON_ID/feedback?accessToken=ACCESS_TOKEN')
.reply(201, { });
superagent.post(SERVER_URL + '/api/v1/feedback')
.send({ type: 'app_missing', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
done();
});
});
});
@@ -582,21 +626,19 @@ describe('Cloudron', function () {
var scope1 = nock(config.apiServerOrigin()).get('/api/v1/boxes/' + config.fqdn() + '/setup/verify?setupToken=somesetuptoken').reply(200, {});
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
config._reset();
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
},
], done);
});
@@ -607,9 +649,9 @@ describe('Cloudron', function () {
superagent.get(SERVER_URL + '/api/v1/cloudron/logstream')
.query({ access_token: token, fromLine: 0 })
.end(function (err, res) {
expect(res.statusCode).to.be(400);
done();
});
expect(res.statusCode).to.be(400);
done();
});
});
it('logStream - stream logs', function (done) {
@@ -630,8 +672,8 @@ describe('Cloudron', function () {
if (line.indexOf('id: ') === 0) {
expect(parseInt(line.substr('id: '.length), 10)).to.be.a('number');
} else if (line.indexOf('data: ') === 0) {
expect(JSON.parse(line.slice('data: '.length)).message).to.be.a('string');
dataMessageFound = true;
var message = JSON.parse(line.slice('data: '.length)).message;
if (Array.isArray(message) || typeof message === 'string') dataMessageFound = true;
}
});
+1 -1
View File
@@ -70,7 +70,7 @@ describe('REST API', function () {
.send("some invalid non-strict json")
.end(function (error, result) {
expect(result.statusCode).to.equal(400);
expect(result.body.message).to.be('Bad JSON');
expect(result.body.message).to.be('Failed to parse body');
done();
});
});
+36 -26
View File
@@ -12,18 +12,23 @@ var appdb = require('../../appdb.js'),
expect = require('expect.js'),
hock = require('hock'),
http = require('http'),
MockS3 = require('mock-aws-s3'),
nock = require('nock'),
superagent = require('superagent'),
os = require('os'),
path = require('path'),
rimraf = require('rimraf'),
s3 = require('../../storage/s3.js'),
safe = require('safetydance'),
server = require('../../server.js'),
settings = require('../../settings.js'),
settingsdb = require('../../settingsdb.js'),
superagent = require('superagent'),
url = require('url');
var SERVER_URL = 'http://localhost:' + config.get('port');
var USERNAME = 'superadmin', PASSWORD = 'Foobar?1337', EMAIL ='silly@me.com';
var token = null;
var server;
function setup(done) {
config.setVersion('1.2.3');
@@ -37,19 +42,16 @@ function setup(done) {
var scope2 = nock(config.apiServerOrigin()).post('/api/v1/boxes/' + config.fqdn() + '/setup/done?setupToken=somesetuptoken').reply(201, {});
superagent.post(SERVER_URL + '/api/v1/cloudron/activate')
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(result.statusCode).to.eql(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
.query({ setupToken: 'somesetuptoken' })
.send({ username: USERNAME, password: PASSWORD, email: EMAIL })
.end(function (error, result) {
expect(result).to.be.ok();
expect(result.statusCode).to.eql(201);
expect(scope1.isDone()).to.be.ok();
expect(scope2.isDone()).to.be.ok();
// stash token for further use
token = result.body.token;
callback();
});
callback();
});
},
function addApp(callback) {
@@ -58,12 +60,20 @@ function setup(done) {
},
function createSettings(callback) {
settings.setBackupConfig({ provider: 'caas', token: 'BACKUP_TOKEN', bucket: 'Bucket', prefix: 'Prefix' }, callback);
MockS3.config.basePath = path.join(os.tmpdir(), 's3-sysadmin-test-buckets/');
s3._mockInject(MockS3);
safe.fs.mkdirSync('/tmp/box-sysadmin-test');
settingsdb.set(settings.BACKUP_CONFIG_KEY, JSON.stringify({ provider: 'caas', token: 'BACKUP_TOKEN', key: 'key', prefix: 'boxid', format: 'tgz'}), callback);
}
], done);
}
function cleanup(done) {
s3._mockRestore();
rimraf.sync(MockS3.config.basePath);
database._clear(function (error) {
expect(!error).to.be.ok();
@@ -93,19 +103,19 @@ describe('Internal API', function () {
describe('backup', function () {
it('succeeds', function (done) {
superagent.post(config.sysadminOrigin() + '/api/v1/backup')
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
.end(function (error, result) {
expect(result.statusCode).to.equal(202);
function checkAppstoreServerCalled() {
apiHockInstance.done(function (error) {
if (!error) return done();
function checkAppstoreServerCalled() {
apiHockInstance.done(function (error) {
if (!error) return done();
setTimeout(checkAppstoreServerCalled, 100);
});
}
setTimeout(checkAppstoreServerCalled, 100);
});
}
checkAppstoreServerCalled();
});
checkAppstoreServerCalled();
});
});
});
});
+2 -2
View File
@@ -145,8 +145,8 @@ function doTask(appId, taskName, callback) {
apps.get(appId, function (error, app) {
if (error) return callback(error);
if (app.installationState !== appdb.ISTATE_INSTALLED || app.runState !== appdb.RSTATE_RUNNING) {
debug('task %s skipped. app %s is not installed/running', taskName, app.id);
if (app.installationState !== appdb.ISTATE_INSTALLED || app.runState !== appdb.RSTATE_RUNNING || app.health !== appdb.HEALTH_HEALTHY) {
debug('task %s skipped. app %s is not installed/running/healthy', taskName, app.id);
return callback();
}
@@ -12,6 +12,9 @@ if [[ $# == 1 && "$1" == "--check" ]]; then
exit 0
fi
cmd="$1"
appid="$2"
if [[ "${BOX_ENV}" == "cloudron" ]]; then
# when restoring the cloudron with many apps, the apptasks rush in to restart
# collectd which makes systemd/collectd very unhappy and puts the collectd in
@@ -19,10 +22,17 @@ if [[ "${BOX_ENV}" == "cloudron" ]]; then
for i in {1..10}; do
echo "Restarting collectd"
if systemctl restart collectd; then
exit 0
break
fi
echo "Failed to reload collectd. Maybe some other apptask is restarting it"
sleep $((RANDOM%30))
done
# delete old stats when uninstalling an app
if [[ "${cmd}" == "remove" ]]; then
echo "Removing collectd stats of ${appid}"
rm -rf ${HOME}/platformdata/graphite/whisper/collectd/localhost/*${appid}*
fi
fi
+40
View File
@@ -0,0 +1,40 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
cmd="$1"
appid="$2"
if [[ "${cmd}" == "add" ]]; then
# TODO prevent this script from moving the file from $1 into a random dir with using a relative ../ path
if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly destination_file_path="${HOME}/platformdata/logrotate.d/${appid}"
else
readonly destination_file_path="${HOME}/.cloudron_test/platformdata/logrotate.d/${appid}"
fi
mv "${3}" "${destination_file_path}"
chown root:root "${destination_file_path}"
elif [[ "${cmd}" == "remove" ]]; then
if [[ "${BOX_ENV}" == "cloudron" ]]; then
rm -rf "${HOME}/platformdata/logrotate.d/${appid}"
else
rm -rf "${HOME}/.cloudron_test/platformdata/logrotate.d/${appid}"
fi
fi
-28
View File
@@ -1,28 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
# TODO prevent this script from moving the file from $1 into a random dir with using a relative ../ path
if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly destination_file_path="${HOME}/platformdata/logrotate.d/$2"
else
readonly destination_file_path="${HOME}/.cloudron_test/platformdata/logrotate.d/$2"
fi
mv "${1}" "${destination_file_path}"
chown root:root "${destination_file_path}"
-23
View File
@@ -1,23 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
echo "Running node with memory constraints"
# note BOX_ENV and NODE_ENV are derived from parent process
exec env "DEBUG=box*,connect-lastmile" /usr/bin/node --max_old_space_size=300 "$@"
+14 -3
View File
@@ -17,10 +17,21 @@ if [[ "$1" == "--check" ]]; then
exit 0
fi
# this script is called from redis addon as well!
appid="$1"
rmdir="$2"
if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly app_data_dir="${HOME}/appsdata/$1"
rm -rf "${app_data_dir}"
readonly app_data_dir="${HOME}/appsdata/${appid}"
else
readonly app_data_dir="${HOME}/.cloudron_test/appsdata/$1"
readonly app_data_dir="${HOME}/.cloudron_test/appsdata/${appid}"
fi
# the approach below ensures symlinked contents are also deleted
find -H "${app_data_dir}" -mindepth 1 -delete || true # -H means resolve symlink in args
if [[ "${rmdir}" == "true" ]]; then
rm -rf "${app_data_dir}"
fi
-24
View File
@@ -1,24 +0,0 @@
#!/bin/bash
set -eu -o pipefail
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root." > /dev/stderr
exit 1
fi
if [[ $# -eq 0 ]]; then
echo "No arguments supplied"
exit 1
fi
if [[ "$1" == "--check" ]]; then
echo "OK"
exit 0
fi
if [[ "${BOX_ENV}" == "cloudron" ]]; then
rm -rf "${HOME}/platformdata/logrotate.d/$1"
else
rm -rf "${HOME}/.cloudron_test/platformdata/logrotate.d/$1"
fi
+23
View File
@@ -0,0 +1,23 @@
#!/usr/bin/env node
'use strict';
require('supererror')({ splatchError: true });
var tar = require('tar-fs');
var sourceDir = process.argv[2];
if (sourceDir === '--check') return console.log('OK');
process.stderr.write('Packing ' + sourceDir + '\n');
tar.pack('/', {
dereference: false, // pack the symlink and not what it points to
entries: [ sourceDir ],
map: function(header) {
header.name = header.name.replace(new RegExp('^' + sourceDir + '(/?)'), '.$1'); // make paths relative
return header;
},
strict: false // do not error for unknown types (skip fifo, char/block devices)
}).pipe(process.stdout);
+30 -30
View File
@@ -59,26 +59,26 @@ function initializeExpressSync() {
router.del = router.delete; // amend router.del for readability further on
app
.use(middleware.timeout(REQUEST_TIMEOUT))
.use(json)
.use(urlencoded)
.use(middleware.cookieParser())
.use(middleware.cors({ origins: [ '*' ], allowCredentials: false }))
.use(middleware.session({
secret: hat(128), // we only use the session during oauth, and already have an in-memory session store, so we can safely change that during restarts
resave: true,
saveUninitialized: true,
cookie: {
path: '/',
httpOnly: true,
secure: process.env.BOX_ENV !== 'test',
maxAge: 600000
}
}))
.use(passport.initialize())
.use(passport.session())
.use(router)
.use(middleware.lastMile());
.use(middleware.timeout(REQUEST_TIMEOUT))
.use(json)
.use(urlencoded)
.use(middleware.cookieParser())
.use(middleware.cors({ origins: [ '*' ], allowCredentials: false }))
.use(middleware.session({
secret: hat(128), // we only use the session during oauth, and already have an in-memory session store, so we can safely change that during restarts
resave: true,
saveUninitialized: true,
cookie: {
path: '/',
httpOnly: true,
secure: process.env.BOX_ENV !== 'test',
maxAge: 600000
}
}))
.use(passport.initialize())
.use(passport.session())
.use(router)
.use(middleware.lastMile());
// NOTE: these limits have to be in sync with nginx limits
var FILE_SIZE_LIMIT = '256mb', // max file size that can be uploaded (see also client_max_body_size in nginx)
@@ -108,11 +108,10 @@ function initializeExpressSync() {
router.post('/api/v1/developer', developerScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.developer.setEnabled);
router.get ('/api/v1/developer', developerScope, routes.developer.enabled, routes.developer.status);
router.post('/api/v1/developer/login', routes.developer.enabled, routes.developer.login);
router.get ('/api/v1/developer/apps', developerScope, routes.developer.enabled, routes.developer.apps);
// cloudron routes
router.get ('/api/v1/cloudron/config', cloudronScope, routes.cloudron.getConfig);
router.post('/api/v1/cloudron/update', cloudronScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.cloudron.update);
router.post('/api/v1/cloudron/update', cloudronScope, routes.user.requireAdmin, routes.cloudron.update);
router.post('/api/v1/cloudron/check_for_updates', cloudronScope, routes.user.requireAdmin, routes.cloudron.checkForUpdates);
router.post('/api/v1/cloudron/reboot', cloudronScope, routes.user.requireAdmin, routes.cloudron.reboot);
router.post('/api/v1/cloudron/migrate', cloudronScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.cloudron.migrate);
@@ -124,7 +123,8 @@ function initializeExpressSync() {
router.put ('/api/v1/cloudron/ssh/authorized_keys', cloudronScope, routes.user.requireAdmin, routes.ssh.addAuthorizedKey);
router.get ('/api/v1/cloudron/ssh/authorized_keys/:identifier', cloudronScope, routes.user.requireAdmin, routes.ssh.getAuthorizedKey);
router.del ('/api/v1/cloudron/ssh/authorized_keys/:identifier', cloudronScope, routes.user.requireAdmin, routes.ssh.delAuthorizedKey);
router.get ('/api/v1/cloudron/eventlog', settingsScope, routes.user.requireAdmin, routes.eventlog.get);
router.get ('/api/v1/cloudron/eventlog', cloudronScope, routes.user.requireAdmin, routes.eventlog.get);
router.post('/api/v1/cloudron/send_test_mail', cloudronScope, routes.user.requireAdmin, routes.cloudron.sendTestMail);
// profile api, working off the user behind the provided token
router.get ('/api/v1/profile', profileScope, routes.profile.get);
@@ -182,8 +182,8 @@ function initializeExpressSync() {
router.post('/api/v1/apps/install', appsScope, routes.user.requireAdmin, routes.apps.installApp);
router.post('/api/v1/apps/:id/uninstall', appsScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.apps.uninstallApp);
router.post('/api/v1/apps/:id/configure', appsScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.apps.configureApp);
router.post('/api/v1/apps/:id/update', appsScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.apps.updateApp);
router.post('/api/v1/apps/:id/configure', appsScope, routes.user.requireAdmin, routes.apps.configureApp);
router.post('/api/v1/apps/:id/update', appsScope, routes.user.requireAdmin, routes.apps.updateApp);
router.post('/api/v1/apps/:id/restore', appsScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.apps.restoreApp);
router.post('/api/v1/apps/:id/backup', appsScope, routes.user.requireAdmin, routes.apps.backupApp);
router.get ('/api/v1/apps/:id/backups', appsScope, routes.user.requireAdmin, routes.apps.listBackups);
@@ -283,11 +283,11 @@ function initializeSysadminExpressSync() {
router.del = router.delete; // amend router.del for readability further on
app
.use(middleware.timeout(REQUEST_TIMEOUT))
.use(json)
.use(urlencoded)
.use(router)
.use(middleware.lastMile());
.use(middleware.timeout(REQUEST_TIMEOUT))
.use(json)
.use(urlencoded)
.use(router)
.use(middleware.lastMile());
// Sysadmin routes
router.post('/api/v1/backup', routes.sysadmin.backup);
+5 -28
View File
@@ -33,9 +33,6 @@ exports = module.exports = {
getTlsConfig: getTlsConfig,
setTlsConfig: setTlsConfig,
getUpdateConfig: getUpdateConfig,
setUpdateConfig: setUpdateConfig,
getAppstoreConfig: getAppstoreConfig,
setAppstoreConfig: setAppstoreConfig,
@@ -410,11 +407,15 @@ function setBackupConfig(backupConfig, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof callback, 'function');
if (backupConfig.key && backupConfig.format !== 'tgz') return callback(new SettingsError(SettingsError.BAD_FIELD, 'format does not support encryption'));
backups.testConfig(backupConfig, function (error) {
if (error && error.reason === BackupsError.BAD_FIELD) return callback(new SettingsError(SettingsError.BAD_FIELD, error.message));
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.EXTERNAL_ERROR, error.message));
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
backups.cleanupCacheFilesSync();
settingsdb.set(exports.BACKUP_CONFIG_KEY, JSON.stringify(backupConfig), function (error) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
@@ -425,30 +426,6 @@ function setBackupConfig(backupConfig, callback) {
});
}
function getUpdateConfig(callback) {
assert.strictEqual(typeof callback, 'function');
settingsdb.get(exports.UPDATE_CONFIG_KEY, function (error, value) {
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, gDefaults[exports.UPDATE_CONFIG_KEY]);
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
callback(null, JSON.parse(value)); // { prerelease }
});
}
function setUpdateConfig(updateConfig, callback) {
assert.strictEqual(typeof updateConfig, 'object');
assert.strictEqual(typeof callback, 'function');
settingsdb.set(exports.UPDATE_CONFIG_KEY, JSON.stringify(updateConfig), function (error) {
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
exports.events.emit(exports.UPDATE_CONFIG_KEY, updateConfig);
callback(null);
});
}
function getMailConfig(callback) {
assert.strictEqual(typeof callback, 'function');
@@ -664,7 +641,7 @@ function getAll(callback) {
// convert JSON objects
[exports.DNS_CONFIG_KEY, exports.TLS_CONFIG_KEY, exports.BACKUP_CONFIG_KEY, exports.MAIL_CONFIG_KEY,
exports.UPDATE_CONFIG_KEY, exports.APPSTORE_CONFIG_KEY, exports.MAIL_RELAY_KEY, exports.CATCH_ALL_ADDRESS_KEY].forEach(function (key) {
exports.UPDATE_CONFIG_KEY, exports.APPSTORE_CONFIG_KEY, exports.MAIL_RELAY_KEY, exports.CATCH_ALL_ADDRESS_KEY].forEach(function (key) {
result[key] = typeof result[key] === 'object' ? result[key] : safe.JSON.parse(result[key]);
});
+14 -13
View File
@@ -10,6 +10,7 @@ exports = module.exports = {
var assert = require('assert'),
child_process = require('child_process'),
debug = require('debug')('box:shell'),
fs = require('fs'),
once = require('once'),
util = require('util');
@@ -33,26 +34,26 @@ function exec(tag, file, args, options, callback) {
assert.strictEqual(typeof tag, 'string');
assert.strictEqual(typeof file, 'string');
assert(util.isArray(args));
if (typeof options === 'function') {
callback = options;
options = { };
}
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
callback = once(callback); // exit may or may not be called after an 'error'
debug(tag + ' execFile: %s', file); // do not dump args as it might have sensitive info
var cp = child_process.spawn(file, args, options);
cp.stdout.on('data', function (data) {
debug(tag + ' (stdout): %s', data.toString('utf8'));
});
if (options.logStream) {
cp.stdout.pipe(options.logStream);
cp.stderr.pipe(options.logStream);
} else {
cp.stdout.on('data', function (data) {
debug(tag + ' (stdout): %s', data.toString('utf8'));
});
cp.stderr.on('data', function (data) {
debug(tag + ' (stderr): %s', data.toString('utf8'));
});
cp.stderr.on('data', function (data) {
debug(tag + ' (stderr): %s', data.toString('utf8'));
});
}
cp.on('exit', function (code, signal) {
if (code || signal) debug(tag + ' code: %s, signal: %s', code, signal);
@@ -83,7 +84,7 @@ function sudo(tag, args, options, callback) {
assert.strictEqual(typeof options, 'object');
// -S makes sudo read stdin for password. -E preserves arguments
// -S makes sudo read stdin for password. -E preserves environment
var cp = exec(tag, SUDO, [ options.env ? '-SE' : '-S' ].concat(args), options, callback);
cp.stdin.end();
return cp;
-229
View File
@@ -1,229 +0,0 @@
'use strict';
exports = module.exports = {
backup: backup,
restore: restore,
copyBackup: copyBackup,
removeBackups: removeBackups,
backupDone: backupDone,
testConfig: testConfig,
};
var assert = require('assert'),
AWS = require('aws-sdk'),
BackupsError = require('../backups.js').BackupsError,
config = require('../config.js'),
debug = require('debug')('box:storage/caas'),
once = require('once'),
PassThrough = require('stream').PassThrough,
path = require('path'),
S3BlockReadStream = require('s3-block-read-stream'),
superagent = require('superagent'),
targz = require('./targz.js');
var FILE_TYPE = '.tar.gz.enc';
// internal only
// Fetches short-lived AWS credentials for this box from the Cloudron API server.
// apiConfig must carry a valid token; region/endpoint are optional overrides.
// callback(error) or callback(null, credentials) where credentials is an AWS.S3 option object.
function getBackupCredentials(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');
    assert(apiConfig.token);
    var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/awscredentials';
    superagent.post(url).query({ token: apiConfig.token }).timeout(30 * 1000).end(function (error, result) {
        if (error && !error.response) return callback(error); // network-level failure; HTTP errors carry error.response
        if (result.statusCode !== 201) return callback(new Error(result.text));
        if (!result.body || !result.body.credentials) return callback(new Error('Unexpected response: ' + JSON.stringify(result.headers)));
        var credentials = {
            signatureVersion: 'v4',
            accessKeyId: result.body.credentials.AccessKeyId,
            secretAccessKey: result.body.credentials.SecretAccessKey,
            sessionToken: result.body.credentials.SessionToken,
            region: apiConfig.region || 'us-east-1' // default region when none is configured
        };
        if (apiConfig.endpoint) credentials.endpoint = new AWS.Endpoint(apiConfig.endpoint); // S3-compatible custom endpoint
        callback(null, credentials);
    });
}
// Maps a backupId to its object key under the configured prefix.
// The extension depends on whether encryption is configured ('.tar.gz.enc' vs '.tar.gz');
// it is appended only when backupId does not already carry it.
function getBackupFilePath(apiConfig, backupId) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    var extension = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
    var fileName = backupId.endsWith(extension) ? backupId : backupId + extension;
    return path.join(apiConfig.prefix, fileName);
}
// storage api
// Streams a tarball of sourceDirectories to S3 under the key derived from backupId.
// apiConfig.key (when set) makes targz.create produce an encrypted stream.
// callback(error) — wrapped with once() so only the first of the upload/tar errors is forwarded.
function backup(apiConfig, backupId, sourceDirectories, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(sourceDirectories));
    assert.strictEqual(typeof callback, 'function');
    callback = once(callback); // both s3.upload and targz.create can report errors
    var backupFilePath = getBackupFilePath(apiConfig, backupId);
    debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);
    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);
        var passThrough = new PassThrough(); // tar output is piped through this stream into the S3 upload
        var params = {
            Bucket: apiConfig.bucket,
            Key: backupFilePath,
            Body: passThrough
        };
        var s3 = new AWS.S3(credentials);
        // s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
        s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
            if (error) {
                debug('[%s] backup: s3 upload error.', backupId, error);
                return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
            }
            callback(null);
        });
        targz.create(sourceDirectories, apiConfig.key || null, passThrough, callback); // starts writing into passThrough
    });
}
// Downloads the backup object for backupId and extracts it into destination.
// Uses S3BlockReadStream to fetch the object in 64MB blocks.
// callback(error) — BackupsError.NOT_FOUND when the object is missing.
function restore(apiConfig, backupId, destination, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof destination, 'string');
    assert.strictEqual(typeof callback, 'function');
    callback = once(callback); // the stream 'error' handler and targz.extract can both fire
    var backupFilePath = getBackupFilePath(apiConfig, backupId);
    debug('[%s] restore: %s -> %s', backupId, backupFilePath, destination);
    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);
        var params = {
            Bucket: apiConfig.bucket,
            Key: backupFilePath
        };
        var s3 = new AWS.S3(credentials);
        var multipartDownload = new S3BlockReadStream(s3, params, { blockSize: 64 * 1024 * 1024, logCallback: debug });
        multipartDownload.on('error', function (error) {
            // TODO ENOENT for the mock, fix upstream!
            if (error.code === 'NoSuchKey' || error.code === 'ENOENT') return callback(new BackupsError(BackupsError.NOT_FOUND));
            debug('[%s] restore: s3 stream error.', backupId, error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });
        targz.extract(multipartDownload, destination, apiConfig.key || null, callback); // decrypts when apiConfig.key is set
    });
}
// Server-side copy of an existing backup object to a new backup id.
// callback(error) — BackupsError.NOT_FOUND when the source object does not exist,
// BackupsError.EXTERNAL_ERROR for any other S3 failure.
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldBackupId, 'string');
    assert.strictEqual(typeof newBackupId, 'string');
    assert.strictEqual(typeof callback, 'function');
    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);
        var sourceKey = getBackupFilePath(apiConfig, oldBackupId);
        var targetKey = getBackupFilePath(apiConfig, newBackupId);
        var s3 = new AWS.S3(credentials);
        // CopySource is the "<bucket>/<key>" of the object being copied
        s3.copyObject({
            Bucket: apiConfig.bucket,
            Key: targetKey,
            CopySource: path.join(apiConfig.bucket, sourceKey)
        }, function (error) {
            if (error && error.code === 'NoSuchKey') return callback(new BackupsError(BackupsError.NOT_FOUND));
            if (error) {
                debug('copyBackup: s3 copy error.', error);
                return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
            }
            callback(null);
        });
    });
}
// Deletes the given backupIds from the bucket in a single batch request.
// Deletion failures are logged but not propagated — removal is best-effort
// and the callback always receives null.
function removeBackups(apiConfig, backupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert(Array.isArray(backupIds));
    assert.strictEqual(typeof callback, 'function');
    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);
        var params = {
            Bucket: apiConfig.bucket,
            Delete: {
                Objects: backupIds.map(function (backupId) { return { Key: getBackupFilePath(apiConfig, backupId) }; })
            }
        };
        var s3 = new AWS.S3(credentials);
        s3.deleteObjects(params, function (error, data) {
            // BUG FIX: previous message referenced params.Key, which does not exist on a
            // deleteObjects request ({ Bucket, Delete }) and always printed undefined.
            if (error) debug('removeBackups: Unable to remove %j. Not fatal.', params.Delete.Objects, error);
            else debug('removeBackups: Deleted: %j Errors: %j', data.Deleted, data.Errors);
            callback(null);
        });
    });
}
// Validates the caas backend configuration: it is only usable when this
// Cloudron instance itself is provisioned by caas.
function testConfig(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');
    if (config.provider() === 'caas') return callback();
    callback(new BackupsError(BackupsError.BAD_FIELD, 'instance provider must be caas'));
}
// Reports a completed box backup (and its per-app backups) to the Cloudron API server.
// callback(error) — BackupsError.EXTERNAL_ERROR on network failure or non-200 response.
function backupDone(backupId, appBackupIds, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');
    // Caas expects filenames instead of backupIds, this means no prefix but a file type extension.
    // NOTE(review): this uses the module-level FILE_TYPE ('.tar.gz.enc'), not the per-config
    // extension logic in getBackupFilePath — presumably caas backups are always encrypted; verify.
    var boxBackupFilename = backupId + FILE_TYPE;
    var appBackupFilenames = appBackupIds.map(function (id) { return id + FILE_TYPE; });
    debug('[%s] backupDone: %s apps %j', backupId, boxBackupFilename, appBackupFilenames);
    var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/backupDone';
    var data = {
        boxVersion: config.version(),
        restoreKey: boxBackupFilename,
        appId: null, // now unused
        appVersion: null, // now unused
        appBackupIds: appBackupFilenames
    };
    superagent.post(url).send(data).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
        if (error && !error.response) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
        if (result.statusCode !== 200) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result.text));
        return callback(null);
    });
}
+101 -100
View File
@@ -1,10 +1,14 @@
'use strict';
exports = module.exports = {
backup: backup,
restore: restore,
copyBackup: copyBackup,
removeBackups: removeBackups,
upload: upload,
download: download,
downloadDir: downloadDir,
copy: copy,
remove: remove,
removeDir: removeDir,
backupDone: backupDone,
@@ -12,170 +16,167 @@ exports = module.exports = {
};
var assert = require('assert'),
async = require('async'),
BackupsError = require('../backups.js').BackupsError,
config = require('../config.js'),
debug = require('debug')('box:storage/filesystem'),
EventEmitter = require('events'),
fs = require('fs'),
mkdirp = require('mkdirp'),
once = require('once'),
path = require('path'),
safe = require('safetydance'),
targz = require('./targz.js');
var FALLBACK_BACKUP_FOLDER = '/var/backups';
var BACKUP_USER = config.TEST ? process.env.USER : 'yellowtent';
// internal only
function getBackupFilePath(apiConfig, backupId) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
const FILE_TYPE = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
return path.join(apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER, backupId.endsWith(FILE_TYPE) ? backupId : backupId+FILE_TYPE);
}
shell = require('../shell.js');
// storage api
function backup(apiConfig, backupId, sourceDirectories, callback) {
function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(sourceDirectories));
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof sourceStream, 'object');
assert.strictEqual(typeof callback, 'function');
callback = once(callback);
var backupFilePath = getBackupFilePath(apiConfig, backupId);
debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);
mkdirp(path.dirname(backupFilePath), function (error) {
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
safe.fs.unlinkSync(backupFilePath); // remove any hardlink
var fileStream = fs.createWriteStream(backupFilePath);
// this pattern is required to ensure that the file got created before 'finish'
fileStream.on('open', function () {
sourceStream.pipe(fileStream);
});
fileStream.on('error', function (error) {
debug('[%s] backup: out stream error.', backupId, error);
debug('[%s] upload: out stream error.', backupFilePath, error);
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
});
fileStream.on('close', function () {
debug('[%s] backup: changing ownership.', backupId);
fileStream.on('finish', function () {
// in test, upload() may or may not be called via sudo script
const BACKUP_UID = parseInt(process.env.SUDO_UID, 10) || process.getuid();
if (!safe.child_process.execSync('chown -R ' + BACKUP_USER + ':' + BACKUP_USER + ' ' + path.dirname(backupFilePath))) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, safe.error.message));
if (!safe.fs.chownSync(backupFilePath, BACKUP_UID, BACKUP_UID)) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Unable to chown:' + safe.error.message));
if (!safe.fs.chownSync(path.dirname(backupFilePath), BACKUP_UID, BACKUP_UID)) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Unable to chown:' + safe.error.message));
debug('[%s] backup: done.', backupId);
debug('upload %s: done.', backupFilePath);
callback(null);
});
targz.create(sourceDirectories, apiConfig.key || null, fileStream, callback);
});
}
function restore(apiConfig, backupId, destination, callback) {
function download(apiConfig, sourceFilePath, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof destination, 'string');
assert.strictEqual(typeof sourceFilePath, 'string');
assert.strictEqual(typeof callback, 'function');
callback = once(callback);
debug('download: %s', sourceFilePath);
var sourceFilePath = getBackupFilePath(apiConfig, backupId);
debug('[%s] restore: %s -> %s', backupId, sourceFilePath, destination);
if (!fs.existsSync(sourceFilePath)) return callback(new BackupsError(BackupsError.NOT_FOUND, 'backup file does not exist'));
if (!safe.fs.existsSync(sourceFilePath)) return callback(new BackupsError(BackupsError.NOT_FOUND, 'File not found'));
var fileStream = fs.createReadStream(sourceFilePath);
fileStream.on('error', function (error) {
debug('restore: file stream error.', error);
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
});
targz.extract(fileStream, destination, apiConfig.key || null, callback);
callback(null, fileStream);
}
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
function downloadDir(apiConfig, backupFilePath, destDir) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof oldBackupId, 'string');
assert.strictEqual(typeof newBackupId, 'string');
assert.strictEqual(typeof callback, 'function');
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof destDir, 'string');
callback = once(callback);
var events = new EventEmitter();
var oldFilePath = getBackupFilePath(apiConfig, oldBackupId);
var newFilePath = getBackupFilePath(apiConfig, newBackupId);
events.emit('progress', `downloadDir: ${backupFilePath} to ${destDir}`);
debug('copyBackup: %s -> %s', oldFilePath, newFilePath);
shell.exec('downloadDir', '/bin/cp', [ '-r', backupFilePath + '/.', destDir ], { }, function (error) {
if (error) return events.emit('done', new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
events.emit('done', null);
});
return events;
}
function copy(apiConfig, oldFilePath, newFilePath) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof oldFilePath, 'string');
assert.strictEqual(typeof newFilePath, 'string');
debug('copy: %s -> %s', oldFilePath, newFilePath);
var events = new EventEmitter();
mkdirp(path.dirname(newFilePath), function (error) {
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
if (error) return events.emit('done', new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
var readStream = fs.createReadStream(oldFilePath);
var writeStream = fs.createWriteStream(newFilePath);
// this will hardlink backups saving space
var cpOptions = apiConfig.noHardlinks ? '-a' : '-al';
shell.exec('copy', '/bin/cp', [ cpOptions, oldFilePath, newFilePath ], { }, function (error) {
if (error) return events.emit('done', new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
readStream.on('error', function (error) {
debug('copyBackup: read stream error.', error);
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
events.emit('done', null);
});
writeStream.on('error', function (error) {
debug('copyBackup: write stream error.', error);
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
});
writeStream.on('close', function () {
if (!safe.child_process.execSync('chown -R ' + BACKUP_USER + ':' + BACKUP_USER + ' ' + path.dirname(newFilePath))) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, safe.error.message));
callback();
});
readStream.pipe(writeStream);
});
return events;
}
function removeBackups(apiConfig, backupIds, callback) {
function remove(apiConfig, filename, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert(Array.isArray(backupIds));
assert.strictEqual(typeof filename, 'string');
assert.strictEqual(typeof callback, 'function');
async.eachSeries(backupIds, function (id, iteratorCallback) {
var filePath = getBackupFilePath(apiConfig, id);
var stat = safe.fs.statSync(filename);
if (!stat) return callback();
if (!safe.fs.unlinkSync(filePath)) {
debug('removeBackups: Unable to remove %s : %s', filePath, safe.error.message);
}
if (stat.isFile()) {
if (!safe.fs.unlinkSync(filename)) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, safe.error.message));
} else if (stat.isDirectory()) {
if (!safe.fs.rmdirSync(filename)) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, safe.error.message));
}
safe.fs.rmdirSync(path.dirname(filePath)); // try to cleanup empty directories
callback(null);
}
iteratorCallback();
}, callback);
// Recursively removes pathPrefix via 'rm -rf' through the shell helper.
// Returns an EventEmitter that emits 'done' with a BackupsError or null.
function removeDir(apiConfig, pathPrefix) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof pathPrefix, 'string');
    var events = new EventEmitter();
    // BUG FIX: the progress message said 'downloadDir' — copy/paste from downloadDir above.
    // NOTE(review): this emit fires synchronously, before the caller can attach a 'progress'
    // listener, so it is likely never observed — consider process.nextTick; verify callers.
    events.emit('progress', `removeDir: ${pathPrefix}`);
    shell.exec('removeDir', '/bin/rm', [ '-rf', pathPrefix ], { }, function (error) {
        if (error) return events.emit('done', new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        events.emit('done', null);
    });
    return events;
}
function testConfig(apiConfig, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof callback, 'function');
if ('backupFolder' in apiConfig && typeof apiConfig.backupFolder !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'backupFolder must be string'));
if (typeof apiConfig.backupFolder !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'backupFolder must be string'));
// default value will be used
if (!apiConfig.backupFolder) return callback();
if (!apiConfig.backupFolder) return callback(new BackupsError(BackupsError.BAD_FIELD, 'backupFolder is required'));
if ('noHardlinks' in apiConfig && typeof apiConfig.noHardlinks !== 'boolean') return callback(new BackupsError(BackupsError.BAD_FIELD, 'noHardlinks must be boolean'));
fs.stat(apiConfig.backupFolder, function (error, result) {
if (error) {
debug('testConfig: %s', apiConfig.backupFolder, error);
return callback(new BackupsError(BackupsError.BAD_FIELD, 'Directory does not exist or cannot be accessed'));
}
if (error) return callback(new BackupsError(BackupsError.BAD_FIELD, 'Directory does not exist or cannot be accessed: ' + error.message));
if (!result.isDirectory()) return callback(new BackupsError(BackupsError.BAD_FIELD, 'Backup location is not a directory'));
callback(null);
mkdirp(path.join(apiConfig.backupFolder, 'snapshot'), function (error) {
if (error && error.code === 'EACCES') return callback(new BackupsError(BackupsError.BAD_FIELD, `Access denied. Run "chown yellowtent:yellowtent ${apiConfig.backupFolder}" on the server`));
if (error) return callback(new BackupsError(BackupsError.BAD_FIELD, error.message));
callback(null);
});
});
}
function backupDone(backupId, appBackupIds, callback) {
function backupDone(apiConfig, backupId, appBackupIds, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
+50 -28
View File
@@ -7,22 +7,26 @@
// -------------------------------------------
exports = module.exports = {
backup: backup,
restore: restore,
copyBackup: copyBackup,
removeBackups: removeBackups,
upload: upload,
download: download,
downloadDir: downloadDir,
copy: copy,
remove: remove,
removeDir: removeDir,
backupDone: backupDone,
testConfig: testConfig
};
var assert = require('assert');
var assert = require('assert'),
EventEmitter = require('events');
function backup(apiConfig, backupId, sourceDirectories, callback) {
function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(sourceDirectories));
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof sourceStream, 'object');
assert.strictEqual(typeof callback, 'function');
// Result: none
@@ -30,10 +34,38 @@ function backup(apiConfig, backupId, sourceDirectories, callback) {
callback(new Error('not implemented'));
}
function restore(apiConfig, backupId, destination, callback) {
function download(apiConfig, backupFilePath, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof destination, 'string');
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof callback, 'function');
// Result: download stream
callback(new Error('not implemented'));
}
// Stub backend: pretends to download a backup directory and immediately succeeds.
// Returns an EventEmitter that emits 'done' with null on the next tick.
function downloadDir(apiConfig, backupFilePath, destDir) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupFilePath, 'string');
    assert.strictEqual(typeof destDir, 'string');

    const events = new EventEmitter();
    process.nextTick(() => events.emit('done', null));
    return events;
}
// Stub backend: pretends to copy a backup file and immediately succeeds.
// Returns an EventEmitter that emits 'done' with null on the next tick.
function copy(apiConfig, oldFilePath, newFilePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldFilePath, 'string');
    assert.strictEqual(typeof newFilePath, 'string');

    const events = new EventEmitter();
    process.nextTick(() => events.emit('done', null));
    return events;
}
// Stub backend: file removal is not supported here.
function remove(apiConfig, filename, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: none
    callback(new Error('not implemented'));
}
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
function removeDir(apiConfig, pathPrefix) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof oldBackupId, 'string');
assert.strictEqual(typeof newBackupId, 'string');
assert.strictEqual(typeof callback, 'function');
assert.strictEqual(typeof pathPrefix, 'string');
// Result: none
callback(new Error('not implemented'));
}
function removeBackups(apiConfig, backupIds, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert(Array.isArray(backupIds));
assert.strictEqual(typeof callback, 'function');
// Result: none
callback(new Error('not implemented'));
var events = new EventEmitter();
process.nextTick(function () { events.emit('done', new Error('not implemented')); });
return events;
}
function testConfig(apiConfig, callback) {
@@ -71,7 +92,8 @@ function testConfig(apiConfig, callback) {
callback(new Error('not implemented'));
}
function backupDone(backupId, appBackupIds, callback) {
function backupDone(apiConfig, backupId, appBackupIds, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
+58 -28
View File
@@ -1,10 +1,13 @@
'use strict';
exports = module.exports = {
backup: backup,
restore: restore,
copyBackup: copyBackup,
removeBackups: removeBackups,
upload: upload,
download: download,
downloadDir: downloadDir,
copy: copy,
remove: remove,
removeDir: removeDir,
backupDone: backupDone,
@@ -12,62 +15,89 @@ exports = module.exports = {
};
var assert = require('assert'),
debug = require('debug')('box:storage/noop');
debug = require('debug')('box:storage/noop'),
EventEmitter = require('events');
function backup(apiConfig, backupId, sourceDirectories, callback) {
function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(sourceDirectories));
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof sourceStream, 'object');
assert.strictEqual(typeof callback, 'function');
debug('backup: %s %j', backupId, sourceDirectories);
debug('upload: %s', backupFilePath);
callback();
callback(null);
}
function restore(apiConfig, backupId, destination, callback) {
function download(apiConfig, backupFilePath, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof destination, 'string');
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof callback, 'function');
debug('restore: %s %s', backupId, destination);
debug('download: %s', backupFilePath);
callback(new Error('Cannot restore from noop backend'));
callback(new Error('Cannot download from noop backend'));
}
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
function downloadDir(apiConfig, backupFilePath, destDir) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof oldBackupId, 'string');
assert.strictEqual(typeof newBackupId, 'string');
assert.strictEqual(typeof callback, 'function');
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof destDir, 'string');
debug('copyBackup: %s -> %s', oldBackupId, newBackupId);
var events = new EventEmitter();
process.nextTick(function () {
debug('downloadDir: %s -> %s', backupFilePath, destDir);
callback();
events.emit('done', new Error('Cannot download from noop backend'));
});
return events;
}
function removeBackups(apiConfig, backupIds, callback) {
function copy(apiConfig, oldFilePath, newFilePath) {
assert.strictEqual(typeof apiConfig, 'object');
assert(Array.isArray(backupIds));
assert.strictEqual(typeof oldFilePath, 'string');
assert.strictEqual(typeof newFilePath, 'string');
debug('copy: %s -> %s', oldFilePath, newFilePath);
var events = new EventEmitter();
process.nextTick(function () { events.emit('done', null); });
return events;
}
function remove(apiConfig, filename, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof filename, 'string');
assert.strictEqual(typeof callback, 'function');
debug('removeBackups: %j', backupIds);
debug('remove: %s', filename);
callback();
callback(null);
}
// No-op backend: logs the request and reports success on the next tick.
// Returns an EventEmitter that emits 'done' with null.
function removeDir(apiConfig, pathPrefix) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof pathPrefix, 'string');

    debug('removeDir: %s', pathPrefix);

    const events = new EventEmitter();
    process.nextTick(() => events.emit('done', null));
    return events;
}
function testConfig(apiConfig, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof callback, 'function');
callback();
callback(null);
}
function backupDone(backupId, appBackupIds, callback) {
function backupDone(apiConfig, backupId, appBackupIds, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(appBackupIds));
assert.strictEqual(typeof callback, 'function');
callback();
callback(null);
}
+386 -98
View File
@@ -1,10 +1,13 @@
'use strict';
exports = module.exports = {
backup: backup,
restore: restore,
copyBackup: copyBackup,
removeBackups: removeBackups,
upload: upload,
download: download,
downloadDir: downloadDir,
copy: copy,
remove: remove,
removeDir: removeDir,
backupDone: backupDone,
@@ -16,14 +19,21 @@ exports = module.exports = {
};
var assert = require('assert'),
async = require('async'),
AWS = require('aws-sdk'),
BackupsError = require('../backups.js').BackupsError,
chunk = require('lodash.chunk'),
config = require('../config.js'),
debug = require('debug')('box:storage/s3'),
once = require('once'),
EventEmitter = require('events'),
fs = require('fs'),
https = require('https'),
mkdirp = require('mkdirp'),
PassThrough = require('stream').PassThrough,
path = require('path'),
S3BlockReadStream = require('s3-block-read-stream'),
targz = require('./targz.js');
safe = require('safetydance'),
superagent = require('superagent');
// test only
var originalAWS;
@@ -36,87 +46,123 @@ function mockRestore() {
AWS = originalAWS;
}
// internal only
function getBackupCredentials(apiConfig, callback) {
var gCachedCaasCredentials = { issueDate: null, credentials: null };
function getCaasConfig(apiConfig, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof callback, 'function');
assert(apiConfig.token);
if ((new Date() - gCachedCaasCredentials.issueDate) <= (1.75 * 60 * 60 * 1000)) { // caas gives tokens with 2 hour limit
return callback(null, gCachedCaasCredentials.credentials);
}
debug('getCaasCredentials: getting new credentials');
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/awscredentials';
superagent.post(url).query({ token: apiConfig.token }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(error);
if (result.statusCode !== 201) return callback(new Error(result.text));
if (!result.body || !result.body.credentials) return callback(new Error('Unexpected response: ' + JSON.stringify(result.headers)));
var credentials = {
signatureVersion: 'v4',
accessKeyId: result.body.credentials.AccessKeyId,
secretAccessKey: result.body.credentials.SecretAccessKey,
sessionToken: result.body.credentials.SessionToken,
region: apiConfig.region || 'us-east-1',
maxRetries: 5,
retryDelayOptions: {
base: 20000 // 2^5 * 20 seconds
}
};
if (apiConfig.endpoint) credentials.endpoint = new AWS.Endpoint(apiConfig.endpoint);
gCachedCaasCredentials = {
issueDate: new Date(),
credentials: credentials
};
callback(null, credentials);
});
}
function getS3Config(apiConfig, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof callback, 'function');
assert(apiConfig.accessKeyId && apiConfig.secretAccessKey);
if (apiConfig.provider === 'caas') return getCaasConfig(apiConfig, callback);
var credentials = {
signatureVersion: apiConfig.signatureVersion || 'v4',
s3ForcePathStyle: true,
s3ForcePathStyle: true, // Force use path-style url (http://endpoint/bucket/path) instead of host-style (http://bucket.endpoint/path)
accessKeyId: apiConfig.accessKeyId,
secretAccessKey: apiConfig.secretAccessKey,
region: apiConfig.region || 'us-east-1'
region: apiConfig.region || 'us-east-1',
maxRetries: 5,
retryDelayOptions: {
base: 20000 // 2^5 * 20 seconds
}
};
if (apiConfig.endpoint) credentials.endpoint = apiConfig.endpoint;
if (apiConfig.acceptSelfSignedCerts === true && credentials.endpoint && credentials.endpoint.startsWith('https://')) {
credentials.httpOptions.agent = {
agent: new https.Agent({ rejectUnauthorized: false })
};
}
callback(null, credentials);
}
// Builds the S3 object key for a backupId: configured prefix plus the id,
// suffixed with '.tar.gz.enc' (encrypted) or '.tar.gz' unless already present.
function getBackupFilePath(apiConfig, backupId) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    var suffix = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
    var key = backupId.endsWith(suffix) ? backupId : backupId + suffix;
    return path.join(apiConfig.prefix, key);
}
// storage api
function backup(apiConfig, backupId, sourceDirectories, callback) {
function upload(apiConfig, backupFilePath, sourceStream, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert(Array.isArray(sourceDirectories));
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof sourceStream, 'object');
assert.strictEqual(typeof callback, 'function');
callback = once(callback);
function done(error) {
if (error) {
debug('[%s] upload: s3 upload error.', backupFilePath, error);
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error uploading ${backupFilePath}. Message: ${error.message} HTTP Code: ${error.code}`));
}
var backupFilePath = getBackupFilePath(apiConfig, backupId);
callback(null);
}
debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);
getBackupCredentials(apiConfig, function (error, credentials) {
getS3Config(apiConfig, function (error, credentials) {
if (error) return callback(error);
var passThrough = new PassThrough();
var params = {
Bucket: apiConfig.bucket,
Key: backupFilePath,
Body: passThrough
Body: sourceStream
};
var s3 = new AWS.S3(credentials);
// exoscale does not like multi-part uploads. so avoid them for filesystem streams < 5GB
if (apiConfig.provider === 'exoscale-sos' && typeof sourceStream.path === 'string') {
var stat = safe.fs.statSync(sourceStream.path);
if (!stat) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error detecting size ${sourceStream.path}. Message: ${safe.error.message}`));
if (stat.size <= 5 * 1024 * 1024 * 1024) return s3.putObject(params, done);
}
// s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
if (error) {
debug('[%s] backup: s3 upload error.', backupId, error);
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
}
callback(null);
});
targz.create(sourceDirectories, apiConfig.key || null, passThrough, callback);
// uploader will buffer at most queueSize * partSize bytes into memory at any given time.
return s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, done);
});
}
function restore(apiConfig, backupId, destination, callback) {
function download(apiConfig, backupFilePath, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof destination, 'string');
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof callback, 'function');
callback = once(callback);
var backupFilePath = getBackupFilePath(apiConfig, backupId);
debug('[%s] restore: %s -> %s', backupId, backupFilePath, destination);
getBackupCredentials(apiConfig, function (error, credentials) {
getS3Config(apiConfig, function (error, credentials) {
if (error) return callback(error);
var params = {
@@ -126,90 +172,308 @@ function restore(apiConfig, backupId, destination, callback) {
var s3 = new AWS.S3(credentials);
var multipartDownload = new S3BlockReadStream(s3, params, { blockSize: 64 * 1024 * 1024, logCallback: debug });
var ps = new PassThrough();
var multipartDownload = new S3BlockReadStream(s3, params, { blockSize: 64 * 1024 * 1024 /*, logCallback: debug */ });
multipartDownload.on('error', function (error) {
// TODO ENOENT for the mock, fix upstream!
if (error.code === 'NoSuchKey' || error.code === 'ENOENT') return callback(new BackupsError(BackupsError.NOT_FOUND));
debug('[%s] restore: s3 stream error.', backupId, error);
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
if (error.code === 'NoSuchKey' || error.code === 'ENOENT') {
ps.emit('error', new BackupsError(BackupsError.NOT_FOUND));
} else {
debug('[%s] download: s3 stream error.', backupFilePath, error);
ps.emit('error', new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
}
});
targz.extract(multipartDownload, destination, apiConfig.key || null, callback);
multipartDownload.pipe(ps);
callback(null, ps);
});
}
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof oldBackupId, 'string');
assert.strictEqual(typeof newBackupId, 'string');
assert.strictEqual(typeof callback, 'function');
getBackupCredentials(apiConfig, function (error, credentials) {
function listDir(apiConfig, backupFilePath, iteratorCallback, callback) {
getS3Config(apiConfig, function (error, credentials) {
if (error) return callback(error);
var params = {
Bucket: apiConfig.bucket,
Key: getBackupFilePath(apiConfig, newBackupId),
CopySource: path.join(apiConfig.bucket, getBackupFilePath(apiConfig, oldBackupId))
};
var s3 = new AWS.S3(credentials);
s3.copyObject(params, function (error) {
if (error && error.code === 'NoSuchKey') return callback(new BackupsError(BackupsError.NOT_FOUND, 'Old backup not found'));
if (error) {
debug('copyBackup: s3 copy error.', error);
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
}
var listParams = {
Bucket: apiConfig.bucket,
Prefix: backupFilePath
};
callback(null);
async.forever(function listAndDownload(foreverCallback) {
s3.listObjects(listParams, function (error, listData) {
if (error) {
debug('remove: Failed to list %s. Not fatal.', error);
return foreverCallback(error);
}
if (listData.Contents.length === 0) return foreverCallback(new Error('Done'));
iteratorCallback(s3, listData.Contents, function (error) {
if (error) return foreverCallback(error);
if (!listData.IsTruncated) return foreverCallback(new Error('Done'));
listParams.Marker = listData.Contents[listData.Contents.length - 1].Key; // NextMarker is returned only with delimiter
foreverCallback();
});
});
}, function (error) {
if (error.message === 'Done') return callback(null);
callback(error);
});
});
}
function removeBackups(apiConfig, backupIds, callback) {
function downloadDir(apiConfig, backupFilePath, destDir) {
assert.strictEqual(typeof apiConfig, 'object');
assert(Array.isArray(backupIds));
assert.strictEqual(typeof backupFilePath, 'string');
assert.strictEqual(typeof destDir, 'string');
var events = new EventEmitter();
var total = 0;
function downloadFile(s3, content, iteratorCallback) {
var relativePath = path.relative(backupFilePath, content.Key);
events.emit('progress', `Downloading ${relativePath}`);
mkdirp(path.dirname(path.join(destDir, relativePath)), function (error) {
if (error) return iteratorCallback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
download(apiConfig, content.Key, function (error, sourceStream) {
if (error) return iteratorCallback(error);
var destStream = fs.createWriteStream(path.join(destDir, relativePath));
destStream.on('open', function () {
sourceStream.pipe(destStream);
});
destStream.on('error', function (error) {
return iteratorCallback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
});
destStream.on('finish', iteratorCallback);
});
});
}
const concurrency = 10;
listDir(apiConfig, backupFilePath, function (s3, objects, done) {
total += objects.length;
async.eachLimit(objects, concurrency, downloadFile.bind(null, s3), done);
}, function (error) {
events.emit('progress', `Downloaded ${total} files`);
events.emit('done', error);
});
return events;
}
// Copies every object under oldFilePath to newFilePath within the same bucket.
// Returns an EventEmitter that emits 'progress' (string messages) and
// 'done' (error or null). Objects >= 5GB use multipart server-side copy.
function copy(apiConfig, oldFilePath, newFilePath) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldFilePath, 'string');
    assert.strictEqual(typeof newFilePath, 'string');

    var events = new EventEmitter(), retryCount = 0;

    // Copies one object; picks plain copyObject or a multipart copy based on its size.
    function copyFile(s3, content, iteratorCallback) {
        var relativePath = path.relative(oldFilePath, content.Key);

        // Shared completion handler: maps S3 errors onto BackupsError codes.
        function done(error) {
            if (error && error.code === 'NoSuchKey') return iteratorCallback(new BackupsError(BackupsError.NOT_FOUND, `Old backup not found: ${content.Key}`));
            if (error) {
                debug('copy: s3 copy error when copying %s %s', content.Key, error);
                return iteratorCallback(new BackupsError(BackupsError.EXTERNAL_ERROR, `Error copying ${content.Key} : ${error.message} ${error.code}`));
            }
            iteratorCallback(null);
        }

        var copyParams = {
            Bucket: apiConfig.bucket,
            Key: path.join(newFilePath, relativePath)
        };

        // S3 copyObject has a file size limit of 5GB so if we have larger files, we do a multipart copy
        if (content.Size < 5 * 1024 * 1024 * 1024 || apiConfig.provider === 'digitalocean-spaces') { // DO has not implemented this yet
            events.emit('progress', `Copying ${relativePath}`);
            // for exoscale, '/' should not be encoded
            copyParams.CopySource = path.join(apiConfig.bucket, encodeURIComponent(content.Key)); // See aws-sdk-js/issues/1302
            s3.copyObject(copyParams, done).on('retry', function (response) {
                ++retryCount; // feeds the adaptive concurrency in the listDir iterator below
                events.emit('progress', `Retrying (${response.retryCount+1}) copy of ${relativePath}. Status code: ${response.httpResponse.statusCode}`);
            });
            return;
        }

        events.emit('progress', `Copying (multipart) ${relativePath}`);
        s3.createMultipartUpload(copyParams, function (error, result) {
            if (error) return done(error);

            const CHUNK_SIZE = 1024 * 1024 * 1024; // 1GB - rather random size
            var uploadId = result.UploadId;
            var uploadedParts = [];
            var partNumber = 1;
            var startBytes = 0;
            var endBytes = 0;
            var size = content.Size-1; // CopySourceRange is an inclusive byte range

            // Sequentially copies [startBytes, endBytes] chunks, then completes the upload.
            function copyNextChunk() {
                endBytes = startBytes + CHUNK_SIZE;
                if (endBytes > size) endBytes = size;

                var params = {
                    Bucket: apiConfig.bucket,
                    Key: path.join(newFilePath, relativePath),
                    CopySource: path.join(apiConfig.bucket, encodeURIComponent(content.Key)), // See aws-sdk-js/issues/1302
                    CopySourceRange: 'bytes=' + startBytes + '-' + endBytes,
                    PartNumber: partNumber,
                    UploadId: uploadId
                };

                s3.uploadPartCopy(params, function (error, result) {
                    if (error) return done(error);
                    uploadedParts.push({ ETag: result.CopyPartResult.ETag, PartNumber: partNumber });
                    if (endBytes < size) {
                        startBytes = endBytes + 1;
                        partNumber++;
                        return copyNextChunk();
                    }
                    // all chunks copied; stitch the parts together
                    var params = {
                        Bucket: apiConfig.bucket,
                        Key: path.join(newFilePath, relativePath),
                        MultipartUpload: { Parts: uploadedParts },
                        UploadId: uploadId
                    };
                    s3.completeMultipartUpload(params, done);
                }).on('retry', function (response) {
                    ++retryCount;
                    events.emit('progress', `Retrying (${response.retryCount+1}) multipart copy of ${relativePath}. Status code: ${response.httpResponse.statusCode}`);
                });
            }

            copyNextChunk();
        });
    }

    var total = 0, concurrency = 4;
    listDir(apiConfig, oldFilePath, function (s3, objects, done) {
        total += objects.length;
        // adapt concurrency per batch: grow when the previous batch was clean, shrink on retries
        // NOTE(review): Math.max(concurrency - 1, 5) can only raise 4 to 5 — floor/ceiling look swapped; confirm intent
        if (retryCount === 0) concurrency = Math.min(concurrency + 1, 10); else concurrency = Math.max(concurrency - 1, 5);
        events.emit('progress', `${retryCount} errors. concurrency set to ${concurrency}`);
        retryCount = 0;
        async.eachLimit(objects, concurrency, copyFile.bind(null, s3), done);
    }, function (error) {
        events.emit('progress', `Copied ${total} files`);
        events.emit('done', error);
    });

    return events;
}
// Removes a single object from the backup bucket. Delete failures are logged
// but not propagated — removal is best-effort by design.
// callback(null) always.
function remove(apiConfig, filename, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    getS3Config(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var s3 = new AWS.S3(credentials);

        var deleteParams = {
            Bucket: apiConfig.bucket,
            Delete: {
                Objects: [{ Key: filename }]
            }
        };

        s3.deleteObjects(deleteParams, function (error) {
            if (error) debug('remove: Unable to remove %s. Not fatal.', filename, error);
            callback(null);
        });
    });
}
// Removes every object under pathPrefix from the bucket, in listing order.
// Returns an EventEmitter that emits 'progress' (string) and 'done' (error or null).
function removeDir(apiConfig, pathPrefix) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof pathPrefix, 'string');

    var events = new EventEmitter();
    var total = 0;

    // Issues one batched deleteObjects call for the given listing contents.
    function deleteFiles(s3, contents, iteratorCallback) {
        var deleteParams = {
            Bucket: apiConfig.bucket,
            Delete: {
                Objects: contents.map(function (c) { return { Key: c.Key }; })
            }
        };

        events.emit('progress', `Removing ${contents.length} files from ${contents[0].Key} to ${contents[contents.length-1].Key}`);

        s3.deleteObjects(deleteParams, function (error /*, deleteData */) {
            if (error) {
                // NOTE(review): deleteParams.Key does not exist (keys live in Delete.Objects) — this interpolates 'undefined'; verify
                events.emit('progress', `Unable to remove ${deleteParams.Key} ${error.message}`);
                return iteratorCallback(error);
            }
            iteratorCallback();
        });
    }

    listDir(apiConfig, pathPrefix, function (s3, objects, done) {
        total += objects.length;
        const batchSize = apiConfig.provider !== 'digitalocean-spaces' ? 1000 : 100; // throttle objects in each request
        // NOTE(review): batchSize is never 1 given the line above, so the ternary always chunks — confirm intent
        var chunks = batchSize === 1 ? objects : chunk(objects, batchSize);
        async.eachSeries(chunks, deleteFiles.bind(null, s3), done);
    }, function (error) {
        events.emit('progress', `Removed ${total} files`);
        events.emit('done', error);
    });

    return events;
}
function testConfig(apiConfig, callback) {
assert.strictEqual(typeof apiConfig, 'object');
assert.strictEqual(typeof callback, 'function');
if (typeof apiConfig.accessKeyId !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'accessKeyId must be a string'));
if (typeof apiConfig.secretAccessKey !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'secretAccessKey must be a string'));
if (apiConfig.provider === 'caas') {
if (typeof apiConfig.token !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'token must be a string'));
} else {
if (typeof apiConfig.accessKeyId !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'accessKeyId must be a string'));
if (typeof apiConfig.secretAccessKey !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'secretAccessKey must be a string'));
}
if (typeof apiConfig.bucket !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'bucket must be a string'));
if (typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'prefix must be a string'));
if ('signatureVersion' in apiConfig && typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'signatureVersion must be a string'));
if ('endpoint' in apiConfig && typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'endpoint must be a string'));
if ('signatureVersion' in apiConfig && typeof apiConfig.signatureVersion !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'signatureVersion must be a string'));
if ('endpoint' in apiConfig && typeof apiConfig.endpoint !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'endpoint must be a string'));
// attempt to upload and delete a file with new credentials
getBackupCredentials(apiConfig, function (error, credentials) {
getS3Config(apiConfig, function (error, credentials) {
if (error) return callback(error);
var params = {
@@ -236,10 +500,34 @@ function testConfig(apiConfig, callback) {
});
}
// Notifies the CaaS appstore that a backup completed; a no-op for every other provider.
// CaaS expects filenames instead of backupIds, this means no prefix but a file type extension.
// callback(error) — EXTERNAL_ERROR on network failure or non-200 response.
function backupDone(apiConfig, backupId, appBackupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    if (apiConfig.provider !== 'caas') return callback();

    var FILE_TYPE = '.tar.gz.enc';
    var boxBackupFilename = backupId + FILE_TYPE;
    var appBackupFilenames = appBackupIds.map(function (id) { return id + FILE_TYPE; });

    debug('[%s] backupDone: %s apps %j', backupId, boxBackupFilename, appBackupFilenames);

    var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/backupDone';
    var data = {
        boxVersion: config.version(),
        restoreKey: boxBackupFilename,
        appId: null, // now unused
        appVersion: null, // now unused
        appBackupIds: appBackupFilenames
    };

    superagent.post(url).send(data).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
        if (error && !error.response) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
        if (result.statusCode !== 200) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result.text));

        return callback(null);
    });
}
-105
View File
@@ -1,105 +0,0 @@
'use strict';
exports = module.exports = {
create: create,
extract: extract
};
var assert = require('assert'),
BackupsError = require('../backups.js').BackupsError,
crypto = require('crypto'),
debug = require('debug')('box:storage/targz'),
mkdirp = require('mkdirp'),
progress = require('progress-stream'),
tar = require('tar-fs'),
zlib = require('zlib');
// Streams a tarball of sourceDirectories into outStream: tar | gzip [| encrypt] | progress.
// sourceDirectories entries are { source, destination } — entries are renamed in the archive.
// key: optional encryption passphrase (null disables encryption).
// callback(error) is invoked on any stream error; completion is signaled by outStream.
function create(sourceDirectories, key, outStream, callback) {
    assert(Array.isArray(sourceDirectories));
    assert(key === null || typeof key === 'string');
    assert.strictEqual(typeof callback, 'function');

    var pack = tar.pack('/', {
        dereference: false, // pack the symlink and not what it points to
        entries: sourceDirectories.map(function (m) { return m.source; }),
        map: function(header) {
            // rewrite each archive path from its source dir to its destination name
            sourceDirectories.forEach(function (m) {
                header.name = header.name.replace(new RegExp('^' + m.source + '(/?)'), m.destination + '$1');
            });
            return header;
        },
        strict: false // do not error for unknown types (skip fifo, char/block devices)
    });

    var gzip = zlib.createGzip({});
    var progressStream = progress({ time: 10000 }); // display a progress every 10 seconds

    pack.on('error', function (error) {
        debug('backup: tar stream error.', error);
        callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
    });

    gzip.on('error', function (error) {
        debug('backup: gzip stream error.', error);
        callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
    });

    progressStream.on('progress', function(progress) {
        debug('backup: %s@%s', Math.round(progress.transferred/1024/1024) + 'M', Math.round(progress.speed/1024/1024) + 'Mbps');
    });

    if (key !== null) {
        // NOTE(review): crypto.createCipher is deprecated (weak key derivation, no IV); migrating to
        // createCipheriv would change the on-disk format and break existing backups — needs a format version bump
        var encrypt = crypto.createCipher('aes-256-cbc', key);
        encrypt.on('error', function (error) {
            debug('backup: encrypt stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });
        pack.pipe(gzip).pipe(encrypt).pipe(progressStream).pipe(outStream);
    } else {
        pack.pipe(gzip).pipe(progressStream).pipe(outStream);
    }
}
// Extracts a (possibly encrypted) gzipped tarball from inStream into destination:
// progress [| decrypt] | gunzip | untar. Mirrors create() above.
// key: optional decryption passphrase (null disables decryption).
// callback(error) on stream error; callback(null) when extraction finishes.
function extract(inStream, destination, key, callback) {
    assert.strictEqual(typeof destination, 'string');
    assert(key === null || typeof key === 'string');
    assert.strictEqual(typeof callback, 'function');

    mkdirp(destination, function (error) {
        if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));

        var gunzip = zlib.createGunzip({});
        var progressStream = progress({ time: 10000 }); // display a progress every 10 seconds
        var extract = tar.extract(destination);

        progressStream.on('progress', function(progress) {
            debug('restore: %s@%s', Math.round(progress.transferred/1024/1024) + 'M', Math.round(progress.speed/1024/1024) + 'Mbps');
        });

        gunzip.on('error', function (error) {
            debug('restore: gunzip stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });

        extract.on('error', function (error) {
            debug('restore: extract stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });

        extract.on('finish', function () {
            debug('restore: done.');
            callback(null);
        });

        if (key !== null) {
            // decryption must match create(): decrypt sits between the raw input and gunzip
            var decrypt = crypto.createDecipher('aes-256-cbc', key);
            decrypt.on('error', function (error) {
                debug('restore: decrypt stream error.', error);
                callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
            });
            inStream.pipe(progressStream).pipe(decrypt).pipe(gunzip).pipe(extract);
        } else {
            inStream.pipe(progressStream).pipe(gunzip).pipe(extract);
        }
    });
}
+2 -2
View File
@@ -40,10 +40,9 @@ SubdomainError.NOT_FOUND = 'No such domain';
SubdomainError.EXTERNAL_ERROR = 'External error';
SubdomainError.BAD_FIELD = 'Bad Field';
SubdomainError.STILL_BUSY = 'Still busy';
SubdomainError.MISSING_CREDENTIALS = 'Missing credentials';
SubdomainError.INTERNAL_ERROR = 'Internal error';
SubdomainError.ACCESS_DENIED = 'Access denied';
SubdomainError.INVALID_PROVIDER = 'provider must be route53, digitalocean, cloudflare, noop, manual or caas';
SubdomainError.INVALID_PROVIDER = 'provider must be route53, gcdns, digitalocean, cloudflare, noop, manual or caas';
// choose which subdomain backend we use for test purpose we use route53
function api(provider) {
@@ -53,6 +52,7 @@ function api(provider) {
case 'caas': return require('./dns/caas.js');
case 'cloudflare': return require('./dns/cloudflare.js');
case 'route53': return require('./dns/route53.js');
case 'gcdns': return require('./dns/gcdns.js');
case 'digitalocean': return require('./dns/digitalocean.js');
case 'noop': return require('./dns/noop.js');
case 'manual': return require('./dns/manual.js');
+151
View File
@@ -0,0 +1,151 @@
'use strict';
var assert = require('assert'),
async = require('async'),
debug = require('debug')('box:syncer'),
fs = require('fs'),
path = require('path'),
paths = require('./paths.js'),
safe = require('safetydance');
exports = module.exports = {
sync: sync
};
// Loads the sync cache: one JSON record per line. A missing or unreadable
// file yields an empty list.
function readCache(cacheFile) {
    assert.strictEqual(typeof cacheFile, 'string');

    var contents = safe.fs.readFileSync(cacheFile, 'utf8');
    if (!contents) return [ ];

    var lines = contents.trim().split('\n');
    return lines.map(JSON.parse);
}
// Lists dir sorted by name, pairing each entry with its lstat result.
// Returns [ { stat, name } ]; an unreadable directory yields an empty list.
// stat may be null for entries that failed to lstat — callers must check.
function readTree(dir) {
    assert.strictEqual(typeof dir, 'string');

    var list = safe.fs.readdirSync(dir);
    if (!list) return [ ]; // readdir failed; the old code called .sort() on null and threw before the guard

    list.sort();

    return list.map(function (e) { return { stat: safe.fs.lstatSync(path.join(dir, e)), name: e }; });
}
// True if the cached st_mode marks a directory. Masks with S_IFMT first:
// the file-type field must be compared whole, since type values share bits
// (e.g. S_IFSOCK contains both the S_IFREG and S_IFDIR bits).
function ISDIR(x) {
    return (x & fs.constants.S_IFMT) === fs.constants.S_IFDIR;
}
// True if the cached st_mode marks a regular file. Masks with S_IFMT first
// so modes like S_IFSOCK (whose bits include S_IFREG) are not misclassified.
function ISFILE(x) {
    return (x & fs.constants.S_IFMT) === fs.constants.S_IFREG;
}
// Incrementally syncs the tree under `dir` against the line-delimited JSON cache
// of the previous run. Produces 'add'/'remove'/'removedir' tasks fed to
// taskProcessor (deletes first, then additions, each via async.eachLimit).
// On success the freshly written cache atomically replaces the old one; a
// leftover .new file on the next run signals a crash and forces a full re-add.
function sync(dir, taskProcessor, concurrency, callback) {
    assert.strictEqual(typeof dir, 'string');
    assert.strictEqual(typeof taskProcessor, 'function');
    assert.strictEqual(typeof concurrency, 'number');
    assert.strictEqual(typeof callback, 'function');

    var curCacheIndex = 0, addQueue = [ ], delQueue = [ ];
    var cacheFile = path.join(paths.BACKUP_INFO_DIR, path.basename(dir) + '.sync.cache'),
        newCacheFile = path.join(paths.BACKUP_INFO_DIR, path.basename(dir) + '.sync.cache.new');
    var cache = [ ];

    // if cache is missing or if we crashed/errored in previous run, start out empty. TODO: do a remote listDir and rebuild
    if (!safe.fs.existsSync(cacheFile)) {
        delQueue.push({ operation: 'removedir', path: '', reason: 'nocache' });
    } else if (safe.fs.existsSync(newCacheFile)) {
        delQueue.push({ operation: 'removedir', path: '', reason: 'crash' });
    } else {
        cache = readCache(cacheFile);
    }

    var newCacheFd = safe.fs.openSync(newCacheFile, 'w'); // truncates any existing file
    if (newCacheFd === -1) return callback(new Error('Error opening new cache file: ' + safe.error.message));

    // Queues removals for every (sorted) cache entry before entryPath;
    // entryPath === '' flushes the remainder of the cache.
    function advanceCache(entryPath) {
        var lastRemovedDir = null;
        for (; curCacheIndex !== cache.length && (entryPath === '' || cache[curCacheIndex].path < entryPath); ++curCacheIndex) {
            // ignore subdirs of lastRemovedDir since it was removed already
            if (lastRemovedDir && cache[curCacheIndex].path.startsWith(lastRemovedDir)) continue;

            if (ISDIR(cache[curCacheIndex].stat.mode)) {
                delQueue.push({ operation: 'removedir', path: cache[curCacheIndex].path, reason: 'missing' });
                lastRemovedDir = cache[curCacheIndex].path;
            } else {
                delQueue.push({ operation: 'remove', path: cache[curCacheIndex].path, reason: 'missing' });
                lastRemovedDir = null;
            }
        }
    }

    // Depth-first walk of the live tree, merged against the sorted cache like
    // a two-pointer diff; also writes every live entry to the new cache file.
    function traverse(relpath) {
        var entries = readTree(path.join(dir, relpath));
        for (var i = 0; i < entries.length; i++) {
            var entryPath = path.join(relpath, entries[i].name);
            var entryStat = entries[i].stat;

            if (!entryStat) continue; // some stat error. pretend it doesn't exist
            if (!entryStat.isDirectory() && !entryStat.isFile()) continue; // ignore non-files and dirs
            if (entryStat.isSymbolicLink()) continue;

            // NOTE(review): fs.Stats exposes `ino`, not `inode` — this records undefined on both
            // sides, so the inode comparison below never fires; verify against the task consumers
            safe.fs.appendFileSync(newCacheFd, JSON.stringify({ path: entryPath, stat: { mtime: entryStat.mtime.getTime(), size: entryStat.size, inode: entryStat.inode, mode: entryStat.mode } }) + '\n');

            if (curCacheIndex !== cache.length && cache[curCacheIndex].path < entryPath) { // files disappeared. first advance cache as needed
                advanceCache(entryPath);
            }

            const cachePath = curCacheIndex === cache.length ? null : cache[curCacheIndex].path;
            const cacheStat = curCacheIndex === cache.length ? null : cache[curCacheIndex].stat;

            if (cachePath === null || cachePath > entryPath) { // new files appeared
                if (entryStat.isDirectory()) {
                    traverse(entryPath);
                } else {
                    addQueue.push({ operation: 'add', path: entryPath, reason: 'new' });
                }
            } else if (ISDIR(cacheStat.mode) && entryStat.isDirectory()) { // dir names match
                ++curCacheIndex;
                traverse(entryPath);
            } else if (ISFILE(cacheStat.mode) && entryStat.isFile()) { // file names match
                if (entryStat.mtime.getTime() !== cacheStat.mtime || entryStat.size != cacheStat.size || entryStat.inode !== cacheStat.inode) { // file changed
                    addQueue.push({ operation: 'add', path: entryPath, reason: 'changed' });
                }
                ++curCacheIndex;
            } else if (entryStat.isDirectory()) { // was a file, now a directory
                delQueue.push({ operation: 'remove', path: cachePath, reason: 'wasfile' });
                ++curCacheIndex;
                traverse(entryPath);
            } else { // was a dir, now a file
                delQueue.push({ operation: 'removedir', path: cachePath, reason: 'wasdir' });
                // skip all cached children of the removed directory
                while (curCacheIndex !== cache.length && cache[curCacheIndex].path.startsWith(cachePath)) ++curCacheIndex;
                addQueue.push({ operation: 'add', path: entryPath, reason: 'wasdir' });
            }
        }
    }

    traverse('');
    advanceCache(''); // remove rest of the cache entries
    safe.fs.closeSync(newCacheFd);

    debug('Processing %s deletes and %s additions', delQueue.length, addQueue.length);

    async.eachLimit(delQueue, concurrency, taskProcessor, function (error) {
        debug('Done processing deletes', error);
        // NOTE(review): a delete-phase error is logged but not propagated; adds still run — confirm this is intentional
        async.eachLimit(addQueue, concurrency, taskProcessor, function (error) {
            debug('Done processing adds', error);
            if (error) return callback(error);

            // swap in the new cache; a failed rename is non-fatal (next run re-syncs)
            safe.fs.unlinkSync(cacheFile);
            if (!safe.fs.renameSync(newCacheFile, cacheFile)) debug('Unable to save new cache file');

            callback();
        });
    });
}
+5 -3
View File
@@ -135,7 +135,7 @@ describe('Apps', function () {
describe('validateHostname', function () {
it('does not allow admin subdomain', function () {
expect(apps._validateHostname(constants.ADMIN_LOCATION, 'cloudron.us')).to.be.an(Error);
expect(apps._validateHostname('my', 'cloudron.us')).to.be.an(Error);
});
it('cannot have >63 length subdomains', function () {
@@ -325,11 +325,13 @@ describe('Apps', function () {
});
});
it('succeeds with admin not being special', function (done) {
it('returns all apps for admin', function (done) {
apps.getAllByUser(ADMIN_0, function (error, result) {
expect(error).to.equal(null);
expect(result.length).to.equal(1);
expect(result.length).to.equal(3);
expect(result[0].id).to.equal(APP_0.id);
expect(result[1].id).to.equal(APP_1.id);
expect(result[2].id).to.equal(APP_2.id);
done();
});
});
+19 -9
View File
@@ -13,7 +13,7 @@ var addons = require('../addons.js'),
database = require('../database.js'),
expect = require('expect.js'),
fs = require('fs'),
js2xml = require('js2xmlparser'),
js2xml = require('js2xmlparser').parse,
net = require('net'),
nock = require('nock'),
paths = require('../paths.js'),
@@ -138,9 +138,19 @@ describe('apptask', function () {
});
});
it('delete volume', function (done) {
apptask._deleteVolume(APP, function (error) {
it('delete volume - removeDirectory (false) ', function (done) {
apptask._deleteVolume(APP, { removeDirectory: false }, function (error) {
expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id + '/data')).to.be(true);
expect(fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id)).to.be(true);
expect(fs.readdirSync(paths.APPS_DATA_DIR + '/' + APP.id).length).to.be(0); // empty
expect(error).to.be(null);
done();
});
});
it('delete volume - removeDirectory (true) ', function (done) {
apptask._deleteVolume(APP, { removeDirectory: true }, function (error) {
expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id)).to.be(true);
expect(error).to.be(null);
done();
});
@@ -171,7 +181,7 @@ describe('apptask', function () {
var badApp = _.extend({ }, APP);
badApp.manifest = { };
apptask._verifyManifest(badApp, function (error) {
apptask._verifyManifest(badApp.manifest, function (error) {
expect(error).to.be.ok();
done();
});
@@ -182,7 +192,7 @@ describe('apptask', function () {
badApp.manifest = _.extend({ }, APP.manifest);
delete badApp.manifest.id;
apptask._verifyManifest(badApp, function (error) {
apptask._verifyManifest(badApp.manifest, function (error) {
expect(error).to.be.ok();
done();
});
@@ -193,7 +203,7 @@ describe('apptask', function () {
badApp.manifest = _.extend({ }, APP.manifest);
badApp.manifest.maxBoxVersion = '0.0.0'; // max box version is too small
apptask._verifyManifest(badApp, function (error) {
apptask._verifyManifest(badApp.manifest, function (error) {
expect(error).to.be.ok();
done();
});
@@ -202,7 +212,7 @@ describe('apptask', function () {
it('verifies manifest', function (done) {
var goodApp = _.extend({ }, APP);
apptask._verifyManifest(goodApp, function (error) {
apptask._verifyManifest(goodApp.manifest, function (error) {
expect(error).to.be(null);
done();
});
@@ -214,7 +224,7 @@ describe('apptask', function () {
var awsScope = nock('http://localhost:5353')
.get('/2013-04-01/hostedzone')
.times(2)
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { arrayMap: { HostedZones: 'HostedZone'} }))
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { wrapHandlers: { HostedZones: () => 'HostedZone'} }))
.get('/2013-04-01/hostedzone/ZONEID/rrset?maxitems=1&name=applocation.' + config.fqdn() + '.&type=A')
.reply(200, js2xml('ListResourceRecordSetsResponse', { ResourceRecordSets: [ ] }, { 'Content-Type': 'application/xml' }))
.post('/2013-04-01/hostedzone/ZONEID/rrset/')
@@ -232,7 +242,7 @@ describe('apptask', function () {
var awsScope = nock('http://localhost:5353')
.get('/2013-04-01/hostedzone')
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { arrayMap: { HostedZones: 'HostedZone'} }))
.reply(200, js2xml('ListHostedZonesResponse', awsHostedZones, { wrapHandlers: { HostedZones: () => 'HostedZone'} }))
.post('/2013-04-01/hostedzone/ZONEID/rrset/')
.reply(200, js2xml('ChangeResourceRecordSetsResponse', { ChangeInfo: { Id: 'RRID', Status: 'INSYNC' } }));
+210 -13
View File
@@ -7,24 +7,104 @@
'use strict';
var async = require('async'),
appdb = require('../appdb.js'),
backupdb = require('../backupdb.js'),
backups = require('../backups.js'),
createTree = require('./common.js').createTree,
database = require('../database'),
DatabaseError = require('../databaseerror.js'),
expect = require('expect.js'),
settings = require('../settings.js');
fs = require('fs'),
os = require('os'),
mkdirp = require('mkdirp'),
readdirp = require('readdirp'),
path = require('path'),
progress = require('../progress.js'),
rimraf = require('rimraf'),
settings = require('../settings.js'),
SettingsError = require('../settings.js').SettingsError;
// Test helper: recursively compares two directory trees and fails if any
// directory or file exists in one but not the other (files also compare mode).
// callback(null) when the trees match, callback(error) otherwise.
function compareDirectories(one, two, callback) {
    readdirp({ root: one }, function (error, treeOne) {
        if (error) return callback(error);

        readdirp({ root: two }, function (error, treeTwo) {
            if (error) return callback(error);

            var mismatch = [];

            // collects entries of a that have no same-path entry in b
            function compareDirs(a, b) {
                a.forEach(function (tmpA) {
                    var found = b.find(function (tmpB) {
                        return tmpA.path === tmpB.path;
                    });

                    if (!found) mismatch.push(tmpA);
                });
            }

            // like compareDirs but files must also agree on mode
            function compareFiles(a, b) {
                a.forEach(function (tmpA) {
                    var found = b.find(function (tmpB) {
                        // TODO check file or symbolic link
                        return tmpA.path === tmpB.path && tmpA.mode === tmpB.mode;
                    });

                    if (!found) mismatch.push(tmpA);
                });
            }

            // run both directions so extras on either side are caught
            compareDirs(treeOne.directories, treeTwo.directories);
            compareDirs(treeTwo.directories, treeOne.directories);
            compareFiles(treeOne.files, treeTwo.files);
            compareFiles(treeTwo.files, treeOne.files);

            if (mismatch.length) {
                console.error('Files not found in both: %j', mismatch);
                return callback(new Error('file mismatch'));
            }

            callback(null);
        });
    });
}
// Test helper: kicks off a backup and polls the progress API every second until
// it reports 100%, then returns the newest backup record via callback(null, backup).
function createBackup(callback) {
    backups.backup({ username: 'test' }, function (error) { // this call does not wait for the backup!
        if (error) return callback(error);

        function waitForBackup() {
            var p = progress.getAll();
            if (p.backup.percent !== 100) return setTimeout(waitForBackup, 1000);
            // a non-empty message at 100% indicates failure
            if (p.backup.message) return callback(new Error('backup failed:' + p.backup.message));

            // fetch the single most recent completed backup
            backups.getByStatePaged(backupdb.BACKUP_STATE_NORMAL, 1, 1, function (error, result) {
                if (error) return callback(error);
                if (result.length !== 1) return callback(new Error('result is not of length 1'));

                callback(null, result[0]);
            });
        }

        setTimeout(waitForBackup, 1000);
    });
}
describe('backups', function () {
before(function (done) {
const BACKUP_DIR = path.join(os.tmpdir(), 'cloudron-backup-test');
async.series([
mkdirp.bind(null, BACKUP_DIR),
database.initialize,
database._clear,
settings.initialize,
settings.setBackupConfig.bind(null, {
provider: 'filesystem',
key: 'enckey',
retentionSecs: 1
backupFolder: BACKUP_DIR,
retentionSecs: 1,
format: 'tgz'
})
], done);
});
@@ -44,7 +124,8 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_BOX,
dependsOn: [ 'backup-app-00', 'backup-app-01' ],
restoreConfig: null
manifest: null,
format: 'tgz'
};
var BACKUP_0_APP_0 = {
@@ -52,7 +133,8 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [],
restoreConfig: null
manifest: null,
format: 'tgz'
};
var BACKUP_0_APP_1 = {
@@ -60,7 +142,8 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [],
restoreConfig: null
manifest: null,
format: 'tgz'
};
var BACKUP_1 = {
@@ -68,7 +151,8 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_BOX,
dependsOn: [ 'backup-app-10', 'backup-app-11' ],
restoreConfig: null
manifest: null,
format: 'tgz'
};
var BACKUP_1_APP_0 = {
@@ -76,7 +160,8 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [],
restoreConfig: null
manifest: null,
format: 'tgz'
};
var BACKUP_1_APP_1 = {
@@ -84,11 +169,12 @@ describe('backups', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [],
restoreConfig: null
manifest: null,
format: 'tgz'
};
it('succeeds without backups', function (done) {
backups.cleanup(done);
backups.cleanup({ username: 'test' }, done);
});
it('succeeds with box backups, keeps latest', function (done) {
@@ -100,7 +186,7 @@ describe('backups', function () {
}, function (error) {
expect(error).to.not.be.ok();
backups.cleanup(function (error) {
backups.cleanup({ username: 'test' }, function (error) {
expect(error).to.not.be.ok();
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_BOX, 1, 1000, function (error, result) {
@@ -121,7 +207,7 @@ describe('backups', function () {
});
it('does not remove expired backups if only one left', function (done) {
backups.cleanup(function (error) {
backups.cleanup({ username: 'test' }, function (error) {
expect(error).to.not.be.ok();
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_BOX, 1, 1000, function (error, result) {
@@ -146,7 +232,7 @@ describe('backups', function () {
// wait for expiration
setTimeout(function () {
backups.cleanup(function (error) {
backups.cleanup({ username: 'test' }, function (error) {
expect(error).to.not.be.ok();
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_APP, 1, 1000, function (error, result) {
@@ -160,4 +246,115 @@ describe('backups', function () {
});
});
});
describe('fs meta data', function () {
var tmpdir;
before(function () {
tmpdir = fs.mkdtempSync(path.join(os.tmpdir(), 'backups-test'));
});
after(function () {
rimraf.sync(tmpdir);
});
it('saves special files', function (done) {
createTree(tmpdir, { 'data': { 'subdir': { 'emptydir': { } } }, 'dir2': { 'file': 'stuff' } });
fs.chmodSync(path.join(tmpdir, 'dir2/file'), parseInt('0755', 8));
backups._saveFsMetadata(tmpdir, function (error) {
expect(error).to.not.be.ok();
var emptyDirs = JSON.parse(fs.readFileSync(path.join(tmpdir, 'fsmetadata.json'), 'utf8')).emptyDirs;
expect(emptyDirs).to.eql(['./data/subdir/emptydir']);
var execFiles = JSON.parse(fs.readFileSync(path.join(tmpdir, 'fsmetadata.json'), 'utf8')).execFiles;
expect(execFiles).to.eql(['./dir2/file']);
done();
});
});
it('restores special files', function (done) {
rimraf.sync(path.join(tmpdir, 'data'));
expect(fs.existsSync(path.join(tmpdir, 'data/subdir/emptydir'))).to.be(false); // just make sure rimraf worked
backups._restoreFsMetadata(tmpdir, function (error) {
expect(error).to.not.be.ok();
expect(fs.existsSync(path.join(tmpdir, 'data/subdir/emptydir'))).to.be(true);
var mode = fs.statSync(path.join(tmpdir, 'dir2/file')).mode;
expect(mode & ~fs.constants.S_IFREG).to.be(parseInt('0755', 8));
done();
});
});
});
describe('filesystem', function () {
var backupInfo1;
var gBackupConfig = {
provider: 'filesystem',
backupFolder: path.join(os.tmpdir(), 'backups-test-filesystem'),
format: 'tgz'
};
before(function (done) {
rimraf.sync(gBackupConfig.backupFolder);
done();
});
after(function (done) {
rimraf.sync(gBackupConfig.backupFolder);
progress.clear(progress.BACKUP);
done();
});
it('fails to set backup config for non-existing folder', function (done) {
settings.setBackupConfig(gBackupConfig, function (error) {
expect(error).to.be.a(SettingsError);
expect(error.reason).to.equal(SettingsError.BAD_FIELD);
done();
});
});
it('succeeds to set backup config', function (done) {
mkdirp.sync(gBackupConfig.backupFolder);
settings.setBackupConfig(gBackupConfig, function (error) {
expect(error).to.be(null);
done();
});
});
it('can backup', function (done) {
this.timeout(6000);
createBackup(function (error, result) {
expect(error).to.be(null);
expect(fs.statSync(path.join(gBackupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
expect(fs.statSync(path.join(gBackupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2);
backupInfo1 = result;
done();
});
});
it('can take another backup', function (done) {
this.timeout(6000);
createBackup(function (error, result) {
expect(error).to.be(null);
expect(fs.statSync(path.join(gBackupConfig.backupFolder, 'snapshot/box.tar.gz')).nlink).to.be(2); // hard linked to a rotated backup
expect(fs.statSync(path.join(gBackupConfig.backupFolder, `${result.id}.tar.gz`)).nlink).to.be(2); // hard linked to new backup
expect(fs.statSync(path.join(gBackupConfig.backupFolder, `${backupInfo1.id}.tar.gz`)).nlink).to.be(1); // not hard linked anymore
done();
});
});
});
});
+3 -4
View File
@@ -15,11 +15,10 @@ scripts=("${SOURCE_DIR}/src/scripts/rmappdir.sh" \
"${SOURCE_DIR}/src/scripts/reboot.sh" \
"${SOURCE_DIR}/src/scripts/update.sh" \
"${SOURCE_DIR}/src/scripts/collectlogs.sh" \
"${SOURCE_DIR}/src/scripts/reloadcollectd.sh" \
"${SOURCE_DIR}/src/scripts/configurecollectd.sh" \
"${SOURCE_DIR}/src/scripts/authorized_keys.sh" \
"${SOURCE_DIR}/src/scripts/node.sh" \
"${SOURCE_DIR}/src/scripts/mvlogrotateconfig.sh" \
"${SOURCE_DIR}/src/scripts/rmlogrotateconfig.sh")
"${SOURCE_DIR}/src/backuptask.js" \
"${SOURCE_DIR}/src/scripts/configurelogrotate.sh")
for script in "${scripts[@]}"; do
if [[ $(sudo -n "${script}" --check 2>/dev/null) != "OK" ]]; then
+33
View File
@@ -0,0 +1,33 @@
'use strict';
var fs = require('fs'),
mkdirp = require('mkdirp'),
path = require('path'),
rimraf = require('rimraf');
exports = module.exports = {
createTree: createTree
};
function createTree(root, obj) {
rimraf.sync(root);
mkdirp.sync(root);
function createSubTree(tree, curpath) {
for (var key in tree) {
if (typeof tree[key] === 'string') {
if (key.startsWith('link:')) {
fs.symlinkSync(tree[key], path.join(curpath, key.slice(5)));
} else {
fs.writeFileSync(path.join(curpath, key), tree[key], 'utf8');
}
} else {
fs.mkdirSync(path.join(curpath, key));
createSubTree(tree[key], path.join(curpath, key));
}
}
}
createSubTree(obj, root);
}
+15 -13
View File
@@ -6,7 +6,6 @@
'use strict';
var config = require('../config.js'),
constants = require('../constants.js'),
expect = require('expect.js'),
fs = require('fs'),
path = require('path');
@@ -17,7 +16,7 @@ describe('config', function () {
});
after(function () {
delete require.cache[require.resolve('../config.js')];
config._reset();
});
it('baseDir() is set', function (done) {
@@ -25,11 +24,6 @@ describe('config', function () {
done();
});
it('cloudron.conf generated automatically', function (done) {
expect(fs.existsSync(path.join(config.baseDir(), 'configs/cloudron.conf'))).to.be.ok();
done();
});
it('can get and set version', function (done) {
config.setVersion('1.2.3');
expect(config.version()).to.be('1.2.3');
@@ -38,15 +32,20 @@ describe('config', function () {
it('did set default values', function () {
expect(config.isCustomDomain()).to.equal(true);
expect(config.fqdn()).to.equal('localhost');
expect(config.adminOrigin()).to.equal('https://' + constants.ADMIN_LOCATION + '.localhost');
expect(config.appFqdn('app')).to.equal('app.localhost');
expect(config.fqdn()).to.equal('');
expect(config.zoneName()).to.equal('');
expect(config.adminLocation()).to.equal('my');
});
it('set saves value in file', function (done) {
config.set('fqdn', 'example.com');
expect(JSON.parse(fs.readFileSync(path.join(config.baseDir(), 'configs/cloudron.conf'))).fqdn).to.eql('example.com');
done();
});
it('set does not save custom values in file', function (done) {
config.set('foobar', 'somevalue');
expect(JSON.parse(fs.readFileSync(path.join(config.baseDir(), 'configs/cloudron.conf'))).foobar).to.eql('somevalue');
expect(JSON.parse(fs.readFileSync(path.join(config.baseDir(), 'configs/cloudron.conf'))).foobar).to.not.be.ok();
done();
});
@@ -68,7 +67,7 @@ describe('config', function () {
expect(config.isCustomDomain()).to.equal(true);
expect(config.fqdn()).to.equal('example.com');
expect(config.adminOrigin()).to.equal('https://' + constants.ADMIN_LOCATION + '.example.com');
expect(config.adminOrigin()).to.equal('https://my.example.com');
expect(config.appFqdn('app')).to.equal('app.example.com');
expect(config.zoneName()).to.equal('example.com');
});
@@ -79,7 +78,7 @@ describe('config', function () {
expect(config.isCustomDomain()).to.equal(false);
expect(config.fqdn()).to.equal('test.example.com');
expect(config.adminOrigin()).to.equal('https://' + constants.ADMIN_LOCATION + '-test.example.com');
expect(config.adminOrigin()).to.equal('https://my-test.example.com');
expect(config.appFqdn('app')).to.equal('app-test.example.com');
expect(config.zoneName()).to.equal('example.com');
});
@@ -94,4 +93,7 @@ describe('config', function () {
done();
});
it('test machine has IPv6 support', function () {
expect(config.hasIPv6()).to.equal(true);
});
});
+12 -8
View File
@@ -536,8 +536,9 @@ describe('database', function () {
portBindings: { port: 5678 },
health: null,
accessRestriction: null,
lastBackupId: null,
restoreConfig: null,
oldConfig: null,
updateConfig: null,
memoryLimit: 4294967296,
altDomain: null,
xFrameOptions: 'DENY',
@@ -560,8 +561,9 @@ describe('database', function () {
portBindings: { },
health: null,
accessRestriction: { users: [ 'foobar' ] },
lastBackupId: null,
restoreConfig: null,
oldConfig: null,
updateConfig: null,
memoryLimit: 0,
altDomain: null,
xFrameOptions: 'SAMEORIGIN',
@@ -1025,7 +1027,8 @@ describe('database', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_BOX,
dependsOn: [ 'dep1' ],
restoreConfig: null
manifest: null,
format: 'tgz'
};
backupdb.add(backup, function (error) {
@@ -1041,7 +1044,7 @@ describe('database', function () {
expect(result.type).to.be(backupdb.BACKUP_TYPE_BOX);
expect(result.creationTime).to.be.a(Date);
expect(result.dependsOn).to.eql(['dep1']);
expect(result.restoreConfig).to.eql(null);
expect(result.manifest).to.eql(null);
done();
});
});
@@ -1064,7 +1067,7 @@ describe('database', function () {
expect(results[0].id).to.be('backup-box');
expect(results[0].version).to.be('1.0.0');
expect(results[0].dependsOn).to.eql(['dep1']);
expect(results[0].restoreConfig).to.eql(null);
expect(results[0].manifest).to.eql(null);
done();
});
@@ -1090,7 +1093,8 @@ describe('database', function () {
version: '1.0.0',
type: backupdb.BACKUP_TYPE_APP,
dependsOn: [ ],
restoreConfig: { manifest: { foo: 'bar' } }
manifest: { foo: 'bar' },
format: 'tgz'
};
backupdb.add(backup, function (error) {
@@ -1106,7 +1110,7 @@ describe('database', function () {
expect(result.type).to.be(backupdb.BACKUP_TYPE_APP);
expect(result.creationTime).to.be.a(Date);
expect(result.dependsOn).to.eql([]);
expect(result.restoreConfig).to.eql({ manifest: { foo: 'bar' } });
expect(result.manifest).to.eql({ foo: 'bar' });
done();
});
});
@@ -1120,7 +1124,7 @@ describe('database', function () {
expect(results[0].id).to.be('app_appid_123');
expect(results[0].version).to.be('1.0.0');
expect(results[0].dependsOn).to.eql([]);
expect(results[0].restoreConfig).to.eql({ manifest: { foo: 'bar' } });
expect(results[0].manifest).to.eql({ foo: 'bar' });
done();
});
+26 -4
View File
@@ -15,6 +15,7 @@ var async = require('async'),
paths = require('../paths.js'),
safe = require('safetydance'),
settings = require('../settings.js'),
settingsdb = require('../settingsdb.js'),
updatechecker = require('../updatechecker.js'),
user = require('../user.js');
@@ -30,10 +31,15 @@ var AUDIT_SOURCE = {
ip: '1.2.3.4'
};
function checkMails(number, done) {
function checkMails(number, email, done) {
// mails are enqueued async
setTimeout(function () {
expect(mailer._getMailQueue().length).to.equal(number);
if (number) {
expect(mailer._getMailQueue()[0].to).to.equal(email);
}
mailer._clearMailQueue();
done();
}, 500);
@@ -52,6 +58,7 @@ describe('digest', function () {
before(function (done) {
config._reset();
config.set('fqdn', 'domain.com');
config.set('version', '1.0.0');
config.set('apiServerOrigin', 'http://localhost:4444');
config.set('provider', 'notcaas');
@@ -63,6 +70,7 @@ describe('digest', function () {
settings.initialize,
user.createOwner.bind(null, USER_0.username, USER_0.password, USER_0.email, USER_0.displayName, AUDIT_SOURCE),
eventlog.add.bind(null, eventlog.ACTION_UPDATE, AUDIT_SOURCE, { boxUpdateInfo: { sourceTarballUrl: 'xx', version: '1.2.3', changelog: [ 'good stuff' ] } }),
settingsdb.set.bind(null, settings.MAIL_CONFIG_KEY, JSON.stringify({ enabled: true })),
mailer.start,
mailer._clearMailQueue
], done);
@@ -78,7 +86,7 @@ describe('digest', function () {
it('does not send mail with digest disabled', function (done) {
digest.maybeSend(function (error) {
if (error) return done(error);
checkMails(0, done);
checkMails(0, null, done);
});
});
@@ -93,7 +101,7 @@ describe('digest', function () {
digest.maybeSend(function (error) {
if (error) return done(error);
checkMails(1, done);
checkMails(1, `${USER_0.email}, ${USER_0.username}@${config.fqdn()}`, done);
});
});
@@ -103,7 +111,21 @@ describe('digest', function () {
digest.maybeSend(function (error) {
if (error) return done(error);
checkMails(1, done);
checkMails(1, `${USER_0.email}, ${USER_0.username}@${config.fqdn()}`, done);
});
});
it('sends mail for pending update to owner account email', function (done) {
updatechecker._setUpdateInfo({ box: null, apps: { 'appid': { manifest: { version: '1.2.5', changelog: 'noop\nreally' } } } });
settingsdb.set(settings.MAIL_CONFIG_KEY, JSON.stringify({ enabled: true }), function (error) {
if (error) return done(error);
digest.maybeSend(function (error) {
if (error) return done(error);
checkMails(1, `${USER_0.email}, ${USER_0.username}@${config.fqdn()}`, done);
});
});
});
});

Some files were not shown because too many files have changed in this diff Show More