Compare commits

20 Commits

| Author | SHA1 | Date |
|---|---|---|
| | a50409bdca | |
| | 60a722e6cc | |
| | 4d6cafa589 | |
| | 63e557430b | |
| | 04acb4423d | |
| | ea813acf4c | |
| | b1198dfdbf | |
| | 4342de3747 | |
| | ef8bc7e7e9 | |
| | e18e401f6b | |
| | ab998c47e8 | |
| | 9fb830b2e1 | |
| | 415c3f90a1 | |
| | 60c8ff7fb1 | |
| | 037816313c | |
| | 3d285d1ac6 | |
| | 135338786f | |
| | 661f1fce31 | |
| | 03664ef784 | |
| | d2111ef2b6 | |
@@ -805,83 +805,3 @@

* (mail) Set maximum email size to 25MB
* Remove SimpleAuth addon

[0.107.0]

* Support CSP for webinterface and OAuth views
* (mail) Fix issue where Cloudron is only used to send emails

[0.108.0]

* Redirect to /setupdns.html when restoring
* Fix setting custom avatar
* Do not allocate more than 4GB swap
* Generate real passwords for sendmail/recvmail addons
* Rate limit all authentication routes to prevent password brute force
* Generate 128 byte password for MySQL multi-db addon

[0.109.0]

* Add Referrer-Policy
* Add tooltip for admin email field explaining it is local & private
* Verify AMI instance id during DNS setup instead of admin account setup
* Split platform and app data folders and get rid of btrfs volumes

[0.110.0]

* Fix disk usage graphs
* Add --data-dir to cloudron-setup that allows customizing data location
* Add UI to restore from any app backup
* (mysql) Use utf8mb4 encoding for databases and backups
* Allow installing a new app from a backup
* Fix download of large files (> 1GB)
* Fix app backup regression

[0.120.0]

* Update Docker to 17.03.1-ce
* Rework backup backend logic
* Add UI to download logs
* Fix crash when checking mail DNS settings
* Allow backup retention duration to be configured
* Add minio backend for backups
* Fix issue where Cloudrons with errored apps won't back up when using the fs backend
* Fix DNS check issue where the PTR record was read from the hosts file

[0.120.1]

* Fix managed Cloudron backup cleanup

[0.130.0]

* Use Cloudron DNS server only for containers created by Cloudron
* Make Cloudron always start even if DNS credentials are invalid
* Show warning if DNS configuration is not valid
* Drop the '.enc' extension for non-encrypted backups
* Do not encrypt backups when the backup key is empty
* Do a multipart S3 download for slow internet connections
* Support naked domains as external location

[0.130.1]

* Fix app configure dialog regression

[0.130.2]

* Fix app configure dialog regression and DNS setup screen

[0.130.3]

* Show error message if setup fails due to reserved username
* (security) Do not print password in the logs in the configure route
* Fix restore of unencrypted backups
* Fix bug where FS backups have incorrect extension for unencrypted backups

[0.140.0]

* HTTP2 support
* Condense the DNS checks in the settings view
* Document new app store submission guidelines

[0.150.0]

* Disable dnsmasq on OVH
* Scale redis memory based on the app's memory limit
* (security) Do not print the SSL cert in debug logs
* Add noop storage backend to temporarily disable backups
* Replace native-dns module with dig to prevent spurious crashes
* Cleanup unfinished and errored backups
* Set a time limit of 4 hours for backups to finish

[0.160.0]

* Fix disk graphs when using device mapper
* Prevent email view from flickering
* Prepare for 1.0
||||
@@ -46,14 +46,10 @@ Try our demo at https://my-demo.cloudron.me (username: cloudron password: cloudr

## Installing

You can install the Cloudron platform on your own server or get a managed server
from cloudron.io. In either case, the Cloudron platform will keep your server and
apps up-to-date and secure.
from cloudron.io.

* [Selfhosting](https://cloudron.io/references/selfhosting.html) - [Pricing](https://cloudron.io/pricing.html)
* [Managed Hosting](https://cloudron.io/managed.html)

The wiki has instructions on how you can install and update the Cloudron and the
apps from source.
* [Selfhosting](https://cloudron.io/references/selfhosting.html)
* [Managed Hosting](https://cloudron.io/pricing.html)

## Documentation
||||
@@ -26,6 +26,7 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
|
||||
apt-get -y install \
|
||||
acl \
|
||||
awscli \
|
||||
btrfs-tools \
|
||||
build-essential \
|
||||
cron \
|
||||
curl \
|
||||
@@ -51,16 +52,47 @@ apt-get install -y python # Install python which is required for npm rebuild
|
||||
|
||||
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
|
||||
echo "==> Installing Docker"
|
||||
docker_key="-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: GnuPG v1
|
||||
|
||||
mQINBFWln24BEADrBl5p99uKh8+rpvqJ48u4eTtjeXAWbslJotmC/CakbNSqOb9o
|
||||
ddfzRvGVeJVERt/Q/mlvEqgnyTQy+e6oEYN2Y2kqXceUhXagThnqCoxcEJ3+KM4R
|
||||
mYdoe/BJ/J/6rHOjq7Omk24z2qB3RU1uAv57iY5VGw5p45uZB4C4pNNsBJXoCvPn
|
||||
TGAs/7IrekFZDDgVraPx/hdiwopQ8NltSfZCyu/jPpWFK28TR8yfVlzYFwibj5WK
|
||||
dHM7ZTqlA1tHIG+agyPf3Rae0jPMsHR6q+arXVwMccyOi+ULU0z8mHUJ3iEMIrpT
|
||||
X+80KaN/ZjibfsBOCjcfiJSB/acn4nxQQgNZigna32velafhQivsNREFeJpzENiG
|
||||
HOoyC6qVeOgKrRiKxzymj0FIMLru/iFF5pSWcBQB7PYlt8J0G80lAcPr6VCiN+4c
|
||||
NKv03SdvA69dCOj79PuO9IIvQsJXsSq96HB+TeEmmL+xSdpGtGdCJHHM1fDeCqkZ
|
||||
hT+RtBGQL2SEdWjxbF43oQopocT8cHvyX6Zaltn0svoGs+wX3Z/H6/8P5anog43U
|
||||
65c0A+64Jj00rNDr8j31izhtQMRo892kGeQAaaxg4Pz6HnS7hRC+cOMHUU4HA7iM
|
||||
zHrouAdYeTZeZEQOA7SxtCME9ZnGwe2grxPXh/U/80WJGkzLFNcTKdv+rwARAQAB
|
||||
tDdEb2NrZXIgUmVsZWFzZSBUb29sIChyZWxlYXNlZG9ja2VyKSA8ZG9ja2VyQGRv
|
||||
Y2tlci5jb20+iQI4BBMBAgAiBQJVpZ9uAhsvBgsJCAcDAgYVCAIJCgsEFgIDAQIe
|
||||
AQIXgAAKCRD3YiFXLFJgnbRfEAC9Uai7Rv20QIDlDogRzd+Vebg4ahyoUdj0CH+n
|
||||
Ak40RIoq6G26u1e+sdgjpCa8jF6vrx+smpgd1HeJdmpahUX0XN3X9f9qU9oj9A4I
|
||||
1WDalRWJh+tP5WNv2ySy6AwcP9QnjuBMRTnTK27pk1sEMg9oJHK5p+ts8hlSC4Sl
|
||||
uyMKH5NMVy9c+A9yqq9NF6M6d6/ehKfBFFLG9BX+XLBATvf1ZemGVHQusCQebTGv
|
||||
0C0V9yqtdPdRWVIEhHxyNHATaVYOafTj/EF0lDxLl6zDT6trRV5n9F1VCEh4Aal8
|
||||
L5MxVPcIZVO7NHT2EkQgn8CvWjV3oKl2GopZF8V4XdJRl90U/WDv/6cmfI08GkzD
|
||||
YBHhS8ULWRFwGKobsSTyIvnbk4NtKdnTGyTJCQ8+6i52s+C54PiNgfj2ieNn6oOR
|
||||
7d+bNCcG1CdOYY+ZXVOcsjl73UYvtJrO0Rl/NpYERkZ5d/tzw4jZ6FCXgggA/Zxc
|
||||
jk6Y1ZvIm8Mt8wLRFH9Nww+FVsCtaCXJLP8DlJLASMD9rl5QS9Ku3u7ZNrr5HWXP
|
||||
HXITX660jglyshch6CWeiUATqjIAzkEQom/kEnOrvJAtkypRJ59vYQOedZ1sFVEL
|
||||
MXg2UCkD/FwojfnVtjzYaTCeGwFQeqzHmM241iuOmBYPeyTY5veF49aBJA1gEJOQ
|
||||
TvBR8Q==
|
||||
=Fm3p
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
||||
"
|
||||
echo "$docker_key" | apt-key add -
|
||||
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
|
||||
apt-get -y update
|
||||
|
||||
# create systemd drop-in file
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
curl -sL https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.03.1~ce-0~ubuntu-xenial_amd64.deb -o /tmp/docker.deb
|
||||
# apt install with install deps (as opposed to dpkg -i)
|
||||
apt install -y /tmp/docker.deb
|
||||
rm /tmp/docker.deb
|
||||
|
||||
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
apt-mark hold docker-engine # do not update docker
|
||||
storage_driver=$(docker info | grep "Storage Driver" | sed 's/.*: //')
|
||||
if [[ "${storage_driver}" != "devicemapper" ]]; then
|
||||
echo "Docker is using "${storage_driver}" instead of devicemapper"
|
||||
@@ -92,11 +124,3 @@ if ! apt-get install -y collectd collectd-utils; then
|
||||
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
|
||||
fi
|
||||
|
||||
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflict with unbound)
|
||||
systemctl stop bind9 || true
|
||||
systemctl disable bind9 || true
|
||||
|
||||
# on ovh images dnsmasq seems to run by default
|
||||
systemctl stop dnsmasq || true
|
||||
systemctl disable dnsmasq || true
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ TokenURL = ${API_ORIGIN}/api/v1/oauth/token
|
||||
The token obtained via OAuth has a restricted scope wherein it can only access the [profile API](/references/api.html#profile). This restriction
|
||||
is so that apps cannot make undesired changes to the user's Cloudron.
|
||||
|
||||
We currently provide OAuth2 integration for Ruby [omniauth](https://git.cloudron.io/cloudron/omniauth-cloudron) and Node.js [passport](https://git.cloudron.io/cloudron/passport-cloudron).
|
||||
We currently provide OAuth2 integration for Ruby [omniauth](https://github.com/cloudron-io/omniauth-cloudron) and Node.js [passport](https://github.com/cloudron-io/passport-cloudron).
|
||||
|
||||
## postgresql
|
||||
|
||||
@@ -317,3 +317,4 @@ cloudron exec
|
||||
|
||||
> swaks --server "${MAIL_SMTP_SERVER}" -p "${MAIL_SMTP_PORT}" --from "${MAIL_SMTP_USERNAME}@${MAIL_DOMAIN}" --body "Test mail from cloudron app at $(hostname -f)" --auth-user "${MAIL_SMTP_USERNAME}" --auth-password "${MAIL_SMTP_PASSWORD}"
|
||||
```
|
||||
|
||||
|
||||
+5
-91
@@ -117,7 +117,6 @@ Request:
|
||||
cert: <string>, // pem encoded TLS cert
|
||||
key: <string>, // pem encoded TLS key
|
||||
memoryLimit: <number>, // memory constraint in bytes
|
||||
backupId: <string>, // initialize the app from this backup
|
||||
altDomain: <string>, // alternate domain from which this app can be reached
|
||||
xFrameOptions: <string> // set X-Frame-Options header, to control which websites can embed this app
|
||||
}
|
||||
@@ -154,8 +153,6 @@ If `altDomain` is set, the app can be accessed from `https://<altDomain>`.
|
||||
|
||||
`memoryLimit` is the maximum memory this app can use (in bytes) including swap. If set to 0, the app uses the `memoryLimit` value set in the manifest. If set to -1, the app gets unlimited memory.
|
||||
|
||||
If `backupId` is provided the app will be initialized with the data from the backup.
|
||||
|
||||
Read more about the options at [MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options).
|
||||
|
||||
Response (200):
|
||||
@@ -694,23 +691,6 @@ Curl example to activate the cloudron:
|
||||
curl -X POST -H "Content-Type: application/json" -d '{"username": "girish", "password":"MySecret123#", "email": "girish@cloudron.io" }' https://my.cloudron.info/api/v1/cloudron/activate
|
||||
```
|
||||
|
||||
### Check for updates
|
||||
|
||||
POST `/api/v1/check_for_updates` <scope>admin</scope>
|
||||
|
||||
Checks for any available updates for the Cloudron and the installed apps.
|
||||
|
||||
Response (200):
|
||||
```
|
||||
{
|
||||
box: null|<object>, // object containing information about update
|
||||
apps: { // update info (if any) for each app
|
||||
<appid>: <object>,
|
||||
...
|
||||
}
|
||||
}
|
||||
```
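For example, an update check could be triggered with curl as below (the domain and bearer token are placeholders):

```
curl -X POST -H 'Authorization: Bearer <access token>' https://my.<domain>/api/v1/check_for_updates
```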
|
||||
|
||||
### Update the Cloudron
|
||||
|
||||
POST `/api/v1/cloudron/update` <scope>admin</scope>
|
||||
@@ -759,6 +739,7 @@ Response (200):
|
||||
{
|
||||
activated: <boolean>,
|
||||
version: <semver>,
|
||||
boxVersionsUrl: <url>, // Location of the Cloudron versions file to check for updates
|
||||
apiServerOrigin: <url>, // Always https://api.cloudron.io
|
||||
provider: <string>,
|
||||
cloudronName: <string>
|
||||
@@ -789,6 +770,7 @@ Response (200):
|
||||
{
|
||||
apiServerOrigin: <string>, // Always https://api.cloudron.io
|
||||
webServerOrigin: <string>, // Always https://cloudron.io
|
||||
isDev: <boolean>, // internal
|
||||
fqdn: <fqdn>, // The FQDN
|
||||
ip: <ip>, // The public IP
|
||||
version: <semver>, // Current version
|
||||
@@ -812,50 +794,12 @@ Response (200):
|
||||
}
|
||||
```
|
||||
|
||||
### Get disks
|
||||
|
||||
GET `/api/v1/cloudron/disks` <scope>admin</scope>
|
||||
|
||||
Gets information on the disks being used on the Cloudron server.
|
||||
|
||||
```
|
||||
Response (200):
|
||||
{
|
||||
boxDataDisk: <string>, // Disk used for storing box data
|
||||
platformDataDisk: <string>, // Disk used for addon databases and email
|
||||
appsDataDisk: <string> // Disk used for apps' local storage
|
||||
}
|
||||
```
|
||||
|
||||
### Get logs
|
||||
|
||||
GET `/api/v1/cloudron/logs` <scope>admin</scope>
|
||||
|
||||
Get the system logs.
|
||||
|
||||
The `lines` query parameter can be used to specify the number of log lines to download.
|
||||
|
||||
The `units` query parameter can be set to `box` or `mail` to get logs of specific units.
|
||||
|
||||
The response has `Content-Type` set to 'application/x-logs' and `Content-Disposition` set to
|
||||
`attachment; filename="log.txt"`.
|
||||
|
||||
Response(200):
|
||||
|
||||
```
|
||||
Line delimited JSON.
|
||||
|
||||
{
|
||||
realtimeTimestamp: <number>, // wallclock timestamp
|
||||
monotonicTimestamp: <number>, // time passed since boot
|
||||
message: [ <byte>,... ], // utf8 buffer
|
||||
source: <process name> // source of this message
|
||||
}
|
||||
```
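For example, the last 100 lines of the box unit's logs could be downloaded like this (the domain and bearer token are placeholders):

```
curl -X GET -H 'Authorization: Bearer <access token>' 'https://my.<domain>/api/v1/cloudron/logs?lines=100&units=box' -o log.txt
```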
|
||||
## Eventlog
|
||||
|
||||
### List events
|
||||
|
||||
GET `/api/v1/cloudron/eventlog` <scope>admin</scope>
|
||||
GET `/api/v1/eventlog` <scope>admin</scope>
|
||||
|
||||
Lists all the past events.
|
||||
|
||||
@@ -906,7 +850,7 @@ Response (200):
|
||||
|
||||
To list all the app installation events:
|
||||
```
|
||||
curl -X GET -H 'Authorization: Bearer cb0463455a6606482be7956fc3abd53330ae23244e3492cda3914a2c5154c47e' https://my-demo.cloudron.me/api/v1/cloudron/eventlog?action=app.install
|
||||
curl -X GET -H 'Authorization: Bearer cb0463455a6606482be7956fc3abd53330ae23244e3492cda3914a2c5154c47e' https://my-demo.cloudron.me/api/v1/eventlog?action=app.install
|
||||
```
|
||||
|
||||
## Groups
|
||||
@@ -1062,36 +1006,6 @@ Response (204):
|
||||
|
||||
## Settings
|
||||
|
||||
### Get Appstore Config
|
||||
|
||||
GET `/api/v1/settings/appstore_config` <scope>admin</scope>
|
||||
|
||||
Response (200):
|
||||
```
|
||||
{
|
||||
userId: <string>, // the appstore userId
|
||||
token: <string>, // appstore token
|
||||
cloudronId: <string> // cloudron id
|
||||
}
|
||||
```
|
||||
|
||||
### Set Appstore Config
|
||||
|
||||
POST `/api/v1/settings/appstore_config` <scope>admin</scope>
|
||||
|
||||
Sets the credentials used for the Cloudron Store.
|
||||
|
||||
Request:
|
||||
```
|
||||
{
|
||||
userId: <string>, // the appstore userId
|
||||
token: <string> // token from appstore
|
||||
}
|
||||
```
|
||||
|
||||
You can get the `userId` and `token` by sending a `/api/v1/login` POST request to `api.cloudron.io`
|
||||
with the `email` and `password` fields set in the request.
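For example (the credentials below are placeholders; the exact response shape is not documented here, but it contains the `userId` and `token` used above):

```
curl -X POST -H "Content-Type: application/json" -d '{ "email": "user@example.com", "password": "MySecret123#" }' https://api.cloudron.io/api/v1/login
```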
|
||||
|
||||
### Get auto update pattern
|
||||
|
||||
GET `/api/v1/settings/autoupdate_pattern` <scope>admin</scope>
|
||||
|
||||
@@ -1,25 +1,25 @@

# Introduction

The Cloudron platform is designed to easily install and run web applications.
The application architecture is designed to let the Cloudron take care of system
operations like updates, backups, firewalls, domain management, certificate management
etc. This allows app developers to focus on their application logic instead of deployment.

At a high level, an application provides an `image` and a `manifest`. The image is simply
a docker image that is a bundle of the application code and its dependencies. The manifest
file specifies application runtime requirements like database type and authentication scheme.
It also provides meta information for display purposes in the [Cloudron Store](/appstore.html)
like the title, icon and pricing.

Web applications like blogs, wikis, password managers, code hosting, document editing,
file syncers, notes, email, forums are a natural fit for the Cloudron. Decentralized "social"
networks are also good app candidates for the Cloudron.

# Image

Application images are created using [Docker](https://www.docker.io). Docker provides a way
to package (and containerize) the application as a filesystem which contains its code, system libraries
and just about anything the app requires. This flexible approach allows the application to use just
about any language or framework.

Application images are instantiated as `containers`. Cloudron can run one or more isolated instances

@@ -77,11 +77,12 @@ Authentication strategies include OAuth 2.0, LDAP or Simple Auth. See the

Authorizing users is application specific and it is only authentication that is delegated to the
Cloudron.

# Cloudron App Library
# Cloudron Store

Cloudron App Library provides a marketplace to publish your app.
Submitting to the app library enables any Cloudron user to discover and install your application with a few clicks.
Cloudron Store provides a marketplace to publish and optionally monetize your app. Submitting to the
Cloudron Store enables any Cloudron user to discover, purchase and install your application with
a few clicks.

# What next?

* [Package an existing app for the Cloudron](/tutorials/packaging.html)
||||
@@ -0,0 +1,93 @@

# Best practices

## Overview

This document explains the spirit of what makes a Cloudron app.

## No Setup

Cloudron apps do not show a setup screen after installation and should choose reasonable
defaults.

Databases and email configuration should be picked up automatically using [addons](/references/addons.html).

The admin role for the application can be detected dynamically using one of the [authentication](/references/authentication.html)
strategies.

## Image

The Dockerfile contains a specification for building an application image.

* Install any required software packages in the Dockerfile.

* Create static configuration files in the Dockerfile.

* Create symlinks to dynamic configuration files under `/run` in the Dockerfile.

* Docker supports restarting processes natively. Should your application crash, it will
be restarted automatically. If your application is a single process, you do not require
any process manager.

* The main process must handle `SIGTERM` and forward it as required to child processes. `bash`
does not automatically forward signals to child processes. For this reason, when using a startup
shell script, remember to use `exec <app>` as the last line. Doing so will replace bash with your
program and allows your program to handle signals as required (see the sketch after this list).

* Use `supervisor`, `pm2` or any of the other process managers if your application has more
than one component. This excludes web servers like apache, nginx which can already manage their
children by themselves. Be sure to pick a process manager that forwards signals to child processes.

* Disable auto updates for apps. Updates must be triggered through the Cloudron Store. This allows the admin
to manage updates and downtime in a central location (the Cloudron Webadmin).
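As a minimal sketch of the `exec` pattern, a hypothetical `start.sh` for a Node.js app is shown below (the file paths and the `gosu` invocation follow the conventions used elsewhere in this documentation, but are illustrative):

```sh
#!/bin/bash

set -eu

# prepare a writable runtime directory (the rootfs is read-only)
mkdir -p /run/app

# drop privileges and replace bash with the app process so it receives SIGTERM directly
exec /usr/local/bin/gosu cloudron:cloudron node /app/code/server.js
```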
## File system

The Cloudron runs the application image as read-only. The app can only write to the following directories:

* `/tmp` - use this for temporary files.

* `/run` - use this for runtime configuration and any dynamic data.

* `/app/data` - When the `localstorage` addon is enabled, any data under this directory is automatically backed up.

## Logging

Cloudron applications stream their logs to stdout and stderr. In contrast to logging
to files, this approach has many advantages:

* The app does not need to rotate logs; the Cloudron takes care of managing logs
* The app does not need a special mechanism to release log file handles (on a log rotate)
* Integrates better with tooling like `cloudron cli`

See the [base image](/references/baseimage.html#configuring) documentation on how to configure various libraries to log to stdout/stderr.

## Memory

By default, applications get 256MB RAM (including swap). This can be changed using the `memoryLimit` field in the manifest.

Design your application runtime for concurrent use by 10s of users. The Cloudron is not designed for concurrent access by
100s or 1000s of users.

## Startup

* Apps must not present a post-installation screen on first run. They should be already pre-configured for
a specific purpose.

* Do not run as `root`. Apps can use the `cloudron` user which is part of the [base image](/references/baseimage.html)
for this purpose or create their own.

* When using the `localstorage` addon, the application must change the ownership of files in `/app/data` as desired using `chown`. This
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
updates.

* Addon information (mail, database) is exposed as environment variables. An application must use these values directly
and not cache them across restarts. If the variables are stored in a configuration file, then the configuration file
must be regenerated on every application start. This is usually done using a configuration template that is patched
on every startup (see the sketch after this list).
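A minimal sketch of this pattern in `start.sh`, assuming the mysql addon and illustrative template/output paths:

```sh
# regenerate the runtime config from a template on every start,
# substituting the current addon credentials from the environment
sed -e "s/##MYSQL_HOST/${MYSQL_HOST}/" \
    -e "s/##MYSQL_PORT/${MYSQL_PORT}/" \
    -e "s/##MYSQL_USERNAME/${MYSQL_USERNAME}/" \
    -e "s/##MYSQL_PASSWORD/${MYSQL_PASSWORD}/" \
    /app/code/config.php.template > /run/app/config.php
```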
## Authentication

Apps should integrate with one of the [authentication strategies](/references/authentication.html).
This saves the user from having to manage a separate set of users for different apps.
||||
@@ -53,15 +53,16 @@ Cloudron has a built-in firewall and ports are opened and closed dynamically, as
|
||||
apps are installed, re-configured or removed. For this reason, be sure to open all TCP and
|
||||
UDP traffic to the server and leave the traffic management to the Cloudron.
|
||||
|
||||
### Kimsufi
|
||||
|
||||
Be sure to check the "use the distribution kernel" checkbox in the personalized installation mode.
|
||||
|
||||
### Linode
|
||||
|
||||
Since Linode does not manage SSH keys, be sure to add the public key to
|
||||
`/root/.ssh/authorized_keys`.
|
||||
|
||||
### Scaleway
|
||||
|
||||
Use the [boot script](https://github.com/scaleway-community/scaleway-docker/issues/2) to
|
||||
enable memory accounting.
|
||||
|
||||
## Run setup
|
||||
|
||||
SSH into your server and run the following commands:
|
||||
@@ -89,10 +90,6 @@ Specifying `fallback` will setup the Cloudron to use the fallback wildcard certi
|
||||
Initially a self-signed one is provided, which can be overwritten later in the admin interface.
|
||||
This may be useful for non-public installations.
|
||||
|
||||
|
||||
* `--data-dir` is the path where Cloudron will store platform and application data. Note: data
|
||||
directory must be an `ext4` filesystem.
|
||||
|
||||
Optional arguments used for update and restore:
|
||||
|
||||
* `--version` is the version of Cloudron to install. By default, the setup script installs
|
||||
@@ -271,8 +268,8 @@ reputation should be easy to get back.
|
||||
|
||||
* Scaleway - Edit your security group to allow email and [reboot the server](https://community.online.net/t/security-group-not-working/2096) for the change to take effect. You can also set a PTR record on the interface with your `my.<domain>`.
|
||||
|
||||
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/) and [here](http://www.blk.mx).
|
||||
In most cases, you can apply for removal of your IP by filling out a form at the DNSBL manager site.
|
||||
* Check if your IP is listed in any DNSBL list [here](http://multirbl.valli.org/). In most cases,
|
||||
you can apply for removal of your IP by filling out a form at the DNSBL manager site.
|
||||
|
||||
* When using wildcard or manual DNS backends, you have to setup the DMARC, MX records manually.
|
||||
|
||||
@@ -377,118 +374,6 @@ To restore a Cloudron from a specific backup:
|
||||
|
||||
* Make the box backup private, once the upgrade is complete.
|
||||
|
||||
# Security
|
||||
|
||||
Security is a core feature of the Cloudron and we continue to push out updates to tighten the Cloudron's security policy. Our goal is that Cloudron users should be able to rely on Cloudron being secure out of the box without having to do manual configuration.
|
||||
|
||||
This section lists various security measures in place to protect the Cloudron.
|
||||
|
||||
## HTTP Security
|
||||
|
||||
* Cloudron admin has a CSP policy that prevents XSS attacks.
|
||||
* Cloudron sets various security-related HTTP headers like `X-XSS-Protection`, `X-Download-Options`,
|
||||
`X-Content-Type-Options`, `X-Permitted-Cross-Domain-Policies`, `X-Frame-Options` across all apps.
|
||||
|
||||
## SSL
|
||||
|
||||
* Cloudron enforces HTTPS across all apps. HTTP requests are automatically redirected to
|
||||
HTTPS.
|
||||
* The Cloudron automatically installs and renews certificates for your apps as needed. Should
|
||||
installation of a certificate fail for reasons beyond its control, Cloudron admins will get a notification about it.
|
||||
* Cloudron sets the `Strict-Transport-Security` header (HSTS) to protect apps against downgrade attacks
|
||||
and cookie hijacking.
|
||||
* Cloudron has A+ rating for SSL from [SSL Labs](https://cloudron.io/blog/2017-02-22-release-0.102.0.html).
|
||||
|
||||
## App isolation
|
||||
|
||||
* Apps are isolated completely from one another. One app cannot tamper with another app's database or
|
||||
local files. We achieve this using Linux Containers.
|
||||
* Apps run with a read-only rootfs preventing attacks where the application code can be tampered with.
|
||||
* Apps can only connect to addons like databases, LDAP, email relay using authentication.
|
||||
* Apps are run with an AppArmor profile that disables many system calls and restricts access to `proc`
|
||||
and `sys` filesystems.
|
||||
* Most apps are run as a non-root user. In the future, we intend to implement user namespaces.
|
||||
* Each app is run in its own subdomain as opposed to sub-paths. This ensures that XSS vulnerabilities
in one app don't [compromise](https://security.stackexchange.com/questions/24155/preventing-insecure-webapp-on-subdomain-compromise-security-of-main-webapp) other apps.
|
||||
|
||||
## Email
|
||||
|
||||
* Cloudron checks against the [Zen Spamhaus DNSBL](https://www.spamhaus.org/zen/) before accepting mail.
|
||||
* Email can only be accessed with IMAP over TLS (IMAPS).
|
||||
* Email can only be relayed (including same-domain emails) by authenticated users using SMTP/STARTTLS.
|
||||
* Cloudron ensures that `MAIL FROM` is the same as the authenticated user. Users cannot spoof each other.
|
||||
* All outbound mails from Cloudron are `DKIM` signed.
|
||||
* Cloudron automatically sets up SPF, DMARC policies in the DNS for best email delivery.
|
||||
* All incoming mail is scanned via `SpamAssassin`.
|
||||
|
||||
## Firewall
|
||||
|
||||
* Cloudron blocks all incoming ports except 22 (ssh), 80 (http), 443 (https)
|
||||
* When email is enabled, Cloudron allows 25 (SMTP), 587 (MSA), 993 (IMAPS) and 4190 (WebSieve)
|
||||
|
||||
## OS Updates
|
||||
|
||||
* Ubuntu [automatic security updates](https://help.ubuntu.com/community/AutomaticSecurityUpdates) are enabled
|
||||
|
||||
## Rate limits
|
||||
|
||||
The goal of rate limits is to prevent password brute force attacks.
|
||||
|
||||
* Cloudron password verification routes - 10 requests per second per IP.
|
||||
* HTTP and HTTPS requests - 5000 requests per second per IP.
|
||||
* SSH access - 5 connections per 10 seconds per IP.
|
||||
* Email access (Port 25, 587, 993, 4190) - 50 connections per second per IP/App.
|
||||
* Database addons access - 5000 connections per second per app (addons use 128 byte passwords).
|
||||
* Email relay access - 500 connections per second per app.
|
||||
* Email receive access - 50 connections per second per app.
|
||||
* Auth addon access - 500 connections per second per app.
|
||||
|
||||
## Password restrictions
|
||||
|
||||
* Cloudron requires user passwords to have 1 uppercase, 1 number and 1 symbol.
|
||||
* The minimum length for user passwords is 8 characters.
|
||||
|
||||
## Privacy
|
||||
|
||||
* Cloudron apps have a default `Referrer-Policy` of `no-referrer-when-downgrade`.
|
||||
* Backups are optionally encrypted with AES-256-CBC.
|
||||
* Let's Encrypt [submits](https://letsencrypt.org/certificates/)
|
||||
all certificates to [Certificate Transparency Logs](https://www.certificate-transparency.org/).
|
||||
This means that the apps that you install and use are going to be guessable. For example,
|
||||
[crt.sh](https://crt.sh) can display all your subdomains and you can visit those subdomains and
|
||||
guess the app. Generally, this is not a problem because using hidden DNS names is not a security
|
||||
measure. If you want to avoid this, you can always use a wildcard certificate.
|
||||
* Cloudron does not collect any user information and this is not our business model. We collect
|
||||
information regarding the configured backend types. This helps us focus on improving backends
|
||||
based on their use. You can review the specific code [here](https://git.cloudron.io/cloudron/box/blob/master/src/appstore.js#L124).
|
||||
|
||||
# Data directory
|
||||
|
||||
If you are installing a brand new Cloudron, you can configure the data directory
|
||||
that Cloudron uses by passing the `--data-dir` option to `cloudron-setup`.
|
||||
|
||||
Note: data directory must be an `ext4` filesystem.
|
||||
|
||||
```
|
||||
./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --data-dir /var/cloudrondata
|
||||
```
|
||||
|
||||
If you have an existing Cloudron, we recommend moving the existing data directory
|
||||
to a new location as follows (`DATA_DIR` is the location to move your data):
|
||||
|
||||
```
|
||||
systemctl stop cloudron.target
|
||||
systemctl stop docker
|
||||
DATA_DIR="/var/data"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
mv /home/yellowtent/appsdata "${DATA_DIR}"
|
||||
ln -s "${DATA_DIR}/appsdata" /home/yellowtent/appsdata
|
||||
mv /home/yellowtent/platformdata "${DATA_DIR}"
|
||||
ln -s "${DATA_DIR}/platformdata" /home/yellowtent/platformdata
|
||||
systemctl start docker
|
||||
systemctl start cloudron.target
|
||||
```
|
||||
|
||||
# Debug
|
||||
|
||||
You can SSH into your Cloudron and collect logs:
|
||||
|
||||
@@ -160,8 +160,8 @@ domain. For this, open the app's configure dialog and choose `External Domain` i
|
||||
|
||||
<img src="/docs/img/app_external_domain.png" class="shadow">
|
||||
|
||||
This dialog will suggest you to add a `CNAME` record (for subdomains) or an `A` record (for naked domains).
|
||||
Once you setup a record with your DNS provider, the app will be accessible from that external domain.
|
||||
This dialog will suggest you to add a `CNAME` record. Once you setup a CNAME record with your DNS provider,
|
||||
the app will be accessible from that external domain.
|
||||
|
||||
## Entire Cloudron on a custom domain
|
||||
|
||||
@@ -330,28 +330,16 @@ the apps on your Cloudron and also tracks configuration changes.
|
||||
|
||||
<img src="/docs/img/activity.png" class="shadow">
|
||||
|
||||
# API Access
|
||||
|
||||
All the operations listed in this manual like installing app, configuring users and groups, are
|
||||
completely programmable with a [REST API](/references/api.html).
|
||||
|
||||
# Domains and SSL Certificates
|
||||
|
||||
All apps on the Cloudron can only be reached by `https`. The Cloudron automatically installs and
|
||||
renews certificates for your apps as needed. Should installation of a certificate fail for reasons
beyond its control, Cloudron admins will get a notification about it.
|
||||
|
||||
# OAuth Provider
|
||||
# API Access
|
||||
|
||||
Cloudron is an OAuth 2.0 provider. To integrate Cloudron login into an external application, create
|
||||
an OAuth application under `API Access`.
|
||||
|
||||
You can use the following OAuth URLs to add Cloudron in the external app:
|
||||
```
|
||||
authorizationURL: https://my.<domain>/api/v1/oauth/dialog/authorize
|
||||
|
||||
tokenURL: https://my.<domain>/api/v1/oauth/token
|
||||
```
|
||||
All the operations listed in this manual like installing app, configuring users and groups, are
|
||||
completely programmable with a [REST API](/references/api.html).
|
||||
|
||||
# Moving to a larger Cloudron
|
||||
|
||||
|
||||
+8
-10
@@ -42,12 +42,12 @@ Creating an application for Cloudron can be summarized as follows:
|
||||
1. Create a web application using any language/framework. This web application must run a HTTP server
|
||||
and can optionally provide other services using custom protocols (like git, ssh, TCP etc).
|
||||
|
||||
2. Create a [Dockerfile](http://docs.docker.com/engine/reference/builder/) that specifies how to create
|
||||
2. Create a [Dockerfile](http://docs.docker.com/engine/reference/builder/) that specifies how to create
|
||||
an application ```image```. An ```image``` is essentially a bundle of the application source code
|
||||
and its dependencies.
|
||||
|
||||
3. Create a [CloudronManifest.json](/references/manifest.html) file that provides essential information
|
||||
about the app. This includes information required for the Cloudron Store like title, version, icon and
|
||||
about the app. This includes information required for the Cloudron Store like title, version, icon and
|
||||
runtime requirements like `addons`.
|
||||
|
||||
## Simple Web application
|
||||
@@ -79,7 +79,7 @@ FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-6.9.5/bin/node", "/app/code/server.js" ]
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
@@ -90,7 +90,7 @@ While this example only copies a single file, the ADD command can be used to cop
|
||||
See the [Dockerfile](https://docs.docker.com/reference/builder/#add) documentation for more details.
|
||||
|
||||
The `CMD` command specifies how to run the server. There are multiple versions of node available under `/usr/local`. We
|
||||
choose node v6.9.5 for our app.
|
||||
choose node v0.12.7 for our app.
|
||||
|
||||
## CloudronManifest.json
|
||||
|
||||
@@ -176,7 +176,7 @@ Step 0 : FROM cloudron/base:0.10.0
|
||||
Step 1 : ADD server.js /app/code
|
||||
---> b09b97ecdfbc
|
||||
Removing intermediate container 03c1e1f77acb
|
||||
Step 2 : CMD /usr/local/node-6.9.5/bin/node /app/code/main.js
|
||||
Step 2 : CMD /usr/local/node-0.12.7/bin/node /app/code/main.js
|
||||
---> Running in 370f59d87ab2
|
||||
---> 53b51eabcb89
|
||||
Removing intermediate container 370f59d87ab2
|
||||
@@ -335,15 +335,13 @@ File `tutorial/Dockerfile`
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ENV PATH /usr/local/node-6.9.5/bin:$PATH
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
ADD package.json /app/code/package.json
|
||||
|
||||
WORKDIR /app/code
|
||||
RUN npm install --production
|
||||
|
||||
CMD [ "node", "/app/code/server.js" ]
|
||||
CMD [ "/usr/local/node-0.12.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
Notice the new `RUN` command which installs the node module dependencies in package.json using `npm install`.
|
||||
@@ -590,7 +588,7 @@ Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron appstore upload
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
The app should now be visible in the Store view of your cloudron under
|
||||
@@ -607,7 +605,7 @@ developer mode.
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron appstore submit
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
+35
-257
@@ -5,7 +5,7 @@ This tutorial outlines how to package an existing web application for the Cloudr
|
||||
If you are aware of Docker and Heroku, you should feel at home packaging for the
|
||||
Cloudron. Roughly, the steps involved are:
|
||||
|
||||
* Create a Dockerfile for your application. If your application already has a Dockerfile, it
|
||||
* Create a Dockerfile for your application. If your application already has a Dockerfile, it
|
||||
is a good starting point for packaging for the Cloudron. By virtue of Docker, the Cloudron
|
||||
is able to run apps written in any language/framework.
|
||||
|
||||
@@ -83,7 +83,7 @@ FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
CMD [ "/usr/local/node-4.7.3/bin/node", "/app/code/server.js" ]
|
||||
CMD [ "/usr/local/node-4.4.7/bin/node", "/app/code/server.js" ]
|
||||
```
|
||||
|
||||
The `FROM` command specifies that we want to start off with Cloudron's [base image](/references/baseimage.html).
|
||||
@@ -94,7 +94,7 @@ The `ADD` command copies the source code of the app into the directory `/app/cod
|
||||
about the `/app/code` directory and it is merely a convention we use to store the application code.
|
||||
|
||||
The `CMD` command specifies how to run the server. The base image already contains many different versions of
|
||||
node.js. We use Node 4.7.3 here.
|
||||
node.js. We use Node 4.4.7 here.
|
||||
|
||||
This Dockerfile can be built and run locally as:
|
||||
```
|
||||
@@ -179,7 +179,7 @@ Initiate a build using ```cloudron build```:
|
||||
$ cloudron build
|
||||
Building io.cloudron.tutorial@0.0.1
|
||||
|
||||
cloudron.io login:
|
||||
Appstore login:
|
||||
Email: ramakrishnan.girish@gmail.com # cloudron.io account
|
||||
Password: # Enter password
|
||||
Login successful.
|
||||
@@ -348,64 +348,12 @@ show any setup screen after installation and should simply choose reasonable def
|
||||
Databases, email configuration should be automatically picked up from the environment variables using
|
||||
addons.
|
||||
|
||||
## Docker
|
||||
## Dockerfile
|
||||
|
||||
Cloudron uses Docker in the backend, so the package build script is a regular `Dockerfile`.
|
||||
|
||||
The app is run as a read-only docker container. Only `/run` (dynamic data), `/app/data` (backup data) and `/tmp` (temporary files) are writable at runtime. Because of this:
|
||||
|
||||
* Install any required packages in the Dockerfile.
|
||||
* Create static configuration files in the Dockerfile.
|
||||
* Create symlinks to dynamic configuration files under `/run` in the Dockerfile.
|
||||
|
||||
### Source directory
|
||||
|
||||
By convention, Cloudron apps install the source code in `/app/code`. Do not forget to create the directory for the code of the app:
|
||||
```sh
|
||||
RUN mkdir -p /app/code
|
||||
WORKDIR /app/code
|
||||
```
|
||||
|
||||
### Download archives
|
||||
|
||||
When packaging an app you often want to download and extract archives (e.g. from github).
|
||||
This can be done in one line by combining `wget` and `tar` like this:
|
||||
|
||||
```docker
|
||||
ENV VERSION 1.6.2
|
||||
RUN wget "https://github.com/FreshRSS/FreshRSS/archive/${VERSION}.tar.gz" -O - \
|
||||
| tar -xz -C /app/code --strip-components=1
|
||||
```
|
||||
|
||||
The `--strip-components=1` causes the topmost directory in the archive to be skipped.
|
||||
|
||||
Always pin the download to a specific tag or commit instead of using `HEAD` or `master`
|
||||
so that the builds are reasonably reproducible.
|
||||
|
||||
### Applying patches
|
||||
|
||||
To get the app working in Cloudron, sometimes it is necessary to patch the original sources. Patch is a safe way to modify sources, as it fails when the expected original sources changed too much.
|
||||
|
||||
First create a backup copy of the full sources (to be able to calculate the differences):
|
||||
|
||||
```sh
|
||||
cp -a extensions extensions-orig
|
||||
```
|
||||
|
||||
Then modify the sources in the original path and when finished, create a patch like this:
|
||||
|
||||
```sh
|
||||
diff -Naru extensions-orig/ extensions/ > change-ttrss-file-path.patch
|
||||
```
|
||||
|
||||
Add and apply this patch to the sources in the Dockerfile:
|
||||
|
||||
```docker
|
||||
ADD change-ttrss-file-path.patch /app/code/change-ttrss-file-path.patch
|
||||
RUN patch -p1 -d /app/code/extensions < /app/code/change-ttrss-file-path.patch
|
||||
```
|
||||
|
||||
The `-p1` causes patch to ignore the topmost directory in the patch.
|
||||
The app is run as a read-only docker container. Because of this:
|
||||
* Install any required packages in the Dockerfile.
|
||||
* Create static configuration files in the Dockerfile.
|
||||
* Create symlinks to dynamic configuration files under /run in the Dockerfile.
|
||||
|
||||
## Process manager
|
||||
|
||||
@@ -414,7 +362,7 @@ automatically. If your application is a single process, you do not require any p
|
||||
|
||||
Use supervisor, pm2 or any of the other process managers if your application has more than one component.
|
||||
This **excludes** web servers like apache, nginx which can already manage their children by themselves.
|
||||
Be sure to pick a process manager that [forwards signals](#sigterm-handling) to child processes.
|
||||
Be sure to pick a process manager that forwards signals to child processes.
|
||||
|
||||
## Automatic updates
|
||||
|
||||
@@ -448,207 +396,35 @@ An app can determine it's memory limit by reading `/sys/fs/cgroup/memory/memory.
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
This saves the user from having to manage a separate set of credentials for each app.
|
||||
|
||||
## Start script
|
||||
## Startup Script
|
||||
|
||||
Many apps do not launch the server directly, as we did in our basic example. Instead, they execute
|
||||
a `start.sh` script (named so by convention) which is used as the app entry point.
|
||||
a `start.sh` script (named so by convention) which launches the server. Before starting the server,
|
||||
the `start.sh` script does the following:
|
||||
|
||||
At the end of the Dockerfile you should add your start script (`start.sh`) and set it as the default command.
|
||||
Ensure that the `start.sh` is executable in the app package repo. This can be done with `chmod +x start.sh`.
|
||||
```docker
|
||||
ADD start.sh /app/code/start.sh
|
||||
CMD [ "/app/code/start.sh" ]
|
||||
```
|
||||
* When using the `localstorage` addon, it changes the ownership of files in `/app/data` as desired using `chown`. This
|
||||
is necessary because file permissions may not be correctly preserved across backup, restore, application and base image
|
||||
updates.
|
||||
|
||||
### One-time init
|
||||
* Addon information (mail, database) exposed as environment variables is subject to change across restarts and an application
must use these values directly (i.e. not cache them across restarts). For this reason, it usually regenerates
|
||||
any config files with the current database settings on each invocation.
|
||||
|
||||
One common pattern is to initialize the data directory with some commands once depending on the existence of a special `.initialized` file.
|
||||
* Finally, it starts the server as a non-root user.
|
||||
|
||||
```sh
|
||||
if ! [ -f /app/data/.initialized ]; then
|
||||
echo "Fresh installation, setting up data directory..."
|
||||
# Setup commands here
|
||||
touch /app/data/.initialized
|
||||
echo "Done."
|
||||
fi
|
||||
```
|
||||
The app's main process must handle SIGTERM and forward it as required to child processes. bash does not
|
||||
automatically forward signals to child processes. For this reason, when using a startup shell script,
|
||||
remember to use exec <app> as the last line. Doing so will replace bash with your program and allows
|
||||
your program to handle signals as required.
|
||||
|
||||
To copy over some files from the code directory you can use the following command:
|
||||
# Beta Testing
|
||||
|
||||
```sh
|
||||
rsync -a /app/code/config/ /app/data/config/
|
||||
```
|
||||
## Metadata
|
||||
|
||||
### chown data files
|
||||
Publishing to the Cloudron Store requires apps to have meta data specified in the `CloudronManifest.json`.
|
||||
|
||||
Since the app containers use other user ids than the host, it is sometimes necessary to change the permissions on the data directory:
|
||||
|
||||
```sh
|
||||
chown -R cloudron.cloudron /app/data
|
||||
```
|
||||
|
||||
For Apache+PHP apps you might need to change permissions to `www-data.www-data` instead.
|
||||
|
||||
### Persisting random values
|
||||
|
||||
Some apps need a random value that is initialized once and does not change afterwards (e.g. a salt for security purposes). This can be accomplished by creating a random value and storing it in a file in the data directory like this:
|
||||
|
||||
```sh
|
||||
if ! [ -e /app/data/.salt ]; then
|
||||
dd if=/dev/urandom bs=1 count=1024 2>/dev/null | sha1sum | awk '{ print $1 }' > /app/data/.salt
|
||||
fi
|
||||
SALT=$(cat /app/data/.salt)
|
||||
```
|
||||
|
||||
### Generate config
|
||||
|
||||
Addon information (mail, database) exposed as environment variables is subject to change across restarts and an application must use these values directly (i.e. not cache them across restarts). For this reason, it usually regenerates any config files with the current database settings on each invocation.
|
||||
|
||||
First create a config file template like this:
|
||||
```sh
|
||||
... snipped ...
|
||||
'mysql' => array(
|
||||
'driver' => 'mysql',
|
||||
'host' => '##MYSQL_HOST',
|
||||
'port' => '##MYSQL_PORT',
|
||||
'database' => '##MYSQL_DATABASE',
|
||||
'username' => '##MYSQL_USERNAME',
|
||||
'password' => '##MYSQL_PASSWORD',
|
||||
'charset' => 'utf8',
|
||||
'collation' => 'utf8_general_ci',
|
||||
'prefix' => '',
|
||||
),
|
||||
... snipped ...
|
||||
```
|
||||
|
||||
Add the template file to the Dockerfile and create a symlink to the dynamic configuration file as follows:
|
||||
|
||||
```docker
|
||||
ADD database.php.template /app/code/database.php.template
|
||||
RUN ln -s /run/paperwork/database.php /app/code/database.php
|
||||
```
|
||||
|
||||
Then in `start.sh`, generate the real config file under `/run` from the template like this:
|
||||
|
||||
```sh
|
||||
sed -e "s/##MYSQL_HOST/${MYSQL_HOST}/" \
|
||||
-e "s/##MYSQL_PORT/${MYSQL_PORT}/" \
|
||||
-e "s/##MYSQL_DATABASE/${MYSQL_DATABASE}/" \
|
||||
-e "s/##MYSQL_USERNAME/${MYSQL_USERNAME}/" \
|
||||
-e "s/##MYSQL_PASSWORD/${MYSQL_PASSWORD}/" \
|
||||
-e "s/##REDIS_HOST/${REDIS_HOST}/" \
|
||||
-e "s/##REDIS_PORT/${REDIS_PORT}/" \
|
||||
/app/code/database.php.template > /run/paperwork/database.php
|
||||
```
|
||||
|
||||
### Non-root user
|
||||
|
||||
The cloudron runs the `start.sh` as root user. This is required for various commands like `chown` to
|
||||
work as expected. However, to keep the app and cloudron secure, always run the app with the least
|
||||
required permissions.
|
||||
|
||||
The `gosu` tool lets you run a binary with a specific user/group as follows:
|
||||
|
||||
```sh
|
||||
/usr/local/bin/gosu cloudron:cloudron node /app/code/.build/bundle/main.js
|
||||
```
|
||||
|
||||
### SIGTERM handling
|
||||
|
||||
bash, by default, does not automatically forward signals to child processes. This would mean that a SIGTERM sent to the parent processes does not reach the children. For this reason, be sure to `exec` as the
|
||||
last line of the start.sh script. Programs like gosu, nginx, apache do proper SIGTERM handling.
|
||||
|
||||
For example, start apache using `exec` as below:
|
||||
|
||||
```sh
|
||||
echo "Starting apache"
|
||||
APACHE_CONFDIR="" source /etc/apache2/envvars
|
||||
rm -f "${APACHE_PID_FILE}"
|
||||
exec /usr/sbin/apache2 -DFOREGROUND
|
||||
```
|
||||
|
||||
## Popular stacks
|
||||
|
||||
### Apache
|
||||
|
||||
Apache requires some configuration changes to work properly with Cloudron. The following commands configure Apache to:
|
||||
|
||||
* Disable all default sites
|
||||
* Print errors into the app's log and disable other logs
|
||||
* Limit server processes to `5` (good default value)
|
||||
* Change the port number to Cloudron's default `8000`
|
||||
|
||||
```docker
|
||||
RUN rm /etc/apache2/sites-enabled/* \
|
||||
&& sed -e 's,^ErrorLog.*,ErrorLog "/dev/stderr",' -i /etc/apache2/apache2.conf \
|
||||
&& sed -e "s,MaxSpareServers[^:].*,MaxSpareServers 5," -i /etc/apache2/mods-available/mpm_prefork.conf \
|
||||
&& a2disconf other-vhosts-access-log \
|
||||
&& echo "Listen 8000" > /etc/apache2/ports.conf
|
||||
```
|
||||
|
||||
Afterwards, add your site config to Apache:
|
||||
|
||||
```docker
|
||||
ADD apache2.conf /etc/apache2/sites-available/app.conf
|
||||
RUN a2ensite app
|
||||
```
|
||||
|
||||
In `start.sh` Apache can be started using these commands:
|
||||
|
||||
```sh
|
||||
echo "Starting apache..."
|
||||
APACHE_CONFDIR="" source /etc/apache2/envvars
|
||||
rm -f "${APACHE_PID_FILE}"
|
||||
exec /usr/sbin/apache2 -DFOREGROUND
|
||||
```
|
||||
|
||||
### PHP
|
||||
|
||||
PHP wants to store session data at `/var/lib/php/sessions` which is read-only in Cloudron. To fix this problem you can move this data to `/run/php/sessions` with these commands:
|
||||
|
||||
```docker
|
||||
RUN rm -rf /var/lib/php/sessions && ln -s /run/php/sessions /var/lib/php/sessions
|
||||
```
|
||||
|
||||
Don't forget to create this directory and set its ownership in the `start.sh`:
|
||||
|
||||
```sh
|
||||
mkdir -p /run/php/sessions
|
||||
chown www-data:www-data /run/php/sessions
|
||||
```
|
||||
|
||||
### Java
|
||||
|
||||
Java scales its memory usage dynamically according to the available system memory. Due to how Docker works, Java sees the host's total memory instead of the memory limit of the app. To restrict Java to the app's memory limit, it is necessary to add a special parameter to Java calls.
|
||||
|
||||
```sh
|
||||
LIMIT=$(($(cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes)/2**20))
|
||||
export JAVA_OPTS="-XX:MaxRAM=${LIMIT}M"
|
||||
java ${JAVA_OPTS} -jar ...
|
||||
```
|
||||
|
||||
# App Store
|
||||
|
||||
## Requirements
|
||||
|
||||
The Cloudron Store is a mechanism to share your app with others who use Cloudron. Currently, to ensure that
|
||||
apps are maintained, secure and well supported there are some restrictions imposed on apps submitted to
|
||||
the Cloudron Store. See [#292](https://git.cloudron.io/cloudron/box/issues/292) and [#327](https://git.cloudron.io/cloudron/box/issues/327) for an in-depth discussion.
|
||||
|
||||
The following criteria must be met before submitting an app for review:
|
||||
|
||||
* You must be willing to relocate your app packaging code to the [Cloudron Git Repo](https://git.cloudron.io/cloudron/).
|
||||
|
||||
* Contributed apps must have browser tests. You can see the various [app repos](https://git.cloudron.io/cloudron/) to get an idea on how to write these tests. The Cloudron team can help you write the tests.
|
||||
|
||||
* For all practical purposes, you are the maintainer of the app and Cloudron team will not commit to the repo
|
||||
directly. Any changes will be submitted as Merge Requests.
|
||||
|
||||
* You agree that the Cloudron team can take over the responsibility of progressing the app further if you become unresponsive (48 hours), lose interest, lack time etc. Please send us an email if your priorities change.
|
||||
|
||||
* You must sign the [Cloudron CLA](https://cla.cloudron.io/).
|
||||
|
||||
As a token of our appreciation, 3rd party app authors can use the Cloudron for personal or business use for free.
|
||||
The `cloudron` tool will notify if any such information is missing, prior to uploading.
|
||||
See more information for each field [here](/references/manifest.html).
|
||||
|
||||
## Upload for Testing
|
||||
|
||||
@@ -656,7 +432,7 @@ Once your app is ready, you can upload it to the store for `beta testing` by
|
||||
other Cloudron users. This can be done using:
|
||||
|
||||
```
|
||||
cloudron appstore upload
|
||||
cloudron upload
|
||||
```
|
||||
|
||||
You should now be able to visit `/#/appstore/<appid>?version=<appversion>` on your
|
||||
@@ -665,17 +441,19 @@ Cloudron to check if the icon, description and other details appear correctly.
|
||||
Other Cloudron users can install your app on their Cloudron's using
|
||||
`cloudron install --appstore-id <appid@version>`.
|
||||
|
||||
## Publishing
|
||||
# Publishing
|
||||
|
||||
Once you are satisfied with the beta testing, you can submit it for review.
|
||||
|
||||
```
|
||||
cloudron appstore submit
|
||||
cloudron submit
|
||||
```
|
||||
|
||||
The cloudron.io team will review the app and publish the app to the store.
|
||||
|
||||
## Versioning and Updates
|
||||
# Updating the app
|
||||
|
||||
## Versioning
|
||||
|
||||
To create an update for an app, simply bump up the [semver version](/references/manifest.html#version) field in
|
||||
the manifest and publish a new version to the store.

+14 -17
@@ -2,18 +2,17 @@

'use strict';

var argv = require('yargs').argv,
    autoprefixer = require('gulp-autoprefixer'),
    concat = require('gulp-concat'),
    cssnano = require('gulp-cssnano'),
    del = require('del'),
    ejs = require('gulp-ejs'),
var ejs = require('gulp-ejs'),
    gulp = require('gulp'),
    sass = require('gulp-sass'),
    serve = require('gulp-serve'),
    sourcemaps = require('gulp-sourcemaps'),
    del = require('del'),
    concat = require('gulp-concat'),
    uglify = require('gulp-uglify'),
    url = require('url');
    serve = require('gulp-serve'),
    sass = require('gulp-sass'),
    sourcemaps = require('gulp-sourcemaps'),
    cssnano = require('gulp-cssnano'),
    autoprefixer = require('gulp-autoprefixer'),
    argv = require('yargs').argv;

gulp.task('3rdparty', function () {
    gulp.src([
@@ -55,16 +54,14 @@ gulp.task('js', ['js-index', 'js-setup', 'js-setupdns', 'js-update'], function (
    var oauth = {
        clientId: argv.clientId || 'cid-webadmin',
        clientSecret: argv.clientSecret || 'unused',
        apiOrigin: argv.apiOrigin || '',
        apiOriginHostname: argv.apiOrigin ? url.parse(argv.apiOrigin).hostname : ''
        apiOrigin: argv.apiOrigin || ''
    };

    console.log();
    console.log('Using OAuth credentials:');
    console.log(' ClientId: %s', oauth.clientId);
    console.log(' ClientSecret: %s', oauth.clientSecret);
    console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
    console.log(' Cloudron Host: %s', oauth.apiOriginHostname);
    console.log(' ClientId: %s', oauth.clientId);
    console.log(' ClientSecret: %s', oauth.clientSecret);
    console.log(' Cloudron API: %s', oauth.apiOrigin || 'default');
    console.log();


@@ -143,7 +140,7 @@ gulp.task('js-update', function () {
// --------------

gulp.task('html', ['html-views', 'html-update', 'html-templates'], function () {
    return gulp.src('webadmin/src/*.html').pipe(ejs({ apiOriginHostname: oauth.apiOriginHostname }, { ext: '.html' })).pipe(gulp.dest('webadmin/dist'));
    return gulp.src('webadmin/src/*.html').pipe(gulp.dest('webadmin/dist'));
});

gulp.task('html-update', function () {

@@ -1,32 +0,0 @@
#!/usr/bin/env node

'use strict';

var tar = require('tar-fs'),
    fs = require('fs'),
    path = require('path'),
    zlib = require('zlib');

if (process.argv.length < 4) {
    console.error('Usage: tarjs <cwd> <dir>');
    process.exit(1);
}

var dir = process.argv[3];
var cwd = process.argv[2];

console.error('Packing directory "'+ dir +'" from within "' + cwd + '" and stream to stdout');

process.chdir(cwd);

var stat = fs.statSync(dir);
if (!stat.isDirectory()) throw(dir + ' is not a directory');

var gzipStream = zlib.createGzip({});

tar.pack(path.resolve(dir), {
    ignore: function (name) {
        if (name === '.') return true;
        return false;
    }
}).pipe(gzipStream).pipe(process.stdout);
@@ -1,16 +0,0 @@
'use strict';

exports.up = function(db, callback) {
    db.runSql('ALTER TABLE appAddonConfigs ADD COLUMN name VARCHAR(128)', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE appAddonConfigs DROP COLUMN name', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};

@@ -1,14 +0,0 @@
'use strict';

var url = require('url');

exports.up = function(db, callback) {
    var dbName = url.parse(process.env.DATABASE_URL).path.substr(1); // remove slash

    // by default, mysql collates case insensitively. 'utf8_general_cs' is not available
    db.runSql('ALTER DATABASE ' + dbName + ' DEFAULT CHARACTER SET=utf8mb4 DEFAULT COLLATE utf8mb4_unicode_ci', callback);
};

exports.down = function(db, callback) {
    callback();
};
@@ -1,95 +0,0 @@
'use strict';

var async = require('async');

// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
function postProcess(result) {
    try {
        result.manifest = JSON.parse(result.manifestJson);
        delete result.manifestJson;

        result.oldConfig = JSON.parse(result.oldConfigJson);
        delete result.oldConfigJson;

        result.portBindings = { };
        var hostPorts = result.hostPorts === null ? [ ] : result.hostPorts.split(',');
        var environmentVariables = result.environmentVariables === null ? [ ] : result.environmentVariables.split(',');

        delete result.hostPorts;
        delete result.environmentVariables;

        for (var i = 0; i < environmentVariables.length; i++) {
            result.portBindings[environmentVariables[i]] = parseInt(hostPorts[i], 10);
        }

        result.accessRestriction = JSON.parse(result.accessRestrictionJson);
        if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
        delete result.accessRestrictionJson;

        // TODO remove later once all apps have this attribute
        result.xFrameOptions = result.xFrameOptions || 'SAMEORIGIN';

        result.sso = !!result.sso; // make it bool

        result.debugMode = JSON.parse(result.debugModeJson);
        delete result.debugModeJson;
    } catch (e) {
        console.error('Failed to get restoreConfig for app.', e);
        console.error('Falling back to empty values to make the update succeed.');
        result.manifest = null;
    }
}

// from apps.js DO NOT UPDATE WHEN apps.js changes, as this is part of db migration!!
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.installationProgress', 'apps.runState',
    'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'apps.location', 'apps.dnsRecordId',
    'apps.accessRestrictionJson', 'apps.lastBackupId', 'apps.oldConfigJson', 'apps.memoryLimit', 'apps.altDomain',
    'apps.xFrameOptions', 'apps.sso', 'apps.debugModeJson' ].join(',');

exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE backups ADD COLUMN restoreConfigJson TEXT'),
        // fill all the backups with restoreConfigs from current apps
        function addRestoreConfigs(callback) {
            console.log('Importing restoreConfigs');

            var appQuery = 'SELECT ' + APPS_FIELDS_PREFIXED + ',' +
                'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables' +
                ' FROM apps LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId' +
                ' GROUP BY apps.id ORDER BY apps.id';

            db.all(appQuery, function (error, apps) {
                if (error) return callback(error);

                apps.forEach(postProcess);

                async.eachSeries(apps, function (app, next) {
                    if (app.manifest === null) return next();

                    db.all('SELECT * FROM backups WHERE type="app" AND id LIKE "%app%\\_' + app.id + '\\_%"', function (error, backups) {
                        if (error) return next(error);

                        // from apps.js:getAppConfig()
                        var restoreConfig = {
                            manifest: app.manifest,
                            location: app.location,
                            accessRestriction: app.accessRestriction,
                            portBindings: app.portBindings,
                            memoryLimit: app.memoryLimit,
                            xFrameOptions: app.xFrameOptions || 'SAMEORIGIN',
                            altDomain: app.altDomain
                        };

                        async.eachSeries(backups, function (backup, next) {
                            db.runSql('UPDATE backups SET restoreConfigJson=?,creationTime=creationTime WHERE id=?', [ JSON.stringify(restoreConfig), backup.id ], next);
                        }, next);
                    });
                }, callback);
            });
        }
    ], callback);
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE backups DROP COLUMN restoreConfigJson', callback);
};
@@ -1,22 +0,0 @@
'use strict';

exports.up = function(db, callback) {
    db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
        if (error || results.length === 0) return callback(error);

        var backupConfig = JSON.parse(results[0].value);
        if (backupConfig.provider === 'filesystem') {
            backupConfig.retentionSecs = 2 * 24 * 60 * 60; // 2 days
        } else if (backupConfig.provider === 's3') { // S3
            backupConfig.retentionSecs = -1;
        } else if (backupConfig.provider === 'caas') {
            backupConfig.retentionSecs = 10 * 24 * 60 * 60; // 10 days
        }
        db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [ JSON.stringify(backupConfig) ], callback);

    });
};

exports.down = function(db, callback) {
    callback();
};
@@ -69,9 +69,9 @@ CREATE TABLE IF NOT EXISTS apps(
    sso BOOLEAN DEFAULT 1, // whether user chose to enable SSO
    debugModeJson TEXT, // options for development mode

    // the following fields do not belong here, they can be removed when we use a queue for apptask
    lastBackupId VARCHAR(128), // used to pass backupId to restore from to apptask
    oldConfigJson TEXT, // used to pass old config for apptask
    lastBackupId VARCHAR(128), // tracks last valid backup, can be removed

    oldConfigJson TEXT, // used to pass old config for apptask, can be removed when we use a queue

    PRIMARY KEY(id));

@@ -97,20 +97,18 @@ CREATE TABLE IF NOT EXISTS settings(
CREATE TABLE IF NOT EXISTS appAddonConfigs(
    appId VARCHAR(128) NOT NULL,
    addonId VARCHAR(32) NOT NULL,
    name VARCHAR(128) NOT NULL,
    value VARCHAR(512) NOT NULL,
    FOREIGN KEY(appId) REFERENCES apps(id));

CREATE TABLE IF NOT EXISTS backups(
    id VARCHAR(128) NOT NULL,
    filename VARCHAR(128) NOT NULL,
    creationTime TIMESTAMP,
    version VARCHAR(128) NOT NULL, /* app version or box version */
    type VARCHAR(16) NOT NULL, /* 'box' or 'app' */
    dependsOn TEXT, /* comma separate list of objects this backup depends on */
    state VARCHAR(16) NOT NULL,
    restoreConfigJson TEXT, /* JSON including the manifest of the backed up app */

    PRIMARY KEY (id));
    PRIMARY KEY (filename));

CREATE TABLE IF NOT EXISTS eventlog(
    id VARCHAR(128) NOT NULL,

Generated +552 -655 (file diff suppressed because it is too large)
+8 -12
@@ -13,10 +13,10 @@
        "node >=4.0.0 <=4.1.1"
    ],
    "dependencies": {
        "@sindresorhus/df": "^2.1.0",
        "async": "^2.1.4",
        "aws-sdk": "^2.41.0",
        "aws-sdk": "^2.1.46",
        "body-parser": "^1.13.1",
        "checksum": "^0.1.1",
        "cloudron-manifestformat": "^2.8.0",
        "connect-ensure-login": "^0.1.1",
        "connect-lastmile": "^0.1.0",
@@ -28,14 +28,14 @@
        "db-migrate": "^0.10.0-beta.20",
        "db-migrate-mysql": "^1.1.10",
        "debug": "^2.2.0",
        "dockerode": "^2.4.3",
        "dockerode": "^2.2.10",
        "ejs": "^2.2.4",
        "ejs-cli": "^1.2.0",
        "express": "^4.12.4",
        "express-rate-limit": "^2.6.0",
        "express-session": "^1.11.3",
        "gulp-sass": "^3.0.0",
        "hat": "0.0.3",
        "hock": "https://registry.npmjs.org/hock/-/hock-1.3.2.tgz",
        "json": "^9.0.3",
        "ldapjs": "^1.0.0",
        "mime": "^1.3.4",
@@ -43,6 +43,8 @@
        "morgan": "^1.7.0",
        "multiparty": "^4.1.2",
        "mysql": "^2.7.0",
        "native-dns": "^0.7.0",
        "node-df": "^0.1.1",
        "node-uuid": "^1.4.3",
        "nodemailer": "^1.3.0",
        "nodemailer-smtp-transport": "^1.0.3",
@@ -55,16 +57,13 @@
        "passport-local": "^1.0.0",
        "passport-oauth2-client-password": "^0.1.2",
        "password-generator": "^2.0.2",
        "progress-stream": "^2.0.0",
        "proxy-middleware": "^0.13.0",
        "s3-block-read-stream": "^0.2.0",
        "safetydance": "^0.2.0",
        "safetydance": "^0.1.1",
        "semver": "^4.3.6",
        "showdown": "^1.6.0",
        "split": "^1.0.0",
        "superagent": "^1.8.3",
        "supererror": "^0.7.1",
        "tar-fs": "https://registry.npmjs.org/tar-fs/-/tar-fs-1.15.2.tgz",
        "tldjs": "^1.6.2",
        "underscore": "^1.7.0",
        "valid-url": "^1.0.9",
@@ -88,18 +87,15 @@
        "istanbul": "*",
        "js2xmlparser": "^1.0.0",
        "mocha": "*",
        "mock-aws-s3": "^2.4.0",
        "nock": "^9.0.2",
        "node-sass": "^3.0.0-alpha.0",
        "readdirp": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz",
        "request": "^2.65.0",
        "yargs": "^3.15.0"
    },
    "scripts": {
        "migrate_local": "DATABASE_URL=mysql://root:@localhost/box node_modules/.bin/db-migrate up",
        "migrate_test": "BOX_ENV=test DATABASE_URL=mysql://root:@localhost/boxtest node_modules/.bin/db-migrate up",
        "test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test/[^a]*",
        "test_all": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test",
        "test": "npm run migrate_test && src/test/setupTest && BOX_ENV=test ./node_modules/istanbul/lib/cli.js test $1 ./node_modules/mocha/bin/_mocha -- -R spec ./src/test ./src/routes/test",
        "postmerge": "/bin/true",
        "precommit": "/bin/true",
        "prepush": "npm test",

+24
-49
@@ -15,15 +15,16 @@ fi
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_MEMORY="974" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989, Serverdiscounter.com 974)
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
# copied from cloudron-resize-fs.sh
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024))
|
||||
readonly disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }')
|
||||
readonly disk_size_gb=$((${disk_size_bytes}/1024/1024/1024))
|
||||
|
||||
# verify the system has minimum requirements met
|
||||
if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
|
||||
@@ -32,7 +33,7 @@ if [[ "${physical_memory}" -lt "${MINIMUM_MEMORY}" ]]; then
|
||||
fi
|
||||
|
||||
if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on / is ${disk_size_gb}GB)"
|
||||
echo "Error: Cloudron requires atleast 20GB disk space (Disk space on ${disk_device} is ${disk_size_gb}GB)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -44,19 +45,15 @@ encryptionKey=""
|
||||
restoreUrl=""
|
||||
dnsProvider="manual"
|
||||
tlsProvider="le-prod"
|
||||
requestedVersion=""
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
requestedVersion="latest"
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
webServerOrigin="https://cloudron.io"
|
||||
dataJson=""
|
||||
prerelease="false"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
baseDataDir=""
|
||||
|
||||
# TODO this is still there for the restore case, see other occasions below
|
||||
versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,data-dir:,provider:,encryption-key:,restore-url:,tls-provider:,version:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -71,25 +68,24 @@ while true; do
|
||||
--version) requestedVersion="$2"; shift 2;;
|
||||
--env)
|
||||
if [[ "$2" == "dev" ]]; then
|
||||
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
|
||||
apiServerOrigin="https://api.dev.cloudron.io"
|
||||
webServerOrigin="https://dev.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/dev-cloudron-releases/versions.json"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
elif [[ "$2" == "staging" ]]; then
|
||||
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
|
||||
apiServerOrigin="https://api.staging.cloudron.io"
|
||||
webServerOrigin="https://staging.cloudron.io"
|
||||
versionsUrl="https://s3.amazonaws.com/staging-cloudron-releases/versions.json"
|
||||
tlsProvider="le-staging"
|
||||
prerelease="true"
|
||||
fi
|
||||
shift 2;;
|
||||
--versions-url) versionsUrl="$2"; shift 2;;
|
||||
--api-server) apiServerOrigin="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--data) dataJson="$2"; shift 2;;
|
||||
--prerelease) prerelease="true"; shift;;
|
||||
--source-url) sourceTarballUrl="$2"; version="0.0.1+custom"; shift 2;;
|
||||
--data-dir) baseDataDir=$(realpath "$2"); shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -98,7 +94,7 @@ done
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic)"
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, scaleway, vultr or generic)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "ami" && \
|
||||
@@ -129,16 +125,11 @@ if [[ -z "${dataJson}" ]]; then
|
||||
echo "--dns-provider must be one of : manual, noop"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -n "${baseDataDir}" && ! -d "${baseDataDir}" ]]; then
|
||||
echo "${baseDataDir} does not exist"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "##############################################"
|
||||
echo " Cloudron Setup (${requestedVersion:-latest})"
|
||||
echo " Cloudron Setup (${requestedVersion}) "
|
||||
echo "##############################################"
|
||||
echo ""
|
||||
echo " Follow setup logs in a second terminal with:"
|
||||
@@ -162,25 +153,20 @@ fi
|
||||
|
||||
echo "=> Checking version"
|
||||
if [[ "${sourceTarballUrl}" == "" ]]; then
|
||||
if ! releaseJson=$($curl -s "${apiServerOrigin}/api/v1/releases?prerelease=${prerelease}&boxVersion=${requestedVersion}"); then
|
||||
echo "Failed to get release information"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$requestedVersion" == "" ]]; then
|
||||
version=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["version"])')
|
||||
releaseJson=$($curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj["info"]["sourceTarballUrl"])'); then
|
||||
echo "No source code for version '${requestedVersion:-latest}'"
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build data
|
||||
# TODO versionsUrl is still there for the cloudron restore case
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${restoreUrl}" ]]; then
|
||||
data=$(cat <<EOF
|
||||
@@ -189,7 +175,6 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"fqdn": "${domain}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"tlsConfig": {
|
||||
"provider": "${tlsProvider}"
|
||||
},
|
||||
@@ -199,8 +184,7 @@ if [[ -z "${dataJson}" ]]; then
|
||||
"backupConfig" : {
|
||||
"provider": "filesystem",
|
||||
"backupFolder": "/var/backups",
|
||||
"key": "${encryptionKey}",
|
||||
"retentionSecs": 172800
|
||||
"key": "${encryptionKey}"
|
||||
},
|
||||
"updateConfig": {
|
||||
"prerelease": ${prerelease}
|
||||
@@ -216,7 +200,6 @@ EOF
|
||||
"fqdn": "${domain}",
|
||||
"provider": "${provider}",
|
||||
"apiServerOrigin": "${apiServerOrigin}",
|
||||
"webServerOrigin": "${webServerOrigin}",
|
||||
"restore": {
|
||||
"url": "${restoreUrl}",
|
||||
"key": "${encryptionKey}"
|
||||
@@ -249,17 +232,9 @@ fi
|
||||
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
# poor mans semver
|
||||
if [[ ${version} == "0.10"* ]]; then
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" --data-dir "${baseDataDir}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
rm "${DATA_FILE}"
|
||||
|
||||
|
||||
+1
-17
@@ -9,26 +9,22 @@ fi
|
||||
|
||||
readonly USER=yellowtent
|
||||
readonly BOX_SRC_DIR=/home/${USER}/box
|
||||
readonly BASE_DATA_DIR=/home/${USER}
|
||||
readonly CLOUDRON_CONF=/home/yellowtent/configs/cloudron.conf
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
|
||||
|
||||
readonly is_update=$([[ -f "${CLOUDRON_CONF}" ]] && echo "yes" || echo "no")
|
||||
|
||||
arg_data=""
|
||||
arg_data_dir=""
|
||||
|
||||
args=$(getopt -o "" -l "data:,data-file:,data-dir:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "data:,data-file:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--data) arg_data="$2"; shift 2;;
|
||||
--data-file) arg_data=$(cat $2); shift 2;;
|
||||
--data-dir) arg_data_dir="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -60,21 +56,9 @@ if [[ "${is_update}" == "yes" ]]; then
|
||||
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
|
||||
fi
|
||||
|
||||
# setup links to data directory
|
||||
if [[ -n "${arg_data_dir}" ]]; then
|
||||
echo "==> installer: setting up links to data directory"
|
||||
mkdir "${arg_data_dir}/appsdata"
|
||||
ln -s "${arg_data_dir}/appsdata" "${BASE_DATA_DIR}/appsdata"
|
||||
mkdir "${arg_data_dir}/platformdata"
|
||||
ln -s "${arg_data_dir}/platformdata" "${BASE_DATA_DIR}/platformdata"
|
||||
fi
|
||||
|
||||
# ensure we are not inside the source directory, which we will remove now
|
||||
cd /root
|
||||
|
||||
echo "==> installer: updating packages"
|
||||
# add logic to update apt packages here
|
||||
|
||||
echo "==> installer: switching the box code"
|
||||
rm -rf "${BOX_SRC_DIR}"
|
||||
mv "${box_src_tmp_dir}" "${BOX_SRC_DIR}"
|
||||
|
||||
+7
-6
@@ -5,6 +5,7 @@ json="${source_dir}/../node_modules/.bin/json"
|
||||
|
||||
# IMPORTANT: Fix cloudron.js:doUpdate if you add/remove any arg. keep these sorted for readability
|
||||
arg_api_server_origin=""
|
||||
arg_box_versions_url=""
|
||||
arg_fqdn=""
|
||||
arg_is_custom_domain="false"
|
||||
arg_restore_key=""
|
||||
@@ -49,6 +50,8 @@ while true; do
|
||||
[[ "${arg_api_server_origin}" == "" ]] && arg_api_server_origin="https://api.cloudron.io"
|
||||
arg_web_server_origin=$(echo "$2" | $json webServerOrigin)
|
||||
[[ "${arg_web_server_origin}" == "" ]] && arg_web_server_origin="https://cloudron.io"
|
||||
arg_box_versions_url=$(echo "$2" | $json boxVersionsUrl)
|
||||
[[ "${arg_box_versions_url}" == "" ]] && arg_box_versions_url="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
|
||||
# TODO check if an where this is used
|
||||
arg_version=$(echo "$2" | $json version)
|
||||
@@ -61,9 +64,7 @@ while true; do
|
||||
[[ "${arg_is_demo}" == "" ]] && arg_is_demo="false"
|
||||
|
||||
arg_tls_cert=$(echo "$2" | $json tlsCert)
|
||||
[[ "${arg_tls_cert}" == "null" ]] && arg_tls_cert=""
|
||||
arg_tls_key=$(echo "$2" | $json tlsKey)
|
||||
[[ "${arg_tls_key}" == "null" ]] && arg_tls_key=""
|
||||
arg_token=$(echo "$2" | $json token)
|
||||
|
||||
arg_provider=$(echo "$2" | $json provider)
|
||||
@@ -96,14 +97,14 @@ done
|
||||
|
||||
echo "Parsed arguments:"
|
||||
echo "api server: ${arg_api_server_origin}"
|
||||
echo "box versions url: ${arg_box_versions_url}"
|
||||
echo "fqdn: ${arg_fqdn}"
|
||||
echo "custom domain: ${arg_is_custom_domain}"
|
||||
echo "restore key: ${arg_restore_key}"
|
||||
echo "restore url: ${arg_restore_url}"
|
||||
echo "tls cert: ${arg_tls_cert}"
|
||||
# do not dump these as they might become available via logs API
|
||||
#echo "restore key: ${arg_restore_key}"
|
||||
#echo "tls key: ${arg_tls_key}"
|
||||
#echo "token: ${arg_token}"
|
||||
echo "tls key: ${arg_tls_key}"
|
||||
echo "token: ${arg_token}"
|
||||
echo "tlsConfig: ${arg_tls_config}"
|
||||
echo "version: ${arg_version}"
|
||||
echo "web server: ${arg_web_server_origin}"
|
||||
|
||||
+6
-6
@@ -6,12 +6,12 @@ readonly SETUP_WEBSITE_DIR="/home/yellowtent/setup/website"
|
||||
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly box_src_dir="$(realpath ${script_dir}/..)"
|
||||
readonly PLATFORM_DATA_DIR="/home/yellowtent/platformdata"
|
||||
readonly DATA_DIR="/home/yellowtent/data"
|
||||
readonly ADMIN_LOCATION="my" # keep this in sync with constants.js
|
||||
|
||||
echo "Setting up nginx update page"
|
||||
|
||||
if [[ ! -f "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
if [[ ! -f "${DATA_DIR}/nginx/applications/admin.conf" ]]; then
|
||||
echo "No admin.conf found. This Cloudron has no domain yet. Skip splash setup"
|
||||
exit
|
||||
fi
|
||||
@@ -29,16 +29,16 @@ cp -r "${script_dir}/splash/website/"* "${SETUP_WEBSITE_DIR}"
|
||||
# create nginx config
|
||||
readonly current_infra=$(node -e "console.log(require('${script_dir}/../src/infra_version.js').version);")
|
||||
existing_infra="none"
|
||||
[[ -f "${PLATFORM_DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${PLATFORM_DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
[[ -f "${DATA_DIR}/INFRA_VERSION" ]] && existing_infra=$(node -e "console.log(JSON.parse(require('fs').readFileSync('${DATA_DIR}/INFRA_VERSION', 'utf8')).version);")
|
||||
if [[ "${arg_retire_reason}" != "" || "${existing_infra}" != "${current_infra}" ]]; then
|
||||
echo "Showing progress bar on all subdomains in retired mode or infra update. retire: ${arg_retire_reason} existing: ${existing_infra} current: ${current_infra}"
|
||||
rm -f ${PLATFORM_DATA_DIR}/nginx/applications/*
|
||||
rm -f ${DATA_DIR}/nginx/applications/*
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"~^(.+)\$\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
else
|
||||
echo "Show progress bar only on admin domain for normal update"
|
||||
${box_src_dir}/node_modules/.bin/ejs-cli -f "${script_dir}/start/nginx/appconfig.ejs" \
|
||||
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${PLATFORM_DATA_DIR}/nginx/applications/admin.conf"
|
||||
-O "{ \"vhost\": \"${admin_fqdn}\", \"adminOrigin\": \"${admin_origin}\", \"endpoint\": \"splash\", \"sourceDir\": \"${SETUP_WEBSITE_DIR}\", \"certFilePath\": \"cert/host.cert\", \"keyFilePath\": \"cert/host.key\", \"xFrameOptions\": \"SAMEORIGIN\" }" > "${DATA_DIR}/nginx/applications/admin.conf"
|
||||
fi
|
||||
|
||||
if [[ "${arg_retire_reason}" == "migrate" ]]; then
|
||||
|
||||
+85
-69
@@ -5,11 +5,10 @@ set -eu -o pipefail
|
||||
echo "==> Cloudron Start"
|
||||
|
||||
readonly USER="yellowtent"
|
||||
readonly DATA_FILE="/root/user_data.img"
|
||||
readonly HOME_DIR="/home/${USER}"
|
||||
readonly BOX_SRC_DIR="${HOME_DIR}/box"
|
||||
readonly OLD_DATA_DIR="${HOME_DIR}/data";
|
||||
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data
|
||||
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data
|
||||
readonly DATA_DIR="${HOME_DIR}/data" # app and platform data
|
||||
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
|
||||
readonly CONFIG_DIR="${HOME_DIR}/configs"
|
||||
readonly SETUP_PROGRESS_JSON="${HOME_DIR}/setup/website/progress.json"
|
||||
@@ -34,6 +33,36 @@ timedatectl set-ntp 1
|
||||
timedatectl set-timezone UTC
|
||||
hostnamectl set-hostname "${arg_fqdn}"
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
if [[ "${arg_provider}" == "caas" ]]; then
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,202,443,587,993,4190 -j ACCEPT
|
||||
else
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 25,80,22,443,587,993,4190 -j ACCEPT
|
||||
fi
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# so it gets restored across reboot
|
||||
mkdir -p /etc/iptables && iptables-save > /etc/iptables/rules.v4
|
||||
|
||||
echo "==> Configuring docker"
|
||||
cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
|
||||
systemctl enable apparmor
|
||||
@@ -42,7 +71,7 @@ systemctl restart apparmor
|
||||
usermod ${USER} -a -G docker
|
||||
temp_file=$(mktemp)
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > "${temp_file}"
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
# restart docker if options changed
|
||||
@@ -67,55 +96,46 @@ if [[ "${arg_provider}" == "caas" ]]; then
|
||||
systemctl reload sshd
|
||||
fi
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
mkdir -p "${APPS_DATA_DIR}"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}"
|
||||
echo "==> Setup btrfs data"
|
||||
if [[ ! -d "${DATA_DIR}" ]]; then
|
||||
echo "==> Mounting loopback btrfs"
|
||||
truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service)
|
||||
mkfs.btrfs -L UserDataHome "${DATA_FILE}"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
mount -t btrfs -o loop,nosuid "${DATA_FILE}" ${DATA_DIR}
|
||||
fi
|
||||
|
||||
# keep these in sync with paths.js
|
||||
echo "==> Ensuring directories"
|
||||
if [[ ! -d "${PLATFORM_DATA_DIR}/mail" ]]; then
|
||||
if [[ -d "${OLD_DATA_DIR}/mail" ]]; then
|
||||
echo "==> Migrate old mail data"
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail"
|
||||
# we can't move the whole folder as it is a btrfs subvolume mount
|
||||
mv -f "${OLD_DATA_DIR}/mail/"* "${PLATFORM_DATA_DIR}/mail/" # this used to be mail container's run directory
|
||||
else
|
||||
echo "==> Create new mail data dir"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail"
|
||||
fi
|
||||
if ! btrfs subvolume show "${DATA_DIR}/mail" &> /dev/null; then
|
||||
# Migrate mail data to new format
|
||||
docker stop mail || true # otherwise the move below might fail if mail container writes in the middle
|
||||
rm -rf "${DATA_DIR}/mail" # this used to be mail container's run directory
|
||||
btrfs subvolume create "${DATA_DIR}/mail"
|
||||
[[ -d "${DATA_DIR}/box/mail" ]] && mv "${DATA_DIR}/box/mail/"* "${DATA_DIR}/mail"
|
||||
rm -rf "${DATA_DIR}/box/mail"
|
||||
fi
|
||||
mkdir -p "${DATA_DIR}/graphite"
|
||||
mkdir -p "${DATA_DIR}/mail/dkim"
|
||||
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/graphite"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mail/dkim"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mysql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/postgresql"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/mongodb"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/snapshots"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/addons/mail"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/acme"
|
||||
mkdir -p "${DATA_DIR}/mysql"
|
||||
mkdir -p "${DATA_DIR}/postgresql"
|
||||
mkdir -p "${DATA_DIR}/mongodb"
|
||||
mkdir -p "${DATA_DIR}/snapshots"
|
||||
mkdir -p "${DATA_DIR}/addons/mail"
|
||||
mkdir -p "${DATA_DIR}/collectd/collectd.conf.d"
|
||||
mkdir -p "${DATA_DIR}/acme"
|
||||
|
||||
mkdir -p "${BOX_DATA_DIR}"
|
||||
if btrfs subvolume show "${DATA_DIR}/box" &> /dev/null; then
|
||||
# Migrate box data out of data volume
|
||||
mv "${DATA_DIR}/box/"* "${BOX_DATA_DIR}"
|
||||
btrfs subvolume delete "${DATA_DIR}/box"
|
||||
fi
|
||||
mkdir -p "${BOX_DATA_DIR}/appicons"
|
||||
mkdir -p "${BOX_DATA_DIR}/certs"
|
||||
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
|
||||
|
||||
# ensure backups folder exists and is writeable
|
||||
mkdir -p /var/backups
|
||||
chmod 777 /var/backups
|
||||
|
||||
echo "==> Check for old btrfs volumes"
|
||||
if mountpoint -q "${OLD_DATA_DIR}"; then
|
||||
echo "==> Cleanup btrfs volumes"
|
||||
# First stop all container to be able to unmount
|
||||
docker ps -q | xargs docker stop
|
||||
umount "${OLD_DATA_DIR}"
|
||||
rm -rf "/root/user_data.img"
|
||||
else
|
||||
echo "==> No btrfs volumes found";
|
||||
fi
|
||||
|
||||
echo "==> Configuring journald"
|
||||
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
|
||||
-e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
|
||||
@@ -148,10 +168,7 @@ cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
|
||||
systemctl daemon-reload
|
||||
systemctl enable unbound
|
||||
systemctl enable cloudron.target
|
||||
systemctl enable cloudron-firewall
|
||||
|
||||
# update firewall rules
|
||||
systemctl restart cloudron-firewall
|
||||
systemctl enable iptables-restore
|
||||
|
||||
# For logrotate
|
||||
systemctl enable --now cron
|
||||
@@ -165,18 +182,18 @@ cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
|
||||
|
||||
echo "==> Configuring collectd"
|
||||
rm -rf /etc/collectd
|
||||
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
|
||||
ln -sfF "${DATA_DIR}/collectd" /etc/collectd
|
||||
cp "${script_dir}/start/collectd.conf" "${DATA_DIR}/collectd/collectd.conf"
|
||||
systemctl restart collectd
|
||||
|
||||
echo "==> Configuring nginx"
|
||||
# link nginx config to system config
|
||||
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
|
||||
ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${PLATFORM_DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${PLATFORM_DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${PLATFORM_DATA_DIR}/nginx/mime.types"
|
||||
ln -s "${DATA_DIR}/nginx" /etc/nginx
|
||||
mkdir -p "${DATA_DIR}/nginx/applications"
|
||||
mkdir -p "${DATA_DIR}/nginx/cert"
|
||||
cp "${script_dir}/start/nginx/nginx.conf" "${DATA_DIR}/nginx/nginx.conf"
|
||||
cp "${script_dir}/start/nginx/mime.types" "${DATA_DIR}/nginx/mime.types"
|
||||
if ! grep -q "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service; then
|
||||
# default nginx service file does not restart on crash
|
||||
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
|
||||
@@ -185,7 +202,12 @@ fi
|
||||
systemctl start nginx
|
||||
|
||||
# bookkeep the version as part of data
|
||||
echo "{ \"version\": \"${arg_version}\", \"apiServerOrigin\": \"${arg_api_server_origin}\" }" > "${BOX_DATA_DIR}/version"
|
||||
echo "{ \"version\": \"${arg_version}\", \"boxVersionsUrl\": \"${arg_box_versions_url}\" }" > "${BOX_DATA_DIR}/version"
|
||||
|
||||
# remove old snapshots. if we do want to keep this around, we will have to fix the chown -R below
|
||||
# which currently fails because these are readonly fs
|
||||
echo "==> Cleaning up snapshots"
|
||||
find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete
|
||||
|
||||
# restart mysql to make sure it has latest config
|
||||
if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then
|
||||
@@ -208,18 +230,11 @@ mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
|
||||
if [[ -n "${arg_restore_url}" ]]; then
|
||||
set_progress "30" "Downloading restore data"
|
||||
|
||||
decrypt=""
|
||||
if [[ "${arg_restore_url}" == *.tar.gz.enc || -n "${arg_restore_key}" ]]; then
|
||||
echo "==> Downloading encrypted backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
decrypt=(openssl aes-256-cbc -d -nosalt -pass "pass:${arg_restore_key}")
|
||||
else
|
||||
echo "==> Downloading backup: ${arg_restore_url}"
|
||||
decrypt=(cat -)
|
||||
fi
|
||||
echo "==> Downloading backup: ${arg_restore_url} and key: ${arg_restore_key}"
|
||||
|
||||
while true; do
|
||||
if $curl -L "${arg_restore_url}" | "${decrypt[@]}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,platformdata/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
if $curl -L "${arg_restore_url}" | openssl aes-256-cbc -d -pass "pass:${arg_restore_key}" \
|
||||
| tar -zxf - --overwrite --transform="s,^box/\?,boxdata/," --transform="s,^mail/\?,data/mail/," --show-transformed-names -C "${HOME_DIR}"; then break; fi
|
||||
echo "Failed to download data, trying again"
|
||||
done
|
||||
|
||||
@@ -246,6 +261,7 @@ cat > "${CONFIG_DIR}/cloudron.conf" <<CONF_END
|
||||
"webServerOrigin": "${arg_web_server_origin}",
|
||||
"fqdn": "${arg_fqdn}",
|
||||
"isCustomDomain": ${arg_is_custom_domain},
|
||||
"boxVersionsUrl": "${arg_box_versions_url}",
|
||||
"provider": "${arg_provider}",
|
||||
"isDemo": ${arg_is_demo},
|
||||
"database": {
|
||||
@@ -273,11 +289,11 @@ CONF_END
|
||||
|
||||
echo "==> Changing ownership"
|
||||
chown "${USER}:${USER}" -R "${CONFIG_DIR}"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/nginx" "${DATA_DIR}/collectd" "${DATA_DIR}/addons" "${DATA_DIR}/acme"
|
||||
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
|
||||
chown "${USER}:${USER}" -R "${DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
|
||||
chown "${USER}:${USER}" "${DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
|
||||
chown "${USER}:${USER}" "${DATA_DIR}"
|
||||
|
||||
echo "==> Adding automated configs"
|
||||
if [[ ! -z "${arg_backup_config}" ]]; then
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
echo "==> Setting up firewall"
|
||||
iptables -t filter -N CLOUDRON || true
|
||||
iptables -t filter -F CLOUDRON # empty any existing rules
|
||||
|
||||
# NOTE: keep these in sync with src/apps.js validatePortBindings
|
||||
# allow ssh, http, https, ping, dns
|
||||
iptables -t filter -I CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# caas has ssh on port 202
|
||||
iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443,587,993,4190 -j ACCEPT
|
||||
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-request -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -p udp --sport 53 -j ACCEPT
|
||||
iptables -t filter -A CLOUDRON -s 172.18.0.0/16 -j ACCEPT # required to accept any connections from apps to our IP:<public port>
|
||||
iptables -t filter -A CLOUDRON -i lo -j ACCEPT # required for localhost connections (mysql)
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -A CLOUDRON -m limit --limit 2/min -j LOG --log-prefix "IPTables Packet Dropped: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON -j DROP
|
||||
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON 2>/dev/null; then
|
||||
iptables -t filter -I INPUT -j CLOUDRON
|
||||
fi
|
||||
|
||||
# Setup rate limit chain (the recent info is at /proc/net/xt_recent)
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT # empty any existing rules
|
||||
|
||||
# log dropped incoming. keep this at the end of all the rules
|
||||
iptables -t filter -N CLOUDRON_RATELIMIT_LOG || true
|
||||
iptables -t filter -F CLOUDRON_RATELIMIT_LOG # empty any existing rules
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -m limit --limit 2/min -j LOG --log-prefix "IPTables RateLimit: " --log-level 7
|
||||
iptables -t filter -A CLOUDRON_RATELIMIT_LOG -j DROP
|
||||
|
||||
# http https
|
||||
for port in 80 443; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# ssh smtp ssh msa imap sieve
|
||||
for port in 22 202; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# TODO: move docker platform rules to platform.js so it can be specialized to rate limit only when destination is the mail container
|
||||
|
||||
# docker translates (dnat) 25, 587, 993, 4190 in the PREROUTING step
|
||||
for port in 2525 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn ! -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 50 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# msa, ldap, imap, sieve
|
||||
for port in 2525 3002 4190 9993; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 500 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# cloudron docker network: mysql postgresql redis mongodb
|
||||
for port in 3306 5432 6379 27017; do
|
||||
iptables -A CLOUDRON_RATELIMIT -p tcp --syn -s 172.18.0.0/16 -d 172.18.0.0/16 --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
|
||||
done
|
||||
|
||||
# For ssh, http, https
|
||||
if ! iptables -t filter -C INPUT -j CLOUDRON_RATELIMIT 2>/dev/null; then
|
||||
iptables -t filter -I INPUT 1 -j CLOUDRON_RATELIMIT
|
||||
fi
|
||||
|
||||
# For smtp, imap etc routed via docker/nat
|
||||
# Workaroud issue where Docker insists on adding itself first in FORWARD table
|
||||
iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
|
||||
iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
|
||||
@@ -2,42 +2,49 @@
|
||||
|
||||
set -eu -o pipefail
|
||||
|
||||
readonly USER_HOME="/home/yellowtent"
|
||||
readonly APPS_SWAP_FILE="/apps.swap"
|
||||
readonly USER_DATA_FILE="/root/user_data.img"
|
||||
readonly USER_DATA_DIR="/home/yellowtent/data"
|
||||
|
||||
# detect device of rootfs (http://forums.fedoraforum.org/showthread.php?t=270316)
|
||||
disk_device="$(for d in $(find /dev -type b); do [ "$(mountpoint -d /)" = "$(mountpoint -x $d)" ] && echo $d && break; done)"
|
||||
|
||||
existing_swap=$(cat /proc/meminfo | grep SwapTotal | awk '{ printf "%.0f", $2/1024 }')
|
||||
|
||||
# all sizes are in mb
|
||||
readonly physical_memory=$(LC_ALL=C free -m | awk '/Mem:/ { print $2 }')
|
||||
readonly swap_size=$((${physical_memory} > 4096 ? 4096 : ${physical_memory})) # min(RAM, 4GB) if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly swap_size=$((${physical_memory} - ${existing_swap})) # if you change this, fix enoughResourcesAvailable() in client.js
|
||||
readonly app_count=$((${physical_memory} / 200)) # estimated app count
|
||||
readonly disk_size_bytes=$(LC_ALL=C df --output=size / | tail -n1)
|
||||
readonly disk_size=$((${disk_size_bytes}/1024))
|
||||
readonly disk_size_bytes=$(LC_ALL=C fdisk -l ${disk_device} | grep "Disk ${disk_device}" | awk '{ printf $5 }') # can't rely on fdisk human readable units, using bytes instead
|
||||
readonly disk_size=$((${disk_size_bytes}/1024/1024))
|
||||
readonly system_size=10240 # 10 gigs for system libs, apps images, installer, box code, data and tmp
|
||||
readonly ext4_reserved=$((disk_size * 5 / 100)) # this can be changes using tune2fs -m percent /dev/vda1
|
||||
|
||||
echo "Disk device: ${disk_device}"
|
||||
echo "Physical memory: ${physical_memory}"
|
||||
echo "Estimated app count: ${app_count}"
|
||||
echo "Disk size: ${disk_size}M"
|
||||
|
||||
# Allocate swap for general app usage
|
||||
readonly current_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
readonly needed_swap_size=$((swap_size - current_swap))
|
||||
if [[ ${needed_swap_size} -gt 0 ]]; then
|
||||
echo "Need more swap of ${needed_swap_size}M"
|
||||
# compute size of apps.swap ignoring what is already set
|
||||
without_apps_swap=$(swapon --show="name,size" --noheadings --bytes | awk 'BEGIN{s=0}{if ($1!="/apps.swap") s+=$2}END{printf "%.0f", s/1024/1024}')
|
||||
apps_swap_size=$((swap_size - without_apps_swap))
|
||||
echo "Creating Apps swap file of size ${apps_swap_size}M"
|
||||
if [[ -f "${APPS_SWAP_FILE}" ]]; then
|
||||
echo "Swapping off before resizing swap"
|
||||
swapoff "${APPS_SWAP_FILE}" || true
|
||||
fi
|
||||
fallocate -l "${apps_swap_size}m" "${APPS_SWAP_FILE}"
|
||||
if [[ ! -f "${APPS_SWAP_FILE}" && ${swap_size} -gt 0 ]]; then
|
||||
echo "Creating Apps swap file of size ${swap_size}M"
|
||||
fallocate -l "${swap_size}m" "${APPS_SWAP_FILE}"
|
||||
chmod 600 "${APPS_SWAP_FILE}"
|
||||
mkswap "${APPS_SWAP_FILE}"
|
||||
swapon "${APPS_SWAP_FILE}"
|
||||
if ! grep -q "${APPS_SWAP_FILE}" /etc/fstab; then
|
||||
echo "Adding swap to fstab"
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
fi
|
||||
echo "${APPS_SWAP_FILE} none swap sw 0 0" >> /etc/fstab
|
||||
else
|
||||
echo "Swap requirements already met"
|
||||
echo "Apps Swap file already exists"
|
||||
fi
|
||||
|
||||
# see start.sh for the initial default size of 8gb. On small disks the calculation might be lower than 8gb resulting in a failure to resize here.
|
||||
echo "Resizing data volume"
|
||||
home_data_size=$((disk_size - system_size - swap_size - ext4_reserved))
|
||||
echo "Resizing up btrfs user data to size ${home_data_size}M"
|
||||
umount "${USER_DATA_DIR}" || true
|
||||
# Do not preallocate (non-sparse). Doing so overallocates for data too much in advance and causes problems when using many apps with smaller data
|
||||
# fallocate -l "${home_data_size}m" "${USER_DATA_FILE}" # does not overwrite existing data
|
||||
truncate -s "${home_data_size}m" "${USER_DATA_FILE}" # this will shrink it if the file had existed. this is useful when running this script on a live system
|
||||
mount -t btrfs -o loop,nosuid "${USER_DATA_FILE}" ${USER_DATA_DIR}
|
||||
btrfs filesystem resize max "${USER_DATA_DIR}"
|
||||
|
||||
@@ -194,6 +194,7 @@ LoadPlugin write_graphite
|
||||
|
||||
<Plugin df>
|
||||
FSType "ext4"
|
||||
FSType "btrfs"
|
||||
|
||||
ReportByDevice true
|
||||
IgnoreSelected false
|
||||
@@ -259,3 +260,4 @@ LoadPlugin write_graphite
|
||||
<Include "/etc/collectd/collectd.conf.d">
|
||||
Filter "*.conf"
|
||||
</Include>
|
||||
|
||||
|
||||
@@ -8,19 +8,3 @@ max_connections=50
|
||||
# on ec2, without this we get a sporadic connection drop when doing the initial migration
|
||||
max_allowed_packet=32M
|
||||
|
||||
# https://mathiasbynens.be/notes/mysql-utf8mb4
|
||||
character-set-server = utf8mb4
|
||||
collation-server = utf8mb4_unicode_ci
|
||||
|
||||
[mysqldump]
|
||||
quick
|
||||
quote-names
|
||||
max_allowed_packet = 16M
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[mysql]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
[client]
|
||||
default-character-set = utf8mb4
|
||||
|
||||
|
||||
@@ -6,10 +6,10 @@ map $http_upgrade $connection_upgrade {
|
||||
|
||||
server {
|
||||
<% if (vhost) { %>
|
||||
listen 443 http2;
|
||||
listen 443;
|
||||
server_name <%= vhost %>;
|
||||
<% } else { %>
|
||||
listen 443 http2 default_server;
|
||||
listen 443 default_server;
|
||||
<% } %>
|
||||
|
||||
ssl on;
|
||||
@@ -32,21 +32,14 @@ server {
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
proxy_hide_header X-Frame-Options;
|
||||
|
||||
# https://github.com/twitter/secureheaders
|
||||
# https://www.owasp.org/index.php/OWASP_Secure_Headers_Project#tab=Compatibility_Matrix
|
||||
# https://wiki.mozilla.org/Security/Guidelines/Web_Security
|
||||
add_header X-XSS-Protection "1; mode=block";
|
||||
proxy_hide_header X-XSS-Protection;
|
||||
add_header X-Download-Options "noopen";
|
||||
proxy_hide_header X-Download-Options;
|
||||
add_header X-Content-Type-Options "nosniff";
|
||||
proxy_hide_header X-Content-Type-Options;
|
||||
add_header X-Permitted-Cross-Domain-Policies "none";
|
||||
proxy_hide_header X-Permitted-Cross-Domain-Policies;
|
||||
add_header Referrer-Policy "no-referrer-when-downgrade";
|
||||
proxy_hide_header Referrer-Policy;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_intercept_errors on;
|
||||
@@ -76,9 +69,6 @@ server {
|
||||
proxy_buffers 4 256k;
|
||||
proxy_busy_buffers_size 256k;
|
||||
|
||||
# No buffering to temp files, it fails for large downloads
|
||||
proxy_max_temp_file_size 0;
|
||||
|
||||
# Disable check to allow unlimited body sizes
|
||||
client_max_body_size 0;
|
||||
|
||||
@@ -88,19 +78,13 @@ server {
|
||||
client_max_body_size 1m;
|
||||
}
|
||||
|
||||
location ~ ^/api/v1/(developer|session)/login$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
client_max_body_size 1m;
|
||||
limit_req zone=admin_login burst=5;
|
||||
}
|
||||
|
||||
# the read timeout is between successive reads and not the whole connection
|
||||
location ~ ^/api/v1/apps/.*/exec$ {
|
||||
proxy_pass http://127.0.0.1:3000;
|
||||
proxy_read_timeout 30m;
|
||||
}
|
||||
|
||||
# graphite paths (uncomment block below and visit /graphite/index.html)
|
||||
# graphite paths
|
||||
# location ~ ^/(graphite|content|metrics|dashboard|render|browser|composer)/ {
|
||||
# proxy_pass http://127.0.0.1:8000;
|
||||
# client_max_body_size 1m;
|
||||
@@ -110,6 +94,7 @@ server {
|
||||
root <%= sourceDir %>/webadmin/dist;
|
||||
index index.html index.htm;
|
||||
}
|
||||
|
||||
<% } else if ( endpoint === 'app' ) { %>
|
||||
proxy_pass http://127.0.0.1:<%= port %>;
|
||||
<% } else if ( endpoint === 'splash' ) { %>
|
||||
@@ -149,3 +134,4 @@ server {
|
||||
<% } %>
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -33,9 +33,6 @@ http {
|
||||
# keep-alive connections timeout in 65s. this is because many browsers timeout in 60 seconds
|
||||
keepalive_timeout 65s;
|
||||
|
||||
# zones for rate limiting
|
||||
limit_req_zone $binary_remote_addr zone=admin_login:10m rate=10r/s; # 10 request a second
|
||||
|
||||
# HTTP server
|
||||
server {
|
||||
listen 80;
|
||||
@@ -51,7 +48,7 @@ http {
|
||||
# acme challenges
|
||||
location /.well-known/acme-challenge/ {
|
||||
default_type text/plain;
|
||||
alias /home/yellowtent/platformdata/acme/;
|
||||
alias /home/yellowtent/data/acme/;
|
||||
}
|
||||
|
||||
location / {
|
||||
@@ -62,3 +59,4 @@ http {
|
||||
|
||||
include applications/*.conf;
|
||||
}
|
||||
|
||||
|
||||
+12
-3
@@ -10,6 +10,15 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmappdir.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupbox.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupbox.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/backupapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/backupapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/restoreapp.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restoreapp.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
|
||||
|
||||
@@ -22,11 +31,11 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
|
||||
Defaults!/home/yellowtent/box/src/scripts/retire.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/retire.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/rmbackup.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmbackup.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/authorized_keys.sh env_keep="HOME BOX_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/authorized_keys.sh
|
||||
|
||||
Defaults!/home/yellowtent/box/src/scripts/node.sh env_keep="HOME BOX_ENV NODE_ENV"
|
||||
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/node.sh
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
[Unit]
|
||||
Description=Cloudron Firewall
|
||||
After=docker.service
|
||||
PartOf=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart="/home/yellowtent/box/setup/start/cloudron-firewall.sh"
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=IPTables Restore
|
||||
Before=docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
ExecStart=/sbin/iptables-restore /etc/iptables/rules.v4
|
||||
RemainAfterExit=yes
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
+48
-74
@@ -20,7 +20,6 @@ var appdb = require('./appdb.js'),
|
||||
async = require('async'),
|
||||
clients = require('./clients.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
ClientsError = clients.ClientsError,
|
||||
debug = require('debug')('box:addons'),
|
||||
docker = require('./docker.js'),
|
||||
@@ -195,11 +194,7 @@ function getEnvironment(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
appdb.getAddonConfigByAppId(app.id, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
return callback(null, result.map(function (e) { return e.name + '=' + e.value; }));
|
||||
});
|
||||
appdb.getAddonConfigByAppId(app.id, callback);
|
||||
}
|
||||
|
||||
function getBindsSync(app, addons) {
|
||||
@@ -212,7 +207,7 @@ function getBindsSync(app, addons) {
|
||||
|
||||
for (var addon in addons) {
|
||||
switch (addon) {
|
||||
case 'localstorage': binds.push(path.join(paths.APPS_DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
default: break;
|
||||
}
|
||||
}
|
||||
@@ -259,9 +254,9 @@ function setupOauth(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var env = [
|
||||
{ name: 'OAUTH_CLIENT_ID', value: result.id },
|
||||
{ name: 'OAUTH_CLIENT_SECRET', value: result.clientSecret },
|
||||
{ name: 'OAUTH_ORIGIN', value: config.adminOrigin() }
|
||||
'OAUTH_CLIENT_ID=' + result.id,
|
||||
'OAUTH_CLIENT_SECRET=' + result.clientSecret,
|
||||
'OAUTH_ORIGIN=' + config.adminOrigin()
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting oauth addon config to %j', env);
|
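These hunks show the addon env config in two shapes: `{ name, value }` objects and plain `KEY=value` strings (the form a container environment accepts directly). A small sketch of converting between the two, assuming both shapes need to coexist while reading old rows (the example value is made up):

```
'use strict';

// { name: 'OAUTH_CLIENT_ID', value: 'cid-1' }  ->  'OAUTH_CLIENT_ID=cid-1'
function toEnvString(pair) {
    return pair.name + '=' + pair.value;
}

// 'OAUTH_CLIENT_ID=cid-1'  ->  { name: 'OAUTH_CLIENT_ID', value: 'cid-1' }
// split only on the first '=' so values containing '=' survive the round trip
function toNameValue(str) {
    var idx = str.indexOf('=');
    return { name: str.substr(0, idx), value: str.substr(idx + 1) };
}

var env = [ { name: 'OAUTH_ORIGIN', value: 'https://my.example.com' } ].map(toEnvString);
console.log(env);                   // [ 'OAUTH_ORIGIN=https://my.example.com' ]
console.log(env.map(toNameValue));  // back to objects
```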
||||
@@ -292,13 +287,13 @@ function setupEmail(app, options, callback) {
|
||||
|
||||
// note that "external" access info can be derived from MAIL_DOMAIN (since it's part of user documentation)
|
||||
var env = [
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_SIEVE_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SIEVE_PORT', value: '4190' },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
'MAIL_SMTP_SERVER=mail',
|
||||
'MAIL_SMTP_PORT=2525',
|
||||
'MAIL_IMAP_SERVER=mail',
|
||||
'MAIL_IMAP_PORT=9993',
|
||||
'MAIL_SIEVE_SERVER=mail',
|
||||
'MAIL_SIEVE_PORT=4190',
|
||||
'MAIL_DOMAIN=' + config.fqdn()
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up Email');
|
||||
@@ -324,13 +319,13 @@ function setupLdap(app, options, callback) {
|
||||
if (!app.sso) return callback(null);
|
||||
|
||||
var env = [
|
||||
{ name: 'LDAP_SERVER', value: '172.18.0.1' },
|
||||
{ name: 'LDAP_PORT', value: '' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_URL', value: 'ldap://172.18.0.1:' + config.get('ldapPort') },
|
||||
{ name: 'LDAP_USERS_BASE_DN', value: 'ou=users,dc=cloudron' },
|
||||
{ name: 'LDAP_GROUPS_BASE_DN', value: 'ou=groups,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_DN', value: 'cn='+ app.id + ',ou=apps,dc=cloudron' },
|
||||
{ name: 'LDAP_BIND_PASSWORD', value: hat(4 * 128) } // this is ignored
|
||||
'LDAP_SERVER=172.18.0.1',
|
||||
'LDAP_PORT=' + config.get('ldapPort'),
|
||||
'LDAP_URL=ldap://172.18.0.1:' + config.get('ldapPort'),
|
||||
'LDAP_USERS_BASE_DN=ou=users,dc=cloudron',
|
||||
'LDAP_GROUPS_BASE_DN=ou=groups,dc=cloudron',
|
||||
'LDAP_BIND_DN=cn='+ app.id + ',ou=apps,dc=cloudron',
|
||||
'LDAP_BIND_PASSWORD=' + hat(4 * 128) // this is ignored
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting up LDAP');
|
||||
@@ -359,15 +354,14 @@ function setupSendMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
{ name: 'MAIL_SMTP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_SMTP_PORT', value: '2525' },
|
||||
{ name: 'MAIL_SMTP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_SMTP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_FROM', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
"MAIL_SMTP_SERVER=mail",
|
||||
"MAIL_SMTP_PORT=2525",
|
||||
"MAIL_SMTP_USERNAME=" + mailbox.name,
|
||||
"MAIL_SMTP_PASSWORD=" + app.id,
|
||||
"MAIL_FROM=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
];
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'sendmail', env, callback);
|
||||
@@ -395,15 +389,14 @@ function setupRecvMail(app, options, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var mailbox = results.filter(function (r) { return !r.aliasTarget; })[0];
|
||||
var password = generatePassword(128, false /* memorable */, /[\w\d_]/);
|
||||
|
||||
var env = [
|
||||
{ name: 'MAIL_IMAP_SERVER', value: 'mail' },
|
||||
{ name: 'MAIL_IMAP_PORT', value: '9993' },
|
||||
{ name: 'MAIL_IMAP_USERNAME', value: mailbox.name },
|
||||
{ name: 'MAIL_IMAP_PASSWORD', value: password },
|
||||
{ name: 'MAIL_TO', value: mailbox.name + '@' + config.fqdn() },
|
||||
{ name: 'MAIL_DOMAIN', value: config.fqdn() }
|
||||
"MAIL_IMAP_SERVER=mail",
|
||||
"MAIL_IMAP_PORT=9993",
|
||||
"MAIL_IMAP_USERNAME=" + mailbox.name,
|
||||
"MAIL_IMAP_PASSWORD=" + app.id,
|
||||
"MAIL_TO=" + mailbox.name + '@' + config.fqdn(),
|
||||
"MAIL_DOMAIN=" + config.fqdn()
|
||||
];
|
||||
|
||||
debugApp(app, 'Setting sendmail addon config to %j', env);
|
||||
@@ -433,9 +426,7 @@ function setupMySql(app, options, callback) {
|
||||
docker.execContainer('mysql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
debugApp(app, 'Setting mysql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mysql', env, callback);
|
||||
});
|
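In the string-env variant, the stdout of the in-container `service.sh` call can be used as-is: every line is already `KEY=value`. A self-contained sketch of just the buffer handling shown above (the docker call is stubbed out, values are made up):

```
'use strict';

// stdout as it might come back from docker.execContainer with bufferStdout: true
var stdout = Buffer.from('MYSQL_USERNAME=abc123\nMYSQL_PASSWORD=s3cret\nMYSQL_DATABASE=abc123\n');

// split('\n') on a string ending in '\n' yields a trailing empty element,
// which slice(0, -1) drops - the same idiom used in the hunk above
var env = stdout.toString('utf8').split('\n').slice(0, -1);

console.log(env); // [ 'MYSQL_USERNAME=abc123', 'MYSQL_PASSWORD=s3cret', 'MYSQL_DATABASE=abc123' ]
```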
||||
@@ -462,7 +453,7 @@ function backupMySql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'backup-prefix' : 'backup', app.id ];
|
||||
@@ -478,7 +469,7 @@ function restoreMySql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMySql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mysqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mysqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mysql/service.sh', options.multipleDatabases ? 'restore-prefix' : 'restore', app.id ];
|
||||
@@ -498,9 +489,7 @@ function setupPostgreSql(app, options, callback) {
|
||||
docker.execContainer('postgresql', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
debugApp(app, 'Setting postgresql addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'postgresql', env, callback);
|
||||
});
|
||||
@@ -527,7 +516,7 @@ function backupPostgreSql(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'backup', app.id ];
|
||||
@@ -543,7 +532,7 @@ function restorePostgreSql(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restorePostgreSql');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'postgresqldump'));
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'postgresqldump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/postgresql/service.sh', 'restore', app.id ];
|
||||
@@ -564,9 +553,7 @@ function setupMongoDb(app, options, callback) {
|
||||
docker.execContainer('mongodb', cmd, { bufferStdout: true }, function (error, stdout) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var result = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
var env = result.map(function (r) { var idx = r.indexOf('='); return { name: r.substr(0, idx), value: r.substr(idx + 1) }; });
|
||||
|
||||
var env = stdout.toString('utf8').split('\n').slice(0, -1); // remove trailing newline
|
||||
debugApp(app, 'Setting mongodb addon config to %j', env);
|
||||
appdb.setAddonConfig(app.id, 'mongodb', env, callback);
|
||||
});
|
||||
@@ -593,7 +580,7 @@ function backupMongoDb(app, options, callback) {
|
||||
|
||||
callback = once(callback); // ChildProcess exit may or may not be called after error
|
||||
|
||||
var output = fs.createWriteStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
var output = fs.createWriteStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
output.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'backup', app.id ];
|
||||
@@ -609,7 +596,7 @@ function restoreMongoDb(app, options, callback) {
|
||||
|
||||
debugApp(app, 'restoreMongoDb');
|
||||
|
||||
var input = fs.createReadStream(path.join(paths.APPS_DATA_DIR, app.id, 'mongodbdump'));
|
||||
var input = fs.createReadStream(path.join(paths.DATA_DIR, app.id, 'mongodbdump'));
|
||||
input.on('error', callback);
|
||||
|
||||
var cmd = [ '/addons/mongodb/service.sh', 'restore', app.id ];
|
||||
@@ -623,9 +610,9 @@ function setupRedis(app, options, callback) {
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var redisPassword = generatePassword(128, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisPassword = generatePassword(64, false /* memorable */, /[\w\d_]/); // ensure no / in password for being sed friendly (and be uri friendly)
|
||||
var redisVarsFile = path.join(paths.ADDON_CONFIG_DIR, 'redis-' + app.id + '_vars.sh');
|
||||
var redisDataDir = path.join(paths.APPS_DATA_DIR, app.id + '/redis');
|
||||
var redisDataDir = path.join(paths.DATA_DIR, app.id + '/redis');
|
||||
|
||||
if (!safe.fs.writeFileSync(redisVarsFile, 'REDIS_PASSWORD=' + redisPassword)) {
|
||||
return callback(new Error('Error writing redis config'));
|
||||
@@ -633,34 +620,21 @@ function setupRedis(app, options, callback) {
|
||||
|
||||
if (!safe.fs.mkdirSync(redisDataDir) && safe.error.code !== 'EEXIST') return callback(new Error('Error creating redis data dir:' + safe.error));
|
||||
|
||||
// Compute redis memory limit based on app's memory limit (this is arbitrary)
|
||||
var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;
|
||||
|
||||
if (memoryLimit === -1) { // unrestricted (debug mode)
|
||||
memoryLimit = 0;
|
||||
} else if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) { // less than 2G (ram+swap)
|
||||
memoryLimit = 150 * 1024 * 1024; // 150m
|
||||
} else {
|
||||
memoryLimit = 600 * 1024 * 1024; // 600m
|
||||
}
|
||||
|
||||
const tag = infra.images.redis.tag, redisName = 'redis-' + app.id;
|
||||
const cmd = `docker run --restart=always -d --name=${redisName} \
|
||||
--net cloudron \
|
||||
--net-alias ${redisName} \
|
||||
-m ${memoryLimit/2} \
|
||||
--memory-swap ${memoryLimit} \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-m 100m \
|
||||
--memory-swap 150m \
|
||||
-v ${redisVarsFile}:/etc/redis/redis_vars.sh:ro \
|
||||
-v ${redisDataDir}:/var/lib/redis:rw \
|
||||
--read-only -v /tmp -v /run ${tag}`;
|
||||
|
||||
var env = [
|
||||
{ name: 'REDIS_URL', value: 'redis://redisuser:' + redisPassword + '@redis-' + app.id },
|
||||
{ name: 'REDIS_PASSWORD', value: redisPassword },
|
||||
{ name: 'REDIS_HOST', value: redisName },
|
||||
{ name: 'REDIS_PORT', value: '6379' }
|
||||
'REDIS_URL=redis://redisuser:' + redisPassword + '@redis-' + app.id,
|
||||
'REDIS_PASSWORD=' + redisPassword,
|
||||
'REDIS_HOST=' + redisName,
|
||||
'REDIS_PORT=6379'
|
||||
];
|
||||
|
||||
async.series([
|
||||
|
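One side of the redis hunk derives the sidecar's memory ceiling from the app's own memory limit instead of a fixed `-m 100m --memory-swap 150m`. A standalone sketch of that computation, using the thresholds visible in the hunk (half the result goes to `-m`, the full value to `--memory-swap`):

```
'use strict';

// Returns the memory-swap ceiling (in bytes) for the per-app redis container.
function redisMemoryLimit(app) {
    var memoryLimit = app.memoryLimit || app.manifest.memoryLimit || 0;

    if (memoryLimit === -1) return 0;                                  // unrestricted (debug mode)
    if (memoryLimit === 0 || memoryLimit <= (2 * 1024 * 1024 * 1024)) return 150 * 1024 * 1024; // 150m for apps up to 2G ram+swap
    return 600 * 1024 * 1024;                                          // 600m otherwise
}

console.log(redisMemoryLimit({ memoryLimit: 0, manifest: { memoryLimit: 0 } }));      // 157286400
console.log(redisMemoryLimit({ memoryLimit: 4 * 1024 * 1024 * 1024, manifest: {} })); // 629145600
```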
||||
@@ -0,0 +1,49 @@
|
||||
'use strict';
|
||||
|
||||
var crypto = require('crypto');
|
||||
|
||||
// This code is taken from https://github.com/fabdrol/node-aes-helper
|
||||
module.exports = {
|
||||
algorithm: 'AES-256-CBC',
|
||||
|
||||
key: function (password, salt) {
|
||||
var key = salt.toString('utf8') + password;
|
||||
var hash = crypto.createHash('sha1');
|
||||
|
||||
hash.update(key, 'utf8');
|
||||
return hash.digest('hex');
|
||||
},
|
||||
|
||||
encrypt: function (plain, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var cipher = crypto.createCipher(this.algorithm, key);
|
||||
var crypted;
|
||||
|
||||
try {
|
||||
crypted = cipher.update(plain, 'utf8', 'hex');
|
||||
crypted += cipher.final('hex');
|
||||
} catch (e) {
|
||||
console.error('Encryption error:', e);
|
||||
crypted = '';
|
||||
}
|
||||
|
||||
return crypted;
|
||||
},
|
||||
|
||||
decrypt: function (crypted, password, salt) {
|
||||
var key = this.key(password, salt);
|
||||
var decipher = crypto.createDecipher(this.algorithm, key);
|
||||
var decoded;
|
||||
|
||||
try {
|
||||
decoded = decipher.update(crypted, 'hex', 'utf8');
|
||||
decoded += decipher.final('utf8');
|
||||
} catch (e) {
|
||||
console.error('Decryption error:', e);
|
||||
decoded = '';
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
};
|
||||
|
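The AES helper above relies on `crypto.createCipher`/`createDecipher`, which derive the key via OpenSSL's EVP_BytesToKey and are deprecated in current Node releases. A sketch of an equivalent written against `createCipheriv` (this is not part of the diff; SHA-256 key derivation and a random IV prefixed to the ciphertext are assumptions made for illustration):

```
'use strict';

var crypto = require('crypto');

var ALGORITHM = 'aes-256-cbc';

function deriveKey(password, salt) {
    // 32-byte key for aes-256; scrypt or PBKDF2 would be stronger, SHA-256 keeps the sketch short
    return crypto.createHash('sha256').update(salt + password, 'utf8').digest();
}

function encrypt(plain, password, salt) {
    var iv = crypto.randomBytes(16);
    var cipher = crypto.createCipheriv(ALGORITHM, deriveKey(password, salt), iv);
    var crypted = Buffer.concat([ cipher.update(plain, 'utf8'), cipher.final() ]);
    return iv.toString('hex') + ':' + crypted.toString('hex'); // keep the IV with the ciphertext
}

function decrypt(blob, password, salt) {
    var parts = blob.split(':');
    var decipher = crypto.createDecipheriv(ALGORITHM, deriveKey(password, salt), Buffer.from(parts[0], 'hex'));
    return Buffer.concat([ decipher.update(Buffer.from(parts[1], 'hex')), decipher.final() ]).toString('utf8');
}

console.log(decrypt(encrypt('secret', 'password', 'salt'), 'password', 'salt')); // 'secret'
```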
||||
+13
-21
@@ -14,7 +14,6 @@ exports = module.exports = {
|
||||
setAddonConfig: setAddonConfig,
|
||||
getAddonConfig: getAddonConfig,
|
||||
getAddonConfigByAppId: getAddonConfigByAppId,
|
||||
getAddonConfigByName: getAddonConfigByName,
|
||||
unsetAddonConfig: unsetAddonConfig,
|
||||
unsetAddonConfigByAppId: unsetAddonConfigByAppId,
|
||||
|
||||
@@ -414,11 +413,11 @@ function setAddonConfig(appId, addonId, env, callback) {
|
||||
|
||||
if (env.length === 0) return callback(null);
|
||||
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, name, value) VALUES ';
|
||||
var query = 'INSERT INTO appAddonConfigs(appId, addonId, value) VALUES ';
|
||||
var args = [ ], queryArgs = [ ];
|
||||
for (var i = 0; i < env.length; i++) {
|
||||
args.push(appId, addonId, env[i].name, env[i].value);
|
||||
queryArgs.push('(?, ?, ?, ?)');
|
||||
args.push(appId, addonId, env[i]);
|
||||
queryArgs.push('(?, ?, ?)');
|
||||
}
|
||||
|
||||
database.query(query + queryArgs.join(','), args, function (error) {
|
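In the three-column variant of this query each row stores the whole `KEY=value` string, so the bulk insert needs three placeholders per row instead of four and no separate name column is written. A small sketch of how the query and argument list line up (ids and values are made up):

```
'use strict';

var env = [ 'MAIL_SMTP_SERVER=mail', 'MAIL_SMTP_PORT=2525' ];
var appId = 'app-uuid', addonId = 'sendmail'; // hypothetical ids

var args = [ ], queryArgs = [ ];
for (var i = 0; i < env.length; i++) {
    args.push(appId, addonId, env[i]);
    queryArgs.push('(?, ?, ?)');
}

var query = 'INSERT INTO appAddonConfigs(appId, addonId, value) VALUES ' + queryArgs.join(',');
console.log(query);
// INSERT INTO appAddonConfigs(appId, addonId, value) VALUES (?, ?, ?),(?, ?, ?)
console.log(args);
// [ 'app-uuid', 'sendmail', 'MAIL_SMTP_SERVER=mail', 'app-uuid', 'sendmail', 'MAIL_SMTP_PORT=2525' ]
```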
||||
@@ -457,10 +456,13 @@ function getAddonConfig(appId, addonId, callback) {
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ?', [ appId, addonId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, results);
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -468,23 +470,13 @@ function getAddonConfigByAppId(appId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT name, value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ?', [ appId ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, results);
|
||||
var config = [ ];
|
||||
results.forEach(function (v) { config.push(v.value); });
|
||||
|
||||
callback(null, config);
|
||||
});
|
||||
}
|
||||
|
||||
function getAddonConfigByName(appId, addonId, name, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof addonId, 'string');
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT value FROM appAddonConfigs WHERE appId = ? AND addonId = ? AND name = ?', [ appId, addonId, name ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null, results[0].value);
|
||||
});
|
||||
}
|
||||
|
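On the read side, the string-valued variant flattens the `{ value }` rows into a plain array, which `getEnvironment` in addons.js can then hand to docker as the container `Env` without further mapping. A compact sketch of that flattening (rows are made up):

```
'use strict';

// what database.query would hand back for SELECT value FROM appAddonConfigs WHERE appId = ?
var results = [ { value: 'MAIL_SMTP_SERVER=mail' }, { value: 'MAIL_SMTP_PORT=2525' } ];

var config = [ ];
results.forEach(function (v) { config.push(v.value); });

console.log(config); // [ 'MAIL_SMTP_SERVER=mail', 'MAIL_SMTP_PORT=2525' ]
// with this shape the caller can pass the array straight through instead of
// rebuilding name/value pairs into strings first
```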
||||
+109
-36
@@ -45,8 +45,6 @@ exports = module.exports = {
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
appdb = require('./appdb.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
AppstoreError = require('./appstore.js').AppstoreError,
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
@@ -66,6 +64,7 @@ var addons = require('./addons.js'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
superagent = require('superagent'),
|
||||
@@ -367,6 +366,99 @@ function getAllByUser(user, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function purchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function purchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
var data = { appstoreId: appstoreId };
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
purchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
purchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function unpurchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
function unpurchaseWithAppstoreConfig(appstoreConfig) {
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
|
||||
unpurchaseWithAppstoreConfig(result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
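Both `purchase()` and `unpurchase()` above repeat the Caas token exchange before talking to the appstore API, while the standalone appstore.js elsewhere in this diff factored it into a `getAppstoreConfig` helper. A sketch of sharing that step here as well; not part of the diff, and it assumes the `config`, `settings`, `superagent`, and `AppsError` bindings already required in this file:

```
// Possible shared helper for purchase()/unpurchase(); assumes the requires in apps.js.
function getAppstoreConfig(callback) {
    if (config.provider() !== 'caas') {
        return settings.getAppstoreConfig(function (error, result) {
            if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
            if (!result.token) return callback(new AppsError(AppsError.BILLING_REQUIRED));

            callback(null, result);
        });
    }

    // Caas Cloudrons exchange the box token for user credentials on demand
    var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
    superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
        if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
        if (result.statusCode !== 201) return callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Token exchange failed'));

        callback(null, result.body);
    });
}

// purchase() and unpurchase() would then shrink to:
// getAppstoreConfig(function (error, appstoreConfig) {
//     if (error) return callback(error);
//     purchaseWithAppstoreConfig(appstoreConfig);
// });
```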
||||
function downloadManifest(appStoreId, manifest, callback) {
|
||||
if (!appStoreId && !manifest) return callback(new AppsError(AppsError.BAD_FIELD, 'Neither manifest nor appStoreId provided'));
|
||||
|
||||
@@ -402,8 +494,7 @@ function install(data, auditSource, callback) {
|
||||
altDomain = data.altDomain || null,
|
||||
xFrameOptions = data.xFrameOptions || 'SAMEORIGIN',
|
||||
sso = 'sso' in data ? data.sso : null,
|
||||
debugMode = data.debugMode || null,
|
||||
backupId = data.backupId || null;
|
||||
debugMode = data.debugMode || null;
|
||||
|
||||
assert(data.appStoreId || data.manifest); // atleast one of them is required
|
||||
|
||||
@@ -438,7 +529,7 @@ function install(data, auditSource, callback) {
|
||||
// if sso was unspecified, enable it by default if possible
|
||||
if (sso === null) sso = !!manifest.addons['ldap'] || !!manifest.addons['oauth'];
|
||||
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
if (altDomain !== null && !validator.isFQDN(altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
|
||||
var appId = uuid.v4();
|
||||
|
||||
@@ -455,11 +546,8 @@ function install(data, auditSource, callback) {
|
||||
|
||||
debug('Will install app with id : ' + appId);
|
||||
|
||||
appstore.purchase(appId, appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
purchase(appId, appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var data = {
|
||||
accessRestriction: accessRestriction,
|
||||
@@ -468,8 +556,7 @@ function install(data, auditSource, callback) {
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso,
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app',
|
||||
lastBackupId: backupId
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
@@ -484,7 +571,7 @@ function install(data, auditSource, callback) {
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest, backupId: backupId });
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
@@ -519,7 +606,7 @@ function configure(appId, data, auditSource, callback) {
|
||||
|
||||
if ('altDomain' in data) {
|
||||
values.altDomain = data.altDomain;
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid external domain'));
|
||||
if (values.altDomain !== null && !validator.isFQDN(values.altDomain)) return callback(new AppsError(AppsError.BAD_FIELD, 'Invalid alt domain'));
|
||||
}
|
||||
|
||||
if ('portBindings' in data) {
|
||||
@@ -674,9 +761,10 @@ function appLogFilter(app) {
|
||||
return names.map(function (name) { return 'CONTAINER_NAME=' + name; });
|
||||
}
|
||||
|
||||
function getLogs(appId, options, callback) {
|
||||
function getLogs(appId, lines, follow, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof lines, 'number');
|
||||
assert.strictEqual(typeof follow, 'boolean');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('Getting logs for %s', appId);
|
||||
@@ -685,21 +773,13 @@ function getLogs(appId, options, callback) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
|
||||
var lines = options.lines || 100,
|
||||
follow = !!options.follow,
|
||||
format = options.format || 'json';
|
||||
|
||||
var args = [ '--no-pager', '--lines=' + lines ];
|
||||
var args = [ '--output=json', '--no-pager', '--lines=' + lines ];
|
||||
if (follow) args.push('--follow');
|
||||
if (format == 'short') args.push('--output=short', '-a'); else args.push('--output=json');
|
||||
args = args.concat(appLogFilter(app));
|
||||
|
||||
var cp = spawn('/bin/journalctl', args);
|
||||
|
||||
var transformStream = split(function mapper(line) {
|
||||
if (format !== 'json') return line + '\n';
|
||||
|
||||
var obj = safe.JSON.parse(line);
|
||||
if (!obj) return undefined;
|
||||
|
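Whichever signature getLogs ends up with, the streaming itself comes down to spawning journalctl with per-container filters and turning each output line into a record. A self-contained sketch using only core modules (readline instead of the `split` package; container names are made up):

```
'use strict';

var spawn = require('child_process').spawn,
    readline = require('readline');

// stream journald entries for one app's containers as parsed JSON objects
function streamAppLogs(containerNames, lines, follow, onEntry) {
    var args = [ '--no-pager', '--output=json', '--lines=' + lines ];
    if (follow) args.push('--follow');
    containerNames.forEach(function (name) { args.push('CONTAINER_NAME=' + name); });

    var cp = spawn('/bin/journalctl', args);
    var rl = readline.createInterface({ input: cp.stdout });

    rl.on('line', function (line) {
        var obj;
        try { obj = JSON.parse(line); } catch (e) { return; } // skip partial or garbled lines
        onEntry(obj);
    });

    return cp; // caller can cp.kill() to stop a --follow stream
}

// streamAppLogs([ 'abc123', 'redis-abc123' ], 100, false, function (entry) {
//     console.log(entry.__REALTIME_TIMESTAMP, entry.MESSAGE);
// });
```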
||||
@@ -789,7 +869,6 @@ function clone(appId, data, auditSource, callback) {
|
||||
|
||||
backups.getRestoreConfig(backupId, function (error, restoreConfig) {
|
||||
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!restoreConfig) callback(new AppsError(AppsError.EXTERNAL_ERROR, 'Could not get restore config'));
|
||||
@@ -806,11 +885,8 @@ function clone(appId, data, auditSource, callback) {
|
||||
|
||||
var newAppId = uuid.v4(), appStoreId = app.appStoreId, manifest = restoreConfig.manifest;
|
||||
|
||||
appstore.purchase(newAppId, appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
purchase(newAppId, appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var data = {
|
||||
installationState: appdb.ISTATE_PENDING_CLONE,
|
||||
@@ -847,11 +923,8 @@ function uninstall(appId, auditSource, callback) {
|
||||
get(appId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
appstore.unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error && error.reason === AppstoreError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND));
|
||||
if (error && error.reason === AppstoreError.BILLING_REQUIRED) return callback(new AppsError(AppsError.BILLING_REQUIRED, error.message));
|
||||
if (error && error.reason === AppstoreError.EXTERNAL_ERROR) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
|
||||
-211
@@ -1,211 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
purchase: purchase,
|
||||
unpurchase: unpurchase,
|
||||
|
||||
sendAliveStatus: sendAliveStatus,
|
||||
|
||||
getAppUpdate: getAppUpdate,
|
||||
getBoxUpdate: getBoxUpdate,
|
||||
|
||||
AppstoreError: AppstoreError
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
debug = require('debug')('box:appstore'),
|
||||
os = require('os'),
|
||||
settings = require('./settings.js'),
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
function AppstoreError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
|
||||
Error.call(this);
|
||||
Error.captureStackTrace(this, this.constructor);
|
||||
|
||||
this.name = this.constructor.name;
|
||||
this.reason = reason;
|
||||
if (typeof errorOrMessage === 'undefined') {
|
||||
this.message = reason;
|
||||
} else if (typeof errorOrMessage === 'string') {
|
||||
this.message = errorOrMessage;
|
||||
} else {
|
||||
this.message = 'Internal error';
|
||||
this.nestedError = errorOrMessage;
|
||||
}
|
||||
}
|
||||
util.inherits(AppstoreError, Error);
|
||||
AppstoreError.INTERNAL_ERROR = 'Internal Error';
|
||||
AppstoreError.EXTERNAL_ERROR = 'External Error';
|
||||
AppstoreError.NOT_FOUND = 'Internal Error';
|
||||
AppstoreError.BILLING_REQUIRED = 'Billing Required';
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function getAppstoreConfig(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null, result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
if (!result.token) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function purchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
var data = { appstoreId: appstoreId };
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function unpurchase(appId, appstoreId, callback) {
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof appstoreId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (appstoreId === '') return callback(null);
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/apps/' + appId;
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppstoreError(AppstoreError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function sendAliveStatus(data, callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getAll(function (error, result) {
|
||||
if (error) return callback(new AppstoreError(AppstoreError.INTERNAL_ERROR, error));
|
||||
|
||||
var backendSettings = {
|
||||
dnsConfig: {
|
||||
provider: result[settings.DNS_CONFIG_KEY].provider,
|
||||
wildcard: result[settings.DNS_CONFIG_KEY].provider === 'manual' ? result[settings.DNS_CONFIG_KEY].wildcard : undefined
|
||||
},
|
||||
tlsConfig: {
|
||||
provider: result[settings.TLS_CONFIG_KEY].provider
|
||||
},
|
||||
backupConfig: {
|
||||
provider: result[settings.BACKUP_CONFIG_KEY].provider
|
||||
},
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY]
|
||||
};
|
||||
|
||||
var data = {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
}
|
||||
};
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/alive';
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new AppstoreError(AppstoreError.NOT_FOUND));
|
||||
if (result.statusCode !== 201) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getBoxUpdate(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/boxupdate';
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version() }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
// { version, changelog, upgrade, sourceTarballUrl}
|
||||
callback(null, result.body);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getAppUpdate(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getAppstoreConfig(function (error, appstoreConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId + '/appupdate';
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token, boxVersion: config.version(), appId: app.appStoreId, appVersion: app.manifest.version }).timeout(10 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 204) return callback(null); // no update
|
||||
if (result.statusCode !== 200) return callback(new AppstoreError(AppstoreError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
|
||||
|
||||
// { id, creationDate, manifest }
|
||||
callback(null, result.body);
|
||||
});
|
||||
});
|
||||
}
|
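A side note on the AppstoreError constants in this module: `NOT_FOUND` is assigned the string `'Internal Error'`, the same value as `INTERNAL_ERROR`, so callers comparing `error.reason` cannot tell the two apart. Distinct values keep those comparisons meaningful, e.g.:

```
AppstoreError.INTERNAL_ERROR = 'Internal Error';
AppstoreError.EXTERNAL_ERROR = 'External Error';
AppstoreError.NOT_FOUND = 'Not Found';          // distinct from INTERNAL_ERROR
AppstoreError.BILLING_REQUIRED = 'Billing Required';
```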
||||
+91
-47
@@ -50,7 +50,6 @@ var addons = require('./addons.js'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
tld = require('tldjs'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
@@ -223,7 +222,7 @@ function registerSubdomain(app, overwrite, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Registering subdomain location [%s] overwrite: %s', app.location, overwrite);
|
||||
debugApp(app, 'Registering subdomain location [%s]', app.location);
|
||||
|
||||
// get the current record before updating it
|
||||
subdomains.get(app.location, 'A', function (error, values) {
|
||||
@@ -308,16 +307,7 @@ function waitForAltDomainDnsPropagation(app, callback) {
|
||||
|
||||
// try for 10 minutes before giving up. this allows the user to "reconfigure" the app in the case where
|
||||
// an app has an external domain and cloudron is migrated to custom domain.
|
||||
var isNakedDomain = tld.getDomain(app.altDomain) === app.altDomain;
|
||||
if (isNakedDomain) { // check naked domains with A record since CNAME records don't work there
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
subdomains.waitForDns(app.altDomain, ip, 'A', { interval: 10000, times: 60 }, callback);
|
||||
});
|
||||
} else {
|
||||
subdomains.waitForDns(app.altDomain, config.appFqdn(app.location) + '.', 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
}
|
||||
subdomains.waitForDns(app.altDomain, config.appFqdn(app.location), 'CNAME', { interval: 10000, times: 60 }, callback);
|
||||
}
|
||||
|
||||
// updates the app object and the database
|
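One side of the waitForAltDomainDnsPropagation hunk special-cases naked domains: a CNAME cannot sit on a zone apex, so those are checked with an A record against the box's public IP, while subdomains are checked with a CNAME pointing at the app's FQDN. A condensed sketch of that branch, with the waitForDns dependency passed in and the callback wiring simplified (domain and IP values are made up):

```
'use strict';

var tld = require('tldjs');

function waitForAltDomain(altDomain, appFqdn, publicIp, waitForDns, callback) {
    // naked domains cannot carry a CNAME, so fall back to an A record check
    var isNakedDomain = tld.getDomain(altDomain) === altDomain;

    if (isNakedDomain) {
        return waitForDns(altDomain, publicIp, 'A', { interval: 10000, times: 60 }, callback);
    }

    waitForDns(altDomain, appFqdn, 'CNAME', { interval: 10000, times: 60 }, callback);
}

// waitForAltDomain('example.com', 'blog.my.example.com', '203.0.113.7', subdomains.waitForDns, done);
```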
||||
@@ -353,8 +343,6 @@ function install(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const backupId = app.lastBackupId, isRestoring = app.installationState === appdb.ISTATE_PENDING_RESTORE;
|
||||
|
||||
async.series([
|
||||
verifyManifest.bind(null, app),
|
||||
|
||||
@@ -364,16 +352,9 @@ function install(app, callback) {
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : app.manifest.addons),
|
||||
addons.teardownAddons.bind(null, app, app.manifest.addons),
|
||||
deleteVolume.bind(null, app),
|
||||
|
||||
// for restore case
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
unregisterSubdomain.bind(null, app, app.location),
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
@@ -381,7 +362,7 @@ function install(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '30, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app, isRestoring /* overwrite */),
|
||||
registerSubdomain.bind(null, app, false /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -389,19 +370,8 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '50, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
function restoreFromBackup(next) {
|
||||
if (!backupId) {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
], next);
|
||||
} else {
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '60, Download backup and restoring addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
|
||||
], next);
|
||||
}
|
||||
},
|
||||
updateApp.bind(null, app, { installationProgress: '60, Setting up addons' }),
|
||||
addons.setupAddons.bind(null, app, app.manifest.addons),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
@@ -414,7 +384,7 @@ function install(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain setup' }),
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app), // required when restoring and !lastBackupId
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configure nginx' }),
|
||||
@@ -456,6 +426,84 @@ function backup(app, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// restore is also called for upgrades and infra updates. note that in those cases it is possible there is no backup
|
||||
function restore(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// we don't have a backup, same as re-install. this allows us to install from install failures (update failures always
|
||||
// have a backupId)
|
||||
if (!app.lastBackupId) {
|
||||
debugApp(app, 'No lastBackupId. reinstalling');
|
||||
return install(app, callback);
|
||||
}
|
||||
|
||||
var backupId = app.lastBackupId;
|
||||
|
||||
async.series([
|
||||
updateApp.bind(null, app, { installationProgress: '10, Cleaning up old install' }),
|
||||
unconfigureNginx.bind(null, app),
|
||||
removeCollectdProfile.bind(null, app),
|
||||
stopApp.bind(null, app),
|
||||
deleteContainers.bind(null, app),
|
||||
// oldConfig can be null during upgrades
|
||||
addons.teardownAddons.bind(null, app, app.oldConfig ? app.oldConfig.manifest.addons : null),
|
||||
deleteVolume.bind(null, app),
|
||||
function deleteImageIfChanged(done) {
|
||||
if (!app.oldConfig || (app.oldConfig.manifest.dockerImage === app.manifest.dockerImage)) return done();
|
||||
|
||||
docker.deleteImage(app.oldConfig.manifest, done);
|
||||
},
|
||||
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading icon' }),
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '55, Registering subdomain' }), // ip might change during upgrades
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '65, Creating volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '70, Download backup and restore addons' }),
|
||||
backups.restoreApp.bind(null, app, app.manifest.addons, backupId),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '75, Creating container' }),
|
||||
createContainer.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '80, Setting up collectd profile' }),
|
||||
addCollectdProfile.bind(null, app),
|
||||
|
||||
runApp.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '95, Configuring Nginx' }),
|
||||
configureNginx.bind(null, app),
|
||||
|
||||
// done!
|
||||
function (callback) {
|
||||
debugApp(app, 'restored');
|
||||
updateApp(app, { installationState: appdb.ISTATE_INSTALLED, installationProgress: '', health: null }, callback);
|
||||
}
|
||||
], function seriesDone(error) {
|
||||
if (error) {
|
||||
debugApp(app, 'Error installing app: %s', error);
|
||||
return updateApp(app, { installationState: appdb.ISTATE_ERROR, installationProgress: error.message }, callback.bind(null, error));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
// note that configure is called after an infra update as well
|
||||
function configure(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
@@ -502,7 +550,7 @@ function configure(app, callback) {
|
||||
updateApp.bind(null, app, { installationProgress: '80, Waiting for DNS propagation' }),
|
||||
exports._waitForDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain setup' }),
|
||||
updateApp.bind(null, app, { installationProgress: '85, Waiting for External Domain CNAME setup' }),
|
||||
exports._waitForAltDomainDnsPropagation.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '90, Configuring Nginx' }),
|
||||
@@ -692,17 +740,13 @@ function startTask(appId, callback) {
|
||||
switch (app.installationState) {
|
||||
case appdb.ISTATE_PENDING_UNINSTALL: return uninstall(app, callback);
|
||||
case appdb.ISTATE_PENDING_CONFIGURE: return configure(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_UPDATE: return update(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_RESTORE: return install(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_RESTORE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_BACKUP: return backup(app, callback);
|
||||
case appdb.ISTATE_INSTALLED: return handleRunCommand(app, callback);
|
||||
|
||||
case appdb.ISTATE_PENDING_INSTALL: return install(app, callback);
|
||||
case appdb.ISTATE_PENDING_CLONE: return restore(app, callback);
|
||||
case appdb.ISTATE_PENDING_FORCE_UPDATE: return update(app, callback);
|
||||
case appdb.ISTATE_ERROR:
|
||||
debugApp(app, 'Internal error. apptask launched with error status.');
|
||||
return callback(null);
|
||||
|
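The startTask switch above routes each pending installationState to a handler; the variant with a dedicated restore() sends PENDING_RESTORE and PENDING_CLONE there instead of through install(). The same routing can be written as a lookup table; this is not part of the diff, and the handler functions are assumed to be the ones defined in this file:

```
// Equivalent dispatch expressed as a table (sketch, assumes the handlers in apptask.js).
var handlers = { };
handlers[appdb.ISTATE_PENDING_UNINSTALL]    = uninstall;
handlers[appdb.ISTATE_PENDING_CONFIGURE]    = configure;
handlers[appdb.ISTATE_PENDING_UPDATE]       = update;
handlers[appdb.ISTATE_PENDING_FORCE_UPDATE] = update;
handlers[appdb.ISTATE_PENDING_INSTALL]      = install;
handlers[appdb.ISTATE_PENDING_CLONE]        = restore;   // clone restores a backup into a fresh app id
handlers[appdb.ISTATE_PENDING_RESTORE]      = restore;
handlers[appdb.ISTATE_PENDING_BACKUP]       = backup;
handlers[appdb.ISTATE_INSTALLED]            = handleRunCommand;

var handler = handlers[app.installationState];
if (!handler) return callback(null); // unknown or error state, nothing to do
handler(app, callback);
```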
||||
+1
-1
@@ -100,7 +100,7 @@ function initialize(callback) {
|
||||
var info = { scope: token.scope };
|
||||
|
||||
user.get(token.identifier, function (error, user) {
|
||||
if (error && error.reason === UserError.NOT_FOUND) return callback(null, false);
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, false);
|
||||
if (error) return callback(error);
|
||||
|
||||
callback(null, user, info);
|
||||
|
||||
+9
-70
@@ -3,20 +3,15 @@
|
||||
var assert = require('assert'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', 'restoreConfigJson' ];
|
||||
var BACKUPS_FIELDS = [ 'id', 'creationTime', 'version', 'type', 'dependsOn', 'state', ];
|
||||
|
||||
exports = module.exports = {
|
||||
add: add,
|
||||
|
||||
getByTypeAndStatePaged: getByTypeAndStatePaged,
|
||||
getByTypePaged: getByTypePaged,
|
||||
|
||||
getPaged: getPaged,
    get: get,
    del: del,
    update: update,
    getByAppIdPaged: getByAppIdPaged,

    _clear: clear,
@@ -25,44 +20,21 @@ exports = module.exports = {
    BACKUP_TYPE_BOX: 'box',

    BACKUP_STATE_NORMAL: 'normal', // should rename to created to avoid listing in UI?
    BACKUP_STATE_CREATING: 'creating',
    BACKUP_STATE_ERROR: 'error'
};

function postProcess(result) {
    assert.strictEqual(typeof result, 'object');

    result.dependsOn = result.dependsOn ? result.dependsOn.split(',') : [ ];

    result.restoreConfig = result.restoreConfigJson ? safe.JSON.parse(result.restoreConfigJson) : null;
    delete result.restoreConfigJson;
}

function getByTypeAndStatePaged(type, state, page, perPage, callback) {
    assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
    assert.strictEqual(typeof state, 'string');
function getPaged(page, perPage, callback) {
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);
    assert.strictEqual(typeof callback, 'function');

    database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? AND state = ? ORDER BY creationTime DESC LIMIT ?,?',
        [ type, state, (page-1)*perPage, perPage ], function (error, results) {
        if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));

        results.forEach(function (result) { postProcess(result); });

        callback(null, results);
    });
}

function getByTypePaged(type, page, perPage, callback) {
    assert(type === exports.BACKUP_TYPE_APP || type === exports.BACKUP_TYPE_BOX);
    assert(typeof page === 'number' && page > 0);
    assert(typeof perPage === 'number' && perPage > 0);
    assert.strictEqual(typeof callback, 'function');

    database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups WHERE type = ? ORDER BY creationTime DESC LIMIT ?,?',
        [ type, (page-1)*perPage, perPage ], function (error, results) {
        [ exports.BACKUP_TYPE_BOX, exports.BACKUP_STATE_NORMAL, (page-1)*perPage, perPage ], function (error, results) {
        if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));

        results.forEach(function (result) { postProcess(result); });
@@ -109,14 +81,12 @@ function add(backup, callback) {
    assert.strictEqual(typeof backup.version, 'string');
    assert(backup.type === exports.BACKUP_TYPE_APP || backup.type === exports.BACKUP_TYPE_BOX);
    assert(util.isArray(backup.dependsOn));
    assert.strictEqual(typeof backup.restoreConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    var creationTime = backup.creationTime || new Date(); // allow tests to set the time
    var restoreConfig = backup.restoreConfig ? JSON.stringify(backup.restoreConfig) : '';

    database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn, restoreConfigJson) VALUES (?, ?, ?, ?, ?, ?, ?)',
        [ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(','), restoreConfig ],
    database.query('INSERT INTO backups (id, version, type, creationTime, state, dependsOn) VALUES (?, ?, ?, ?, ?, ?)',
        [ backup.id, backup.version, backup.type, creationTime, exports.BACKUP_STATE_NORMAL, backup.dependsOn.join(',') ],
        function (error) {
        if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS));
        if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
@@ -125,26 +95,6 @@ function add(backup, callback) {
    });
}

function update(id, backup, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof backup, 'object');
    assert.strictEqual(typeof callback, 'function');

    var fields = [ ], values = [ ];
    for (var p in backup) {
        fields.push(p + ' = ?');
        values.push(backup[p]);
    }
    values.push(id);

    database.query('UPDATE backups SET ' + fields.join(', ') + ' WHERE id = ?', values, function (error) {
        if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
        if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));

        callback(null);
    });
}

function clear(callback) {
    assert.strictEqual(typeof callback, 'function');

@@ -158,19 +108,8 @@ function del(id, callback) {
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    get(id, function (error, result) {
        if (error && error.reason === DatabaseError.NOT_FOUND) return callback();
        if (error) return callback(error);

        var whereClause = [ 'id=?' ], whereArgs = [ result.id ];
        result.dependsOn.forEach(function (id) {
            whereClause.push('id=?');
            whereArgs.push(id);
        });

        database.query('DELETE FROM backups WHERE ' + whereClause.join(' OR '), whereArgs, function (error) {
            if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
            callback(null);
        });
    database.query('DELETE FROM backups WHERE id=?', [ id ], function (error) {
        if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
        callback(null);
    });
}
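For orientation, here is a minimal usage sketch of the routines above; it assumes getPaged, update and the state constants are exported from this module, and the scenario (fetch the newest backup row, then mark it errored) is invented purely for illustration.

// Illustrative only: update() turns every key of the patch object into a "<column> = ?" fragment,
// so only the listed columns change.
var backupdb = require('./backupdb.js');

backupdb.getPaged(1 /* page */, 1 /* perPage */, function (error, results) {
    if (error || results.length === 0) return console.error(error || 'no backups yet');

    backupdb.update(results[0].id, { state: backupdb.BACKUP_STATE_ERROR }, function (error) {
        if (error) console.error('could not update backup state:', error);
    });
});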
+124 -208
@@ -5,9 +5,10 @@ exports = module.exports = {
|
||||
|
||||
testConfig: testConfig,
|
||||
|
||||
getByStatePaged: getByStatePaged,
|
||||
getPaged: getPaged,
|
||||
getByAppIdPaged: getByAppIdPaged,
|
||||
|
||||
getRestoreUrl: getRestoreUrl,
|
||||
getRestoreConfig: getRestoreConfig,
|
||||
|
||||
ensureBackup: ensureBackup,
|
||||
@@ -18,7 +19,9 @@ exports = module.exports = {
|
||||
|
||||
backupBoxAndApps: backupBoxAndApps,
|
||||
|
||||
cleanup: cleanup
|
||||
getLocalDownloadPath: getLocalDownloadPath,
|
||||
|
||||
removeBackup: removeBackup
|
||||
};
|
||||
|
||||
var addons = require('./addons.js'),
|
||||
@@ -35,7 +38,6 @@ var addons = require('./addons.js'),
|
||||
filesystem = require('./storage/filesystem.js'),
|
||||
locker = require('./locker.js'),
|
||||
mailer = require('./mailer.js'),
|
||||
noop = require('./storage/noop.js'),
|
||||
path = require('path'),
|
||||
paths = require('./paths.js'),
|
||||
progress = require('./progress.js'),
|
||||
@@ -46,8 +48,9 @@ var addons = require('./addons.js'),
|
||||
SettingsError = require('./settings.js').SettingsError,
|
||||
util = require('util');
|
||||
|
||||
var NODE_CMD = path.join(__dirname, './scripts/node.sh');
|
||||
var BACKUPTASK_CMD = path.join(__dirname, 'backuptask.js');
|
||||
var BACKUP_BOX_CMD = path.join(__dirname, 'scripts/backupbox.sh'),
|
||||
BACKUP_APP_CMD = path.join(__dirname, 'scripts/backupapp.sh'),
|
||||
RESTORE_APP_CMD = path.join(__dirname, 'scripts/restoreapp.sh');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
@@ -80,7 +83,6 @@ util.inherits(BackupsError, Error);
|
||||
BackupsError.EXTERNAL_ERROR = 'external error';
|
||||
BackupsError.INTERNAL_ERROR = 'internal error';
|
||||
BackupsError.BAD_STATE = 'bad state';
|
||||
BackupsError.BAD_FIELD = 'bad field';
|
||||
BackupsError.NOT_FOUND = 'not found';
|
||||
BackupsError.MISSING_CREDENTIALS = 'missing credentials';
|
||||
|
||||
@@ -90,8 +92,6 @@ function api(provider) {
|
||||
case 'caas': return caas;
|
||||
case 's3': return s3;
|
||||
case 'filesystem': return filesystem;
|
||||
case 'minio': return s3;
|
||||
case 'noop': return noop;
|
||||
default: return null;
|
||||
}
|
||||
}
|
||||
@@ -106,13 +106,12 @@ function testConfig(backupConfig, callback) {
|
||||
api(backupConfig.provider).testConfig(backupConfig, callback);
|
||||
}
|
||||
|
||||
function getByStatePaged(state, page, perPage, callback) {
|
||||
assert.strictEqual(typeof state, 'string');
|
||||
function getPaged(page, perPage, callback) {
|
||||
assert(typeof page === 'number' && page > 0);
|
||||
assert(typeof perPage === 'number' && perPage > 0);
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
backupdb.getByTypeAndStatePaged(backupdb.BACKUP_TYPE_BOX, state, page, perPage, function (error, results) {
|
||||
backupdb.getPaged(page, perPage, function (error, results) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, results);
|
||||
@@ -136,12 +135,39 @@ function getRestoreConfig(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
backupdb.get(backupId, function (error, result) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new BackupsError(BackupsError.NOT_FOUND, error));
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
if (!result.restoreConfig) return callback(new BackupsError(BackupsError.NOT_FOUND, error));
|
||||
|
||||
callback(null, result.restoreConfig);
|
||||
api(backupConfig.provider).getAppRestoreConfig(backupConfig, backupId, function (error, result) {
|
||||
if (error && error.reason === BackupsError.NOT_FOUND) return callback(error);
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null, result);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getRestoreUrl(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).getRestoreUrl(backupConfig, backupId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var obj = {
|
||||
id: backupId,
|
||||
url: result.url,
|
||||
backupKey: backupConfig.key,
|
||||
sha1: result.sha1 || null // not supported by all backends
|
||||
};
|
||||
|
||||
debug('getRestoreUrl: id:%s url:%s backupKey:%s sha1:%s', obj.id, obj.url, obj.backupKey, obj.sha1);
|
||||
|
||||
callback(null, obj);
|
||||
});
|
||||
});
|
||||
}
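As a side note, a hedged sketch of what a caller does with the descriptor getRestoreUrl() assembles above; the backup id is invented and the function is assumed to be exported from backups.js.

// Illustrative only: fetch the restore descriptor and hand it to whatever performs the download.
var backups = require('./backups.js');

backups.getRestoreUrl('2017-01-01-000000-000/box_2017-01-01-000000-000_v0.150.0.tar.gz', function (error, result) {
    if (error) return console.error(error);

    // result.sha1 can be null because not every storage backend reports it
    console.log('restore from %s (key=%s, sha1=%s)', result.url, result.backupKey, result.sha1);
});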
|
||||
|
||||
@@ -153,98 +179,58 @@ function copyLastBackup(app, manifest, prefix, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var newBackupId = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
|
||||
var restoreConfig = apps.getAppConfig(app);
|
||||
restoreConfig.manifest = manifest;
|
||||
var toFilenameArchive = util.format('%s/app_%s_%s_v%s.tar.gz', prefix, app.id, timestamp, manifest.version);
|
||||
var toFilenameConfig = util.format('%s/app_%s_%s_v%s.json', prefix, app.id, timestamp, manifest.version);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('copyLastBackup: copying backup %s to %s', app.lastBackupId, newBackupId);
|
||||
debug('copyLastBackup: copying archive %s to %s', app.lastBackupId, toFilenameArchive);
|
||||
|
||||
backupdb.add({ id: newBackupId, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], restoreConfig: restoreConfig }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
api(backupConfig.provider).copyObject(backupConfig, app.lastBackupId, toFilenameArchive, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).copyBackup(backupConfig, app.lastBackupId, newBackupId, function (copyBackupError) {
|
||||
const state = copyBackupError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
// TODO change that logic by adjusting app.lastBackupId to not contain the file type
|
||||
var configFileId = app.lastBackupId.slice(0, -'.tar.gz'.length) + '.json';
|
||||
|
||||
debugApp(app, 'copyLastBackup: %s done with state %s', newBackupId, state);
|
||||
debug('copyLastBackup: copying config %s to %s', configFileId, toFilenameConfig);
|
||||
|
||||
backupdb.update(newBackupId, { state: state }, function (error) {
|
||||
if (copyBackupError) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, copyBackupError.message));
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
api(backupConfig.provider).copyObject(backupConfig, configFileId, toFilenameConfig, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
|
||||
callback(null, newBackupId);
|
||||
});
|
||||
return callback(null, toFilenameArchive);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function runBackupTask(backupId, appId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(appId === null || typeof backupId === 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var killTimerId = null;
|
||||
|
||||
var cp = shell.sudo('backup' + (appId ? 'App' : 'Box'), [ NODE_CMD, BACKUPTASK_CMD, backupId ].concat(appId ? [ appId ] : [ ]), function (error) {
|
||||
|
||||
clearTimeout(killTimerId);
|
||||
cp = null;
|
||||
|
||||
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
|
||||
return callback(new BackupsError(BackupsError.INTERNAL_ERROR, 'backuptask crashed'));
|
||||
} else if (error && error.code === 50) { // exited with error
|
||||
var result = safe.fs.readFileSync(paths.BACKUP_RESULT_FILE, 'utf8') || safe.error.message;
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result));
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
|
||||
killTimerId = setTimeout(function () {
|
||||
debug('runBackupTask: backup task taking too long. killing');
|
||||
cp.kill();
|
||||
}, 4 * 60 * 60 * 1000); // 4 hours
|
||||
}
|
||||
|
||||
function backupBoxWithAppBackupIds(appBackupIds, prefix, callback) {
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert(util.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof prefix, 'string');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var backupId = util.format('%s/box_%s_v%s', prefix, timestamp, config.version());
|
||||
var filebase = util.format('%s/box_%s_v%s', prefix, timestamp, config.version());
|
||||
var filename = filebase + '.tar.gz';
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
var password = config.database().password ? '-p' + config.database().password : '--skip-password';
|
||||
var mysqlDumpArgs = [
|
||||
'-c',
|
||||
`/usr/bin/mysqldump -u root ${password} --single-transaction --routines \
|
||||
--triggers ${config.database().name} > "${paths.BOX_DATA_DIR}/box.mysqldump"`
|
||||
];
|
||||
shell.exec('backupBox', '/bin/bash', mysqlDumpArgs, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
api(backupConfig.provider).getBoxBackupDetails(backupConfig, filename, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
backupdb.add({ id: backupId, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds, restoreConfig: null }, function (error) {
|
||||
debug('backupBoxWithAppBackupIds: backup details %j', result);
|
||||
|
||||
shell.sudo('backupBox', [ BACKUP_BOX_CMD ].concat(result.backupScriptArguments), function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
runBackupTask(backupId, null /* appId */, function (backupTaskError) {
|
||||
const state = backupTaskError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
debug('backupBoxWithAppBackupIds: %s', state);
|
||||
debug('backupBoxWithAppBackupIds: success');
|
||||
|
||||
backupdb.update(backupId, { state: state }, function (error) {
|
||||
if (backupTaskError) return callback(backupTaskError);
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
backupdb.add({ id: filename, version: config.version(), type: backupdb.BACKUP_TYPE_BOX, dependsOn: appBackupIds }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
// FIXME this is only needed for caas, hopefully we can remove that in the future
|
||||
api(backupConfig.provider).backupDone(backupId, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
callback(null, backupId);
|
||||
});
|
||||
api(backupConfig.provider).backupDone(filename, null /* app */, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
callback(null, filename);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -268,31 +254,29 @@ function createNewAppBackup(app, manifest, prefix, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var timestamp = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
|
||||
var backupId = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
var filebase = util.format('%s/app_%s_%s_v%s', prefix, app.id, timestamp, manifest.version);
|
||||
var configFilename = filebase + '.json', dataFilename = filebase + '.tar.gz';
|
||||
|
||||
var restoreConfig = apps.getAppConfig(app);
|
||||
restoreConfig.manifest = manifest;
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(restoreConfig))) {
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, 'Error creating config.json: ' + safe.error.message));
|
||||
}
|
||||
api(backupConfig.provider).getAppBackupDetails(backupConfig, app.id, dataFilename, configFilename, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
addons.backupAddons(app, manifest.addons, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
debug('createNewAppBackup: backup details %j', result);
|
||||
|
||||
backupdb.add({ id: backupId, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ], restoreConfig: restoreConfig }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
async.series([
|
||||
addons.backupAddons.bind(null, app, manifest.addons),
|
||||
shell.sudo.bind(null, 'backupApp', [ BACKUP_APP_CMD ].concat(result.backupScriptArguments))
|
||||
], function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
runBackupTask(backupId, app.id, function (backupTaskError) {
|
||||
const state = backupTaskError ? backupdb.BACKUP_STATE_ERROR : backupdb.BACKUP_STATE_NORMAL;
|
||||
debugApp(app, 'createNewAppBackup: %s done', dataFilename);
|
||||
|
||||
debugApp(app, 'createNewAppBackup: %s done with state %s', backupId, state);
|
||||
|
||||
backupdb.update(backupId, { state: state }, function (error) {
|
||||
if (backupTaskError) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, backupTaskError.message));
|
||||
backupdb.add({ id: dataFilename, version: manifest.version, type: backupdb.BACKUP_TYPE_APP, dependsOn: [ ] }, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, backupId);
|
||||
callback(null, dataFilename);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -330,7 +314,13 @@ function backupApp(app, manifest, prefix, callback) {
|
||||
// s3 does not allow changing creation time, so copying the last backup is easy way out for now
|
||||
backupFunction = copyLastBackup.bind(null, app, manifest, prefix);
|
||||
} else {
|
||||
var appConfig = apps.getAppConfig(app);
|
||||
appConfig.manifest = manifest;
|
||||
backupFunction = createNewAppBackup.bind(null, app, manifest, prefix);
|
||||
|
||||
if (!safe.fs.writeFileSync(path.join(paths.DATA_DIR, app.id + '/config.json'), JSON.stringify(appConfig), 'utf8')) {
|
||||
return callback(safe.error);
|
||||
}
|
||||
}
|
||||
|
||||
backupFunction(function (error, backupId) {
|
||||
@@ -426,7 +416,7 @@ function ensureBackup(auditSource, callback) {
|
||||
|
||||
debug('ensureBackup: %j', auditSource);
|
||||
|
||||
getByStatePaged(backupdb.BACKUP_STATE_NORMAL, 1, 1, function (error, backups) {
|
||||
getPaged(1, 1, function (error, backups) {
|
||||
if (error) {
|
||||
debug('Unable to list backups', error);
|
||||
return callback(error); // no point trying to backup if appstore is down
|
||||
@@ -448,130 +438,56 @@ function restoreApp(app, addonsToRestore, backupId, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
assert(app.lastBackupId);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
getRestoreUrl(backupId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.series([
|
||||
api(backupConfig.provider).restore.bind(null, backupConfig, backupId, path.join(paths.APPS_DATA_DIR, app.id)),
|
||||
addons.restoreAddons.bind(null, app, addonsToRestore)
|
||||
], callback);
|
||||
});
|
||||
}
|
||||
debugApp(app, 'restoreApp: restoreUrl:%s', result.url);
|
||||
|
||||
function cleanupAppBackups(backupConfig, referencedAppBackups, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
assert(Array.isArray(referencedAppBackups));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
shell.sudo('restoreApp', [ RESTORE_APP_CMD, app.id, result.url, result.backupKey, result.sessionToken ], function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
const now = new Date();
|
||||
|
||||
// we clean app backups of any state because the ones to keep are determined by the box cleanup code
|
||||
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_APP, 1, 1000, function (error, appBackups) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
async.eachSeries(appBackups, function iterator(backup, iteratorDone) {
|
||||
if (referencedAppBackups.indexOf(backup.id) !== -1) return iteratorDone();
|
||||
if ((now - backup.creationTime) < (backupConfig.retentionSecs * 1000)) return iteratorDone();
|
||||
|
||||
debug('cleanup: removing %s', backup.id);
|
||||
|
||||
api(backupConfig.provider).removeBackups(backupConfig, [ backup.id ], function (error) {
|
||||
if (error) {
|
||||
debug('cleanup: error removing backup %j : %s', backup, error.message);
|
||||
iteratorDone();
|
||||
}
|
||||
|
||||
backupdb.del(backup.id, function (error) {
|
||||
if (error) debug('cleanup: error removing from database', error);
|
||||
else debug('cleanup: removed %s', backup.id);
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
});
|
||||
}, function () {
|
||||
debug('cleanup: done cleaning app backups');
|
||||
|
||||
callback();
|
||||
addons.restoreAddons(app, addonsToRestore, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function cleanupBoxBackups(backupConfig, callback) {
|
||||
assert.strictEqual(typeof backupConfig, 'object');
|
||||
function getLocalDownloadPath(backupId, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const now = new Date();
|
||||
var referencedAppBackups = [];
|
||||
|
||||
backupdb.getByTypePaged(backupdb.BACKUP_TYPE_BOX, 1, 1000, function (error, boxBackups) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (boxBackups.length === 0) return callback(null, []);
|
||||
|
||||
// search for the first valid backup
|
||||
var i;
|
||||
for (i = 0; i < boxBackups.length; i++) {
|
||||
if (boxBackups[i].state === backupdb.BACKUP_STATE_NORMAL) break;
|
||||
}
|
||||
|
||||
// keep the first valid backup
|
||||
if (i !== boxBackups.length) {
|
||||
debug('cleanup: preserving box backup %j', boxBackups[i]);
|
||||
referencedAppBackups = boxBackups[i].dependsOn;
|
||||
boxBackups.splice(i, 1);
|
||||
} else {
|
||||
debug('cleanup: no box backup to preserve');
|
||||
}
|
||||
|
||||
async.eachSeries(boxBackups, function iterator(backup, iteratorDone) {
|
||||
referencedAppBackups = referencedAppBackups.concat(backup.dependsOn);
|
||||
|
||||
// TODO: errored backups should probably be cleaned up before retention time, but we will
|
||||
// have to be careful not to remove any backup currently being created
|
||||
if ((now - backup.creationTime) < (backupConfig.retentionSecs * 1000)) return iteratorDone();
|
||||
|
||||
debug('cleanup: removing %s', backup.id);
|
||||
|
||||
var backupIds = [].concat(backup.id, backup.dependsOn);
|
||||
|
||||
api(backupConfig.provider).removeBackups(backupConfig, backupIds, function (error) {
|
||||
if (error) {
|
||||
debug('cleanup: error removing backup %j : %s', backup, error.message);
|
||||
iteratorDone();
|
||||
}
|
||||
|
||||
backupdb.del(backup.id, function (error) {
|
||||
if (error) debug('cleanup: error removing from database', error);
|
||||
else debug('cleanup: removed %j', backupIds);
|
||||
|
||||
iteratorDone();
|
||||
});
|
||||
});
|
||||
}, function () {
|
||||
return callback(null, referencedAppBackups);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function cleanup(callback) {
|
||||
assert(!callback || typeof callback === 'function'); // callback is null when called from cronjob
|
||||
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
if (backupConfig.retentionSecs < 0) {
|
||||
debug('cleanup: keeping all backups');
|
||||
return callback();
|
||||
}
|
||||
|
||||
cleanupBoxBackups(backupConfig, function (error, referencedAppBackups) {
|
||||
api(backupConfig.provider).getLocalFilePath(backupConfig, backupId, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('cleanup: done cleaning box backups');
|
||||
debug('getLocalDownloadPath: id:%s path:%s', backupId, result.filePath);
|
||||
|
||||
cleanupAppBackups(backupConfig, referencedAppBackups, callback);
|
||||
callback(null, result.filePath);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function removeBackup(backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(util.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('removeBackup: %s', backupId);
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
api(backupConfig.provider).removeBackup(backupConfig, backupId, appBackupIds, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
backupdb.del(backupId, function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));
|
||||
|
||||
debug('removeBackup: %s done', backupId);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,110 +0,0 @@
#!/usr/bin/env node

'use strict';

require('supererror')({ splatchError: true });

// remove timestamp from debug() based output
require('debug').formatArgs = function formatArgs(args) {
    args[0] = this.namespace + ' ' + args[0];
};

var assert = require('assert'),
    BackupsError = require('./backups.js').BackupsError,
    caas = require('./storage/caas.js'),
    database = require('./database.js'),
    debug = require('debug')('box:backuptask'),
    filesystem = require('./storage/filesystem.js'),
    noop = require('./storage/noop.js'),
    path = require('path'),
    paths = require('./paths.js'),
    s3 = require('./storage/s3.js'),
    safe = require('safetydance'),
    settings = require('./settings.js');

function api(provider) {
    switch (provider) {
    case 'caas': return caas;
    case 's3': return s3;
    case 'filesystem': return filesystem;
    case 'minio': return s3;
    case 'noop': return noop;
    default: return null;
    }
}

function initialize(callback) {
    assert.strictEqual(typeof callback, 'function');

    database.initialize(callback);
}

function backupApp(backupId, appId, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('Start app backup with id %s for %s', backupId, appId);

    settings.getBackupConfig(function (error, backupConfig) {
        if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));

        var backupMapping = [{
            source: path.join(paths.APPS_DATA_DIR, appId),
            destination: '.'
        }];

        api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
    });
}

function backupBox(backupId, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('Start box backup with id %s', backupId);

    settings.getBackupConfig(function (error, backupConfig) {
        if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, error));

        var backupMapping = [{
            source: paths.BOX_DATA_DIR,
            destination: 'box'
        }, {
            source: path.join(paths.PLATFORM_DATA_DIR, 'mail'),
            destination: 'mail'
        }];

        api(backupConfig.provider).backup(backupConfig, backupId, backupMapping, callback);
    });
}

// Main process starts here
var backupId = process.argv[2];
var appId = process.argv[3];

if (appId) debug('Backuptask for the app %s with id %s', appId, backupId);
else debug('Backuptask for the whole Cloudron with id %s', backupId);

process.on('SIGTERM', function () {
    process.exit(0);
});

initialize(function (error) {
    if (error) throw error;

    function resultHandler(error) {
        if (error) debug('completed with error', error);

        debug('completed');

        safe.fs.writeFileSync(paths.BACKUP_RESULT_FILE, error ? error.message : '');

        // https://nodejs.org/api/process.html are exit codes used by node. apps.js uses the value below
        // to check apptask crashes
        process.exit(error ? 50 : 0);
    }

    if (appId) backupApp(backupId, appId, resultHandler);
    else backupBox(backupId, resultHandler);
});
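Since backuptask.js is driven purely through argv and its exit code, here is a hedged sketch of that contract as it appears above; the backup id and app id are invented, and the real caller goes through sudo and scripts/node.sh instead of invoking node directly.

// Illustrative only: 0 = success, 50 = expected failure (message in BACKUP_RESULT_FILE),
// any other code or a signal is treated as a crash by runBackupTask().
var spawn = require('child_process').spawn;

var child = spawn('node', [ 'backuptask.js', '2017-01-01-000000-000/app_myapp_2017-01-01-000000-000_v1.0.0.tar.gz', 'myappid' ], { stdio: 'inherit' });

child.on('exit', function (code) {
    console.log('backuptask exited with code', code);
});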
+21 -20
@@ -32,7 +32,7 @@ var acme = require('./cert/acme.js'),
    caas = require('./cert/caas.js'),
    config = require('./config.js'),
    constants = require('./constants.js'),
    debug = require('debug')('box:certificates'),
    debug = require('debug')('box:src/certificates'),
    eventlog = require('./eventlog.js'),
    fallback = require('./cert/fallback.js'),
    fs = require('fs'),
@@ -96,7 +96,7 @@ function getApi(app, callback) {

    var options = { };
    if (tlsConfig.provider === 'caas') {
        options.prod = true; // with altDomain, we will choose acme setting based on this
        options.prod = !config.isDev(); // with altDomain, we will choose acme setting based on this
    } else { // acme
        options.prod = tlsConfig.provider.match(/.*-prod/) !== null;
    }
@@ -263,6 +263,10 @@ function validateCertificate(cert, key, fqdn) {
    assert(key === null || typeof key === 'string');
    assert.strictEqual(typeof fqdn, 'string');

    if (cert === null && key === null) return null;
    if (!cert && key) return new Error('missing cert');
    if (cert && !key) return new Error('missing key');

    function matchesDomain(domain) {
        if (typeof domain !== 'string') return false;
        if (domain === fqdn) return true;
@@ -271,26 +275,23 @@ function validateCertificate(cert, key, fqdn) {
        return false;
    }

    if (cert === null && key === null) return null;
    if (!cert && key) return new Error('missing cert');
    if (cert && !key) return new Error('missing key');
    // get commonName (http://stackoverflow.com/questions/17353122/parsing-strings-crt-files)
    var result = safe.child_process.execSync('openssl x509 -noout -subject | sed -r "s|.*CN=(.*)|\\1|; s|/[^/]*=.*$||"', { encoding: 'utf8', input: cert });
    if (!result) return new Error(util.format('could not get CN'));
    var commonName = result.trim();
    debug('validateCertificate: detected commonName as %s', commonName);

    var result = safe.child_process.execSync('openssl x509 -noout -checkhost "' + fqdn + '"', { encoding: 'utf8', input: cert });
    if (!result) return new Error(util.format('could not get cert subject'));
    // https://github.com/drwetter/testssl.sh/pull/383
    var cmd = `openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | \
        grep "DNS:" | \
        sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"`;
    result = safe.child_process.execSync(cmd, { encoding: 'utf8', input: cert });
    var altNames = result ? [ ] : result.trim().split(' '); // might fail if cert has no SAN
    debug('validateCertificate: detected altNames as %j', altNames);

    // if no match, check alt names
    if (result.indexOf('does match certificate') === -1) {
        // https://github.com/drwetter/testssl.sh/pull/383
        var cmd = `openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | \
            grep "DNS:" | \
            sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"`;
        result = safe.child_process.execSync(cmd, { encoding: 'utf8', input: cert });
        var altNames = result ? [ ] : result.trim().split(' '); // might fail if cert has no SAN
        debug('validateCertificate: detected altNames as %j', altNames);

        // check altNames
        if (!altNames.some(matchesDomain)) return new Error(util.format('cert is not valid for this domain. Expecting %s in %j', fqdn, altNames));
    }
    // check altNames
    var domains = altNames.concat(commonName);
    if (!domains.some(matchesDomain)) return new Error(util.format('cert is not valid for this domain. Expecting %s in %j', fqdn, domains));

    // http://httpd.apache.org/docs/2.0/ssl/ssl_faq.html#verify
    var certModulus = safe.child_process.execSync('openssl x509 -noout -modulus', { encoding: 'utf8', input: cert });
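As an aside, a hedged sketch of running the same SAN extraction outside this module against a PEM string; the certificate path is hypothetical and the openssl/grep/sed pipeline is the one from the hunk above.

// Illustrative only: list the DNS names a certificate claims to cover.
var execSync = require('child_process').execSync;
var fs = require('fs');

var pem = fs.readFileSync('./host.cert', 'utf8'); // hypothetical path
var cmd = 'openssl x509 -noout -text | grep -A3 "Subject Alternative Name" | grep "DNS:" | ' +
    'sed -e "s/DNS://g" -e "s/ //g" -e "s/,/ /g" -e "s/othername:<unsupported>//g"';
var output = execSync(cmd, { encoding: 'utf8', input: pem }).trim();
var altNames = output ? output.split(' ') : [ ]; // empty when the cert carries no SAN

console.log('certificate covers:', altNames);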
+244 -204
@@ -8,26 +8,29 @@ exports = module.exports = {
|
||||
activate: activate,
|
||||
getConfig: getConfig,
|
||||
getStatus: getStatus,
|
||||
getDisks: getDisks,
|
||||
dnsSetup: dnsSetup,
|
||||
getLogs: getLogs,
|
||||
|
||||
sendHeartbeat: sendHeartbeat,
|
||||
sendAliveStatus: sendAliveStatus,
|
||||
|
||||
updateToLatest: updateToLatest,
|
||||
reboot: reboot,
|
||||
retire: retire,
|
||||
migrate: migrate,
|
||||
|
||||
getConfigStateSync: getConfigStateSync,
|
||||
|
||||
checkDiskSpace: checkDiskSpace,
|
||||
|
||||
readDkimPublicKeySync: readDkimPublicKeySync,
|
||||
refreshDNS: refreshDNS,
|
||||
configureWebadmin: configureWebadmin
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_ACTIVATED: 'activated'
|
||||
};
|
||||
|
||||
var appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
var apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
@@ -38,7 +41,7 @@ var appdb = require('./appdb.js'),
|
||||
constants = require('./constants.js'),
|
||||
cron = require('./cron.js'),
|
||||
debug = require('debug')('box:cloudron'),
|
||||
df = require('@sindresorhus/df'),
|
||||
df = require('node-df'),
|
||||
eventlog = require('./eventlog.js'),
|
||||
fs = require('fs'),
|
||||
locker = require('./locker.js'),
|
||||
@@ -53,11 +56,10 @@ var appdb = require('./appdb.js'),
|
||||
settings = require('./settings.js'),
|
||||
SettingsError = settings.SettingsError,
|
||||
shell = require('./shell.js'),
|
||||
spawn = require('child_process').spawn,
|
||||
split = require('split'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
superagent = require('superagent'),
|
||||
sysinfo = require('./sysinfo.js'),
|
||||
taskmanager = require('./taskmanager.js'),
|
||||
tokendb = require('./tokendb.js'),
|
||||
updateChecker = require('./updatechecker.js'),
|
||||
user = require('./user.js'),
|
||||
@@ -85,8 +87,9 @@ const BOX_AND_USER_TEMPLATE = {
|
||||
}
|
||||
};
|
||||
|
||||
var gBoxAndUserDetails = null, // cached cloudron details like region,size...
|
||||
gWebadminStatus = { dns: false, tls: false, configuring: false };
|
||||
var gUpdatingDns = false, // flag for dns update reentrancy
|
||||
gBoxAndUserDetails = null, // cached cloudron details like region,size...
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
|
||||
function CloudronError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
@@ -120,51 +123,93 @@ CloudronError.SELF_UPGRADE_NOT_SUPPORTED = 'Self upgrade not supported';
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
gWebadminStatus = { dns: false, tls: false, configuring: false };
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
gUpdatingDns = false;
|
||||
gBoxAndUserDetails = null;
|
||||
|
||||
async.series([
|
||||
certificates.initialize,
|
||||
settings.initialize,
|
||||
platform.initialize,
|
||||
installAppBundle,
|
||||
configureDefaultServer,
|
||||
onDomainConfigured
|
||||
], function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
configureWebadmin(NOOP_CALLBACK); // for restore() and caas initial setup. do not block
|
||||
|
||||
callback();
|
||||
});
|
||||
checkConfigState,
|
||||
configureDefaultServer
|
||||
], callback);
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
|
||||
platform.events.removeListener(platform.EVENT_READY, onPlatformReady);
|
||||
|
||||
async.series([
|
||||
cron.uninitialize,
|
||||
taskmanager.pauseTasks,
|
||||
mailer.stop,
|
||||
platform.stop,
|
||||
platform.uninitialize,
|
||||
certificates.uninitialize,
|
||||
settings.uninitialize
|
||||
], callback);
|
||||
}
|
||||
|
||||
function onDomainConfigured(callback) {
|
||||
function onConfigured(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (!config.fqdn()) return callback();
|
||||
// if we hit here, the domain has to be set, this is a logic issue if it isn't
|
||||
assert(config.fqdn());
|
||||
|
||||
debug('onConfigured: current state: %j', gConfigState);
|
||||
|
||||
if (gConfigState.configured) return callback(); // re-entrancy flag
|
||||
|
||||
gConfigState.configured = true;
|
||||
|
||||
platform.events.on(platform.EVENT_READY, onPlatformReady);
|
||||
settings.events.on(settings.DNS_CONFIG_KEY, function () { refreshDNS(); });
|
||||
|
||||
async.series([
|
||||
clients.addDefaultClients,
|
||||
certificates.ensureFallbackCertificate,
|
||||
ensureDkimKey,
|
||||
platform.start, // requires fallback certs for mail container
|
||||
mailer.start, // this requires the "mail" container to be running
|
||||
cron.initialize
|
||||
ensureDkimKey,
|
||||
addDnsRecords,
|
||||
configureAdmin,
|
||||
mailer.start,
|
||||
cron.initialize // do not send heartbeats until we are "ready"
|
||||
], callback);
|
||||
}
|
||||
|
||||
function onPlatformReady(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('onPlatformReady');
|
||||
|
||||
async.series([
|
||||
taskmanager.resumeTasks
|
||||
], callback);
|
||||
}
|
||||
|
||||
function getConfigStateSync() {
|
||||
return gConfigState;
|
||||
}
|
||||
|
||||
function checkConfigState(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (!config.fqdn()) {
|
||||
settings.events.once(settings.DNS_CONFIG_KEY, function () { checkConfigState(); }); // check again later
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
debug('checkConfigState: configured');
|
||||
|
||||
onConfigured(callback);
|
||||
}
|
||||
|
||||
function dnsSetup(dnsConfig, domain, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
@@ -178,10 +223,7 @@ function dnsSetup(dnsConfig, domain, callback) {
|
||||
|
||||
config.set('fqdn', domain); // set fqdn only after dns config is valid, otherwise cannot re-setup if we failed
|
||||
|
||||
async.series([ // do not block
|
||||
onDomainConfigured,
|
||||
configureWebadmin
|
||||
], NOOP_CALLBACK);
|
||||
onConfigured(); // do not block
|
||||
|
||||
callback();
|
||||
});
|
||||
@@ -205,6 +247,8 @@ function configureDefaultServer(callback) {
|
||||
safe.child_process.execSync(certCommand);
|
||||
}
|
||||
|
||||
safe.fs.unlinkSync(path.join(paths.NGINX_APPCONFIG_DIR,'ip_based_setup.conf'));
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, 'default.conf', '', function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
@@ -214,39 +258,30 @@ function configureDefaultServer(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function configureWebadmin(callback) {
|
||||
function configureAdmin(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('configureWebadmin: fqdn:%s status:%j', config.fqdn(), gWebadminStatus);
|
||||
if (process.env.BOX_ENV === 'test') return callback();
|
||||
|
||||
if (process.env.BOX_ENV === 'test' || !config.fqdn() || gWebadminStatus.configuring) return callback();
|
||||
|
||||
gWebadminStatus.configuring = true; // re-entrancy guard
|
||||
|
||||
function done(error) {
|
||||
gWebadminStatus.configuring = false;
|
||||
debug('configureWebadmin: done error:%j', error);
|
||||
callback(error);
|
||||
}
|
||||
debug('configureAdmin');
|
||||
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return done(error);
|
||||
if (error) return callback(error);
|
||||
|
||||
addDnsRecords(ip, function (error) {
|
||||
if (error) return done(error);
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
subdomains.waitForDns(config.adminFqdn(), ip, 'A', { interval: 30000, times: 50000 }, function (error) {
|
||||
if (error) return done(error);
|
||||
gConfigState.dns = true;
|
||||
|
||||
gWebadminStatus.dns = true;
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) { // currently, this can never happen
|
||||
debug('Error obtaining certificate. Proceed anyway', error);
|
||||
return callback();
|
||||
}
|
||||
|
||||
certificates.ensureCertificate({ location: constants.ADMIN_LOCATION }, function (error, certFilePath, keyFilePath) {
|
||||
if (error) return done(error);
|
||||
gConfigState.tls = true;
|
||||
|
||||
gWebadminStatus.tls = true;
|
||||
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), done);
|
||||
});
|
||||
nginx.configureAdmin(certFilePath, keyFilePath, constants.NGINX_ADMIN_CONFIG_FILE_NAME, config.adminFqdn(), callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -258,22 +293,26 @@ function setTimeZone(ip, callback) {
|
||||
|
||||
debug('setTimeZone ip:%s', ip);
|
||||
|
||||
superagent.get('https://geolocation.cloudron.io/json').query({ ip: ip }).timeout(10 * 1000).end(function (error, result) {
|
||||
// https://github.com/bluesmoon/node-geoip
|
||||
// https://github.com/runk/node-maxmind
|
||||
// { url: 'http://freegeoip.net/json/%s', jpath: 'time_zone' },
|
||||
// { url: 'http://ip-api.com/json/%s', jpath: 'timezone' },
|
||||
// { url: 'http://geoip.nekudo.com/api/%s', jpath: 'time_zone }
|
||||
|
||||
superagent.get('http://ip-api.com/json/' + ip).timeout(10 * 1000).end(function (error, result) {
|
||||
if ((error && !error.response) || result.statusCode !== 200) {
|
||||
debug('Failed to get geo location: %s', error.message);
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
var timezone = safe.query(result.body, 'location.time_zone');
|
||||
|
||||
if (!timezone || typeof timezone !== 'string') {
|
||||
if (!result.body.timezone || typeof result.body.timezone !== 'string') {
|
||||
debug('No timezone in geoip response : %j', result.body);
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
debug('Setting timezone to ', timezone);
|
||||
debug('Setting timezone to ', result.body.timezone);
|
||||
|
||||
settings.setTimeZone(timezone, callback);
|
||||
settings.setTimeZone(result.body.timezone, callback);
|
||||
});
|
||||
}
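For reference, a small sketch of the lookup the new code path relies on; only the timezone field of ip-api.com's JSON answer is read, and the IP below is a documentation address.

// Illustrative only: ip-api.com replies with JSON along the lines of
// { "status": "success", "country": "...", "city": "...", "timezone": "Europe/Berlin", ... }
var superagent = require('superagent');

superagent.get('http://ip-api.com/json/203.0.113.10').timeout(10 * 1000).end(function (error, result) {
    if (error) return console.error('geo lookup failed:', error.message);

    console.log('detected timezone:', result.body.timezone);
});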
|
||||
|
||||
@@ -307,7 +346,7 @@ function activate(username, password, email, displayName, ip, auditSource, callb
|
||||
|
||||
eventlog.add(eventlog.ACTION_ACTIVATE, auditSource, { });
|
||||
|
||||
platform.createMailConfig(NOOP_CALLBACK); // bounces can now be sent to the cloudron owner
|
||||
exports.events.emit(exports.EVENT_ACTIVATED);
|
||||
|
||||
callback(null, { token: token, expires: expires });
|
||||
});
|
||||
@@ -327,42 +366,17 @@ function getStatus(callback) {
|
||||
callback(null, {
|
||||
activated: count !== 0,
|
||||
version: config.version(),
|
||||
boxVersionsUrl: config.get('boxVersionsUrl'),
|
||||
apiServerOrigin: config.apiServerOrigin(), // used by CaaS tool
|
||||
provider: config.provider(),
|
||||
cloudronName: cloudronName,
|
||||
adminFqdn: config.fqdn() ? config.adminFqdn() : null,
|
||||
webadminStatus: gWebadminStatus
|
||||
configState: gConfigState
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getDisks(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var disks = {
|
||||
boxDataDisk: null,
|
||||
platformDataDisk: null,
|
||||
appsDataDisk: null
|
||||
};
|
||||
|
||||
df.file(paths.BOX_DATA_DIR).then(function (result) {
|
||||
disks.boxDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.PLATFORM_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.platformDataDisk = result.filesystem;
|
||||
|
||||
return df.file(paths.APPS_DATA_DIR);
|
||||
}).then(function (result) {
|
||||
disks.appsDataDisk = result.filesystem;
|
||||
|
||||
callback(null, disks);
|
||||
}).catch(function (error) {
|
||||
callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
});
|
||||
}
|
||||
|
||||
function getBoxAndUserDetails(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -402,6 +416,7 @@ function getConfig(callback) {
|
||||
callback(null, {
|
||||
apiServerOrigin: config.apiServerOrigin(),
|
||||
webServerOrigin: config.webServerOrigin(),
|
||||
isDev: config.isDev(),
|
||||
fqdn: config.fqdn(),
|
||||
version: config.version(),
|
||||
update: updateChecker.getUpdateInfo(),
|
||||
@@ -434,9 +449,87 @@ function sendHeartbeat() {
|
||||
});
|
||||
}
|
||||
|
||||
function ensureDkimKey(callback) {
|
||||
assert(config.fqdn(), 'fqdn is not set');
|
||||
function sendAliveStatus(callback) {
|
||||
if (typeof callback !== 'function') {
|
||||
callback = function (error) {
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) debug(error);
|
||||
else if (error) debug(error);
|
||||
};
|
||||
}
|
||||
|
||||
function sendAliveStatusWithAppstoreConfig(backendSettings, appstoreConfig) {
|
||||
assert.strictEqual(typeof backendSettings, 'object');
|
||||
assert.strictEqual(typeof appstoreConfig.userId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.cloudronId, 'string');
|
||||
assert.strictEqual(typeof appstoreConfig.token, 'string');
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/users/' + appstoreConfig.userId + '/cloudrons/' + appstoreConfig.cloudronId;
|
||||
var data = {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 404) return callback(new CloudronError(CloudronError.NOT_FOUND));
|
||||
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('Sending alive status failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
settings.getAll(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
var backendSettings = {
|
||||
dnsConfig: {
|
||||
provider: result[settings.DNS_CONFIG_KEY].provider,
|
||||
wildcard: result[settings.DNS_CONFIG_KEY].provider === 'manual' ? result[settings.DNS_CONFIG_KEY].wildcard : undefined
|
||||
},
|
||||
tlsConfig: {
|
||||
provider: result[settings.TLS_CONFIG_KEY].provider
|
||||
},
|
||||
backupConfig: {
|
||||
provider: result[settings.BACKUP_CONFIG_KEY].provider
|
||||
},
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY]
|
||||
};
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
if (config.provider() === 'caas') {
|
||||
var url = config.apiServerOrigin() + '/api/v1/exchangeBoxTokenWithUserToken';
|
||||
superagent.post(url).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 201) return callback(new CloudronError(CloudronError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
sendAliveStatusWithAppstoreConfig(backendSettings, result.body);
|
||||
});
|
||||
} else {
|
||||
settings.getAppstoreConfig(function (error, result) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
if (!result.token) {
|
||||
debug('sendAliveStatus: Cloudron not yet registered');
|
||||
return callback(null);
|
||||
}
|
||||
|
||||
sendAliveStatusWithAppstoreConfig(backendSettings, result);
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function ensureDkimKey(callback) {
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPrivateKeyFile = path.join(dkimPath, 'private');
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
@@ -515,55 +608,66 @@ function txtRecordsWithSpf(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function addDnsRecords(ip, callback) {
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
function addDnsRecords(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
if (process.env.BOX_ENV === 'test') return callback();
|
||||
|
||||
if (gUpdatingDns) {
|
||||
debug('addDnsRecords: dns update already in progress');
|
||||
return callback();
|
||||
}
|
||||
gUpdatingDns = true;
|
||||
|
||||
var dkimKey = readDkimPublicKeySync();
|
||||
if (!dkimKey) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, new Error('Failed to read dkim public key')));
|
||||
|
||||
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
|
||||
// t=s limits the domainkey to this domain and not it's subdomains
|
||||
var dkimRecord = { subdomain: constants.DKIM_SELECTOR + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
|
||||
sysinfo.getPublicIp(function (error, ip) {
|
||||
if (error) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, error));
|
||||
|
||||
var records = [ ];
|
||||
if (config.isCustomDomain()) {
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
} else {
|
||||
// for non-custom domains, we show a noapp.html page
|
||||
var nakedDomainRecord = { subdomain: '', type: 'A', values: [ ip ] };
|
||||
var webadminRecord = { subdomain: constants.ADMIN_LOCATION, type: 'A', values: [ ip ] };
|
||||
// t=s limits the domainkey to this domain and not it's subdomains
|
||||
var dkimRecord = { subdomain: constants.DKIM_SELECTOR + '._domainkey', type: 'TXT', values: [ '"v=DKIM1; t=s; p=' + dkimKey + '"' ] };
|
||||
|
||||
records.push(nakedDomainRecord);
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
}
|
||||
var records = [ ];
|
||||
if (config.isCustomDomain()) {
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
} else {
|
||||
// for non-custom domains, we show a noapp.html page
|
||||
var nakedDomainRecord = { subdomain: '', type: 'A', values: [ ip ] };
|
||||
|
||||
debug('addDnsRecords: %j', records);
|
||||
records.push(nakedDomainRecord);
|
||||
records.push(webadminRecord);
|
||||
records.push(dkimRecord);
|
||||
}
|
||||
|
||||
async.retry({ times: 10, interval: 20000 }, function (retryCallback) {
|
||||
txtRecordsWithSpf(function (error, txtRecords) {
|
||||
if (error) return retryCallback(error);
|
||||
debug('addDnsRecords: %j', records);
|
||||
|
||||
if (txtRecords) records.push({ subdomain: '', type: 'TXT', values: txtRecords });
|
||||
async.retry({ times: 10, interval: 20000 }, function (retryCallback) {
|
||||
txtRecordsWithSpf(function (error, txtRecords) {
|
||||
if (error) return retryCallback(error);
|
||||
|
||||
debug('addDnsRecords: will update %j', records);
|
||||
if (txtRecords) records.push({ subdomain: '', type: 'TXT', values: txtRecords });
|
||||
|
||||
async.mapSeries(records, function (record, iteratorCallback) {
|
||||
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
|
||||
}, function (error, changeIds) {
|
||||
if (error) debug('addDnsRecords: failed to update : %s. will retry', error);
|
||||
else debug('addDnsRecords: records %j added with changeIds %j', records, changeIds);
|
||||
debug('addDnsRecords: will update %j', records);
|
||||
|
||||
retryCallback(error);
|
||||
async.mapSeries(records, function (record, iteratorCallback) {
|
||||
subdomains.upsert(record.subdomain, record.type, record.values, iteratorCallback);
|
||||
}, function (error, changeIds) {
|
||||
if (error) debug('addDnsRecords: failed to update : %s. will retry', error);
|
||||
else debug('addDnsRecords: records %j added with changeIds %j', records, changeIds);
|
||||
|
||||
retryCallback(error);
|
||||
});
|
||||
});
|
||||
});
|
||||
}, function (error) {
|
||||
debug('addDnsRecords: done updating records with error:', error);
|
||||
}, function (error) {
|
||||
gUpdatingDns = false;
|
||||
|
||||
callback(error);
|
||||
debug('addDnsRecords: done updating records with error:', error);
|
||||
|
||||
callback(error);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -705,10 +809,11 @@ function doUpdate(boxUpdateInfo, callback) {
|
||||
webServerOrigin: config.webServerOrigin()
|
||||
},
|
||||
|
||||
version: boxUpdateInfo.version
|
||||
version: boxUpdateInfo.version,
|
||||
boxVersionsUrl: config.get('boxVersionsUrl')
|
||||
};
|
||||
|
||||
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, _.omit(data, 'tlsCert', 'tlsKey', 'token', 'appstore', 'caas'));
|
||||
debug('updating box %s %j', boxUpdateInfo.sourceTarballUrl, data);
|
||||
|
||||
progress.set(progress.UPDATE, 5, 'Downloading and extracting new version');
|
||||
|
||||
@@ -755,40 +860,23 @@ function checkDiskSpace(callback) {
|
||||
|
||||
debug('Checking disk space');
|
||||
|
||||
getDisks(function (error, disks) {
|
||||
df(function (error, entries) {
|
||||
if (error) {
|
||||
debug('df error %s', error.message);
|
||||
return callback();
|
||||
}
|
||||
|
||||
df().then(function (entries) {
|
||||
/*
|
||||
[{
|
||||
filesystem: '/dev/disk1',
|
||||
size: 499046809600,
|
||||
used: 443222245376,
|
||||
available: 55562420224,
|
||||
capacity: 0.89,
|
||||
mountpoint: '/'
|
||||
}, ...]
|
||||
*/
|
||||
var oos = entries.some(function (entry) {
|
||||
// ignore other filesystems but where box, app and platform data is
|
||||
if (entry.filesystem !== disks.boxDataDisk && entry.filesystem !== disks.platformDataDisk && entry.filesystem !== disks.appsDataDisk) return false;
|
||||
|
||||
return (entry.available <= (1.25 * 1024 * 1024 * 1024)); // 1.5G
|
||||
});
|
||||
|
||||
debug('Disk space checked. ok: %s', !oos);
|
||||
|
||||
if (oos) mailer.outOfDiskSpace(JSON.stringify(entries, null, 4));
|
||||
|
||||
callback();
|
||||
}).catch(function (error) {
|
||||
debug('df error %s', error.message);
|
||||
mailer.outOfDiskSpace(error.message);
|
||||
return callback();
|
||||
}
|
||||
|
||||
var oos = entries.some(function (entry) {
|
||||
return (entry.mount === paths.DATA_DIR && entry.capacity >= 0.90) ||
|
||||
(entry.mount === '/' && entry.available <= (1.25 * 1024 * 1024)); // 1.5G
|
||||
});
|
||||
|
||||
debug('Disk space checked. ok: %s', !oos);
|
||||
|
||||
if (oos) mailer.outOfDiskSpace(JSON.stringify(entries, null, 4));
|
||||
|
||||
callback();
|
||||
});
|
||||
}
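A quick sketch of what node-df hands to its callback, which is what the thresholds above are written against; the units (1K blocks) and the fractional capacity are assumptions inferred from how the check reads the entries.

// Illustrative only: print each mount the way checkDiskSpace() inspects it.
var df = require('node-df');

df(function (error, entries) {
    if (error) return console.error('df failed:', error.message);

    entries.forEach(function (entry) {
        // entry: { filesystem, size, used, available, capacity, mount }
        console.log('%s on %s: %d%% used, %s KiB free', entry.filesystem, entry.mount, Math.round(entry.capacity * 100), entry.available);
    });
});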
|
||||
|
||||
@@ -821,11 +909,13 @@ function doMigrate(options, callback) {
|
||||
progress.set(progress.MIGRATE, 10, 'Backing up for migration');
|
||||
|
||||
// initiate the migration in the background
|
||||
backups.backupBoxAndApps({ userId: null, username: 'migrator' }, function (error) {
|
||||
backups.backupBoxAndApps({ userId: null, username: 'migrator' }, function (error, backupId) {
|
||||
if (error) return unlock(error);
|
||||
|
||||
debug('migrate: domain: %s size %s region %s', options.domain, options.size, options.region);
|
||||
|
||||
options.restoreKey = backupId;
|
||||
|
||||
superagent
|
||||
.post(config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/migrate')
|
||||
.query({ token: config.token() })
|
||||
@@ -865,7 +955,6 @@ function migrate(options, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
// called for dynamic dns setups where we have to update the IP
|
||||
function refreshDNS(callback) {
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
@@ -874,7 +963,7 @@ function refreshDNS(callback) {
|
||||
|
||||
debug('refreshDNS: current ip %s', ip);
|
||||
|
||||
addDnsRecords(ip, function (error) {
|
||||
addDnsRecords(function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('refreshDNS: done for system records');
|
||||
@@ -883,9 +972,6 @@ function refreshDNS(callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.each(result, function (app, callback) {
|
||||
// do not change state of installing apps since apptask will error if dns record already exists
|
||||
if (app.installationState !== appdb.ISTATE_INSTALLED) return callback();
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
@@ -898,49 +984,3 @@ function refreshDNS(callback) {
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function getLogs(options, callback) {
|
||||
assert(options && typeof options === 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var units = options.units || [],
|
||||
lines = options.lines || 100,
|
||||
format = options.format || 'json',
|
||||
follow = !!options.follow;
|
||||
|
||||
assert(Array.isArray(units));
|
||||
assert.strictEqual(typeof lines, 'number');
|
||||
assert.strictEqual(typeof format, 'string');
|
||||
|
||||
debug('Getting logs for %j', units);
|
||||
|
||||
var args = [ '--no-pager', '--lines=' + lines ];
|
||||
units.forEach(function (u) {
|
||||
if (u === 'box') args.push('--unit=box');
|
||||
else if (u === 'mail') args.push('CONTAINER_NAME=mail');
|
||||
});
|
||||
if (format === 'short') args.push('--output=short', '-a'); else args.push('--output=json');
|
||||
if (follow) args.push('--follow');
|
||||
|
||||
var cp = spawn('/bin/journalctl', args);
|
||||
|
||||
var transformStream = split(function mapper(line) {
|
||||
if (format !== 'json') return line + '\n';
|
||||
|
||||
var obj = safe.JSON.parse(line);
|
||||
if (!obj) return undefined;
|
||||
|
||||
return JSON.stringify({
|
||||
realtimeTimestamp: obj.__REALTIME_TIMESTAMP,
|
||||
monotonicTimestamp: obj.__MONOTONIC_TIMESTAMP,
|
||||
message: obj.MESSAGE,
|
||||
source: obj.SYSLOG_IDENTIFIER || ''
|
||||
}) + '\n';
|
||||
});
|
||||
|
||||
transformStream.close = cp.kill.bind(cp, 'SIGKILL'); // closing stream kills the child process
|
||||
|
||||
cp.stdout.pipe(transformStream);
|
||||
|
||||
return callback(null, transformStream);
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ exports = module.exports = {
|
||||
appFqdn: appFqdn,
|
||||
zoneName: zoneName,
|
||||
|
||||
isDev: isDev,
|
||||
isDemo: isDemo,
|
||||
|
||||
tlsCert: tlsCert,
|
||||
@@ -76,6 +77,7 @@ function initConfig() {
|
||||
data.fqdn = 'localhost';
|
||||
|
||||
data.token = null;
|
||||
data.boxVersionsUrl = null;
|
||||
data.version = null;
|
||||
data.isCustomDomain = true;
|
||||
data.webServerOrigin = null;
|
||||
@@ -202,6 +204,10 @@ function database() {
|
||||
return get('database');
|
||||
}
|
||||
|
||||
function isDev() {
|
||||
return /dev/i.test(get('boxVersionsUrl'));
|
||||
}
|
||||
|
||||
function isDemo() {
|
||||
return get('isDemo') === true;
|
||||
}
|
||||
|
||||
+11
-16
@@ -6,7 +6,6 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var apps = require('./apps.js'),
|
||||
appstore = require('./appstore.js'),
|
||||
assert = require('assert'),
|
||||
backups = require('./backups.js'),
|
||||
certificates = require('./certificates.js'),
|
||||
@@ -19,7 +18,6 @@ var apps = require('./apps.js'),
|
||||
janitor = require('./janitor.js'),
|
||||
scheduler = require('./scheduler.js'),
|
||||
settings = require('./settings.js'),
|
||||
semver = require('semver'),
|
||||
updateChecker = require('./updatechecker.js');
|
||||
|
||||
var gAutoupdaterJob = null,
|
||||
@@ -67,7 +65,7 @@ function initialize(callback) {
|
||||
var randomHourMinute = Math.floor(60*Math.random());
|
||||
gAliveJob = new CronJob({
|
||||
cronTime: '00 ' + randomHourMinute + ' * * * *', // every hour on a random minute
|
||||
onTick: appstore.sendAliveStatus,
|
||||
onTick: cloudron.sendAliveStatus,
|
||||
start: true
|
||||
});
|
||||
|
||||
@@ -93,7 +91,7 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gBackupJob) gBackupJob.stop();
|
||||
gBackupJob = new CronJob({
|
||||
cronTime: '00 00 */6 * * *', // every 6 hours. backups.ensureBackup() will only trigger a backup once per day
|
||||
cronTime: '00 00 */4 * * *', // every 4 hours. backups.ensureBackup() will only trigger a backup once per day
|
||||
onTick: backups.ensureBackup.bind(null, AUDIT_SOURCE, NOOP_CALLBACK),
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -107,12 +105,13 @@ function recreateJobs(tz) {
|
||||
timeZone: tz
|
||||
});
|
||||
|
||||
// randomized pattern per cloudron every hour
|
||||
var randomMinute = Math.floor(60*Math.random());
|
||||
// randomized pattern per cloudron every 10 min
|
||||
var randomMinute = Math.floor(10*Math.random());
|
||||
var random10MinPattern = [0,1,2,3,4,5].map(function (n) { return n*10+randomMinute; }).join(',');
|
||||
|
||||
if (gBoxUpdateCheckerJob) gBoxUpdateCheckerJob.stop();
|
||||
gBoxUpdateCheckerJob = new CronJob({
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
cronTime: '00 ' + random10MinPattern + ' * * * *', // every 10 minutes
|
||||
onTick: updateChecker.checkBoxUpdates,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -120,7 +119,7 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gAppUpdateCheckerJob) gAppUpdateCheckerJob.stop();
|
||||
gAppUpdateCheckerJob = new CronJob({
|
||||
cronTime: '00 ' + randomMinute + ' * * * *', // once an hour
|
||||
cronTime: '00 ' + random10MinPattern + ' * * * *', // every 10 minutes
|
||||
onTick: updateChecker.checkAppUpdates,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
@@ -136,8 +135,8 @@ function recreateJobs(tz) {
|
||||
|
||||
if (gCleanupBackupsJob) gCleanupBackupsJob.stop();
|
||||
gCleanupBackupsJob = new CronJob({
|
||||
cronTime: '00 45 */6 * * *', // every 6 hours. try not to overlap with ensureBackup job
|
||||
onTick: backups.cleanup,
|
||||
cronTime: '00 */30 * * * *', // every 30 minutes
|
||||
onTick: janitor.cleanupBackups,
|
||||
start: true,
|
||||
timeZone: tz
|
||||
});
|
||||
@@ -190,12 +189,8 @@ function autoupdatePatternChanged(pattern) {
|
||||
onTick: function() {
|
||||
var updateInfo = updateChecker.getUpdateInfo();
|
||||
if (updateInfo.box) {
|
||||
if (semver.major(updateInfo.box.version) === semver.major(config.version())) {
|
||||
debug('Starting autoupdate to %j', updateInfo.box);
|
||||
cloudron.updateToLatest(AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
} else {
|
||||
debug('Block automatic update for major version');
|
||||
}
|
||||
debug('Starting autoupdate to %j', updateInfo.box);
|
||||
cloudron.updateToLatest(AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
} else if (updateInfo.apps) {
|
||||
debug('Starting app update to %j', updateInfo.apps);
|
||||
apps.updateApps(updateInfo.apps, AUDIT_SOURCE, NOOP_CALLBACK);
|
||||
|
||||
-46
@@ -1,46 +0,0 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
resolve: resolve
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
child_process = require('child_process'),
|
||||
debug = require('debug')('box:dig');
|
||||
|
||||
function resolve(domain, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof options, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// dig @server cloudron.io TXT +short
|
||||
var args = [ ];
|
||||
if (options.server) args.push('@' + options.server);
|
||||
if (type === 'PTR') {
|
||||
args.push('-x', domain);
|
||||
} else {
|
||||
args.push(domain, type);
|
||||
}
|
||||
args.push('+short');
|
||||
|
||||
child_process.execFile('/usr/bin/dig', args, { encoding: 'utf8', killSignal: 'SIGKILL', timeout: options.timeout || 0 }, function (error, stdout, stderr) {
|
||||
if (error && error.killed) error.code = 'ETIMEDOUT';
|
||||
|
||||
if (error || stderr) debug('resolve error (%j): %j %s %s', args, error, stdout, stderr);
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('resolve (%j): %s', args, stdout);
|
||||
|
||||
if (!stdout) return callback(); // timeout or no result
|
||||
|
||||
var lines = stdout.trim().split('\n');
|
||||
if (type === 'MX') {
|
||||
lines = lines.map(function (line) {
|
||||
var parts = line.split(' ');
|
||||
return { priority: parts[0], exchange: parts[1] };
|
||||
});
|
||||
}
|
||||
return callback(null, lines);
|
||||
});
|
||||
}
|
||||
+11
-16
@@ -10,19 +10,14 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/digitalocean'),
|
||||
dns = require('dns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
superagent = require('superagent'),
|
||||
util = require('util');
|
||||
|
||||
var DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
|
||||
|
||||
function formatError(response) {
|
||||
return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
|
||||
}
|
||||
|
||||
function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
assert.strictEqual(typeof dnsConfig, 'object');
|
||||
assert.strictEqual(typeof zoneName, 'string');
|
||||
@@ -35,9 +30,9 @@ function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.statusCode === 404) return callback(new SubdomainError(SubdomainError.NOT_FOUND, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
var tmp = result.body.domain_records.filter(function (record) {
|
||||
return (record.type === type && record.name === subdomain);
|
||||
@@ -89,9 +84,9 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.timeout(30 * 1000)
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -105,9 +100,9 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
++i;
|
||||
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
@@ -170,8 +165,8 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 404) return callback(null);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, formatError(result)));
|
||||
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, formatError(result)));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode !== 204) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
debug('del: done');
|
||||
|
||||
@@ -202,7 +197,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, constants.ADMIN_LOCATION, 'A', [ ip ], function (error, changeId) {
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
+21
-11
@@ -10,10 +10,8 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('dns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util');
|
||||
|
||||
@@ -57,7 +55,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
assert.strictEqual(typeof ip, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var adminDomain = constants.ADMIN_LOCATION + '.' + domain;
|
||||
var adminDomain = 'my.' + domain;
|
||||
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));
|
||||
@@ -70,30 +68,42 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, everyIpCallback) {
|
||||
dig.resolve(adminDomain, 'A', { server: nsIp, timeout: 5000 }, function (error, answer) {
|
||||
if (error && error.code === 'ETIMEDOUT') {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
}
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: adminDomain, type: 'A' }),
|
||||
server: { address: nsIp },
|
||||
timeout: 5000
|
||||
});
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, adminDomain, error);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', answer);
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', message);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
debug('verifyDnsConfig: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, adminDomain, answer, ip);
|
||||
|
||||
var match = answer.some(function (a) { return a === ip; });
|
||||
var match = answer.some(function (a) {
|
||||
return a.address === ip;
|
||||
});
|
||||
|
||||
if (match) return everyIpCallback(null, true); // done!
|
||||
|
||||
everyIpCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
}, everyNsCallback);
|
||||
});
|
||||
}, function (error, success) {
|
||||
|
||||
+2
-8
@@ -13,9 +13,8 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
AWS = require('aws-sdk'),
|
||||
constants = require('../constants.js'),
|
||||
debug = require('debug')('box:dns/route53'),
|
||||
dns = require('dns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
@@ -42,7 +41,6 @@ function getZoneByName(dnsConfig, zoneName, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.listHostedZones({}, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
var zone = result.HostedZones.filter(function (zone) {
|
||||
@@ -66,7 +64,6 @@ function getHostedZone(dnsConfig, zoneName, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.getHostedZone({ Id: zone.Id }, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
|
||||
callback(null, result);
|
||||
@@ -108,7 +105,6 @@ function add(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'PriorRequestNotComplete') return callback(new SubdomainError(SubdomainError.STILL_BUSY, error.message));
|
||||
if (error && error.code === 'InvalidChangeBatch') return callback(new SubdomainError(SubdomainError.BAD_FIELD, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
@@ -149,7 +145,6 @@ function get(dnsConfig, zoneName, subdomain, type, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.listResourceRecordSets(params, function (error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, error.message));
|
||||
if (result.ResourceRecordSets.length === 0) return callback(null, [ ]);
|
||||
if (result.ResourceRecordSets[0].Name !== params.StartRecordName || result.ResourceRecordSets[0].Type !== params.StartRecordType) return callback(null, [ ]);
|
||||
@@ -195,7 +190,6 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
var route53 = new AWS.Route53(getDnsCredentials(dnsConfig));
|
||||
route53.changeResourceRecordSets(params, function(error, result) {
|
||||
if (error && error.code === 'AccessDenied') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.code === 'InvalidClientTokenId') return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, error.message));
|
||||
if (error && error.message && error.message.indexOf('it was not found') !== -1) {
|
||||
debug('del: resource record set not found.', error);
|
||||
return callback(new SubdomainError(SubdomainError.NOT_FOUND, error.message));
|
||||
@@ -246,7 +240,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Route53'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, constants.ADMIN_LOCATION, 'A', [ ip ], function (error, changeId) {
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
+20
-12
@@ -5,8 +5,7 @@ exports = module.exports = waitForDns;
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
debug = require('debug')('box:dns/waitfordns'),
|
||||
dig = require('../dig.js'),
|
||||
dns = require('dns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
tld = require('tldjs'),
|
||||
util = require('util');
|
||||
@@ -26,36 +25,45 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, iteratorCallback) {
|
||||
dig.resolve(domain, type, { server: nsIp, timeout: 5000 }, function (error, answer) {
|
||||
if (error && error.code === 'ETIMEDOUT') {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
}
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: domain, type: type }),
|
||||
server: { address: nsIp },
|
||||
timeout: 5000
|
||||
});
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, domain, error);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, domain, type, answer);
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, domain, type, message);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
debug('isChangeSynced: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, domain, answer, value);
|
||||
|
||||
var match = answer.some(function (a) {
|
||||
return ((type === 'A' && value.test(a)) ||
|
||||
(type === 'CNAME' && value.test(a)) ||
|
||||
(type === 'TXT' && value.test(a)));
|
||||
return ((type === 'A' && value.test(a.address)) ||
|
||||
(type === 'CNAME' && value.test(a.data)) ||
|
||||
(type === 'TXT' && value.test(a.data.join(''))));
|
||||
});
|
||||
|
||||
if (match) return iteratorCallback(null, true); // done!
|
||||
|
||||
iteratorCallback(null, false);
|
||||
});
|
||||
}, callback);
|
||||
|
||||
req.send();
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
+3
-5
@@ -40,7 +40,7 @@ var addons = require('./addons.js'),
|
||||
child_process = require('child_process'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
debug = require('debug')('box:docker.js'),
|
||||
debug = require('debug')('box:src/docker.js'),
|
||||
once = require('once'),
|
||||
safe = require('safetydance'),
|
||||
spawn = child_process.spawn,
|
||||
@@ -202,10 +202,8 @@ function createSubcontainer(app, name, cmd, options, callback) {
|
||||
},
|
||||
CpuShares: 512, // relative to 1024 for system processes
|
||||
VolumesFrom: isAppContainer ? null : [ app.containerId + ":rw" ],
|
||||
NetworkMode: 'cloudron',
|
||||
Dns: ['172.18.0.1'], // use internal dns
|
||||
DnsSearch: ['.'], // use internal dns
|
||||
SecurityOpt: enableSecurityOpt ? [ "apparmor=docker-cloudron-app" ] : null // profile available only on cloudron
|
||||
NetworkMode: isAppContainer ? 'cloudron' : ('container:' + app.containerId), // share network namespace with parent
|
||||
SecurityOpt: enableSecurityOpt ? [ "apparmor:docker-cloudron-app" ] : null // profile available only on cloudron
|
||||
}
|
||||
};
|
||||
containerOptions = _.extend(containerOptions, options);
|
||||
|
||||
@@ -5,20 +5,20 @@
|
||||
// Do not require anything here!
|
||||
|
||||
exports = module.exports = {
|
||||
// a major version makes all apps restore from backup
|
||||
// a minor version makes all apps re-configure themselves
|
||||
'version': '48.3.0',
|
||||
// a version bump means that all app containers are recreated
|
||||
'version': 46,
|
||||
|
||||
'baseImages': [ 'cloudron/base:0.10.0' ],
|
||||
|
||||
// Note that if any of the databases include an upgrade, bump the infra version above
|
||||
// This is because we upgrade using dumps instead of mysql_upgrade, pg_upgrade etc
|
||||
'images': {
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.17.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.17.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.13.0' },
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.14.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.16.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.12.0' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.11.0' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.32.0' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.30.3' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.11.0' }
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
+39
-2
@@ -3,13 +3,16 @@
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
authcodedb = require('./authcodedb.js'),
|
||||
debug = require('debug')('box:janitor'),
|
||||
backups = require('./backups.js'),
|
||||
debug = require('debug')('box:src/janitor'),
|
||||
docker = require('./docker.js').connection,
|
||||
settings = require('./settings.js'),
|
||||
tokendb = require('./tokendb.js');
|
||||
|
||||
exports = module.exports = {
|
||||
cleanupTokens: cleanupTokens,
|
||||
cleanupDockerVolumes: cleanupDockerVolumes
|
||||
cleanupDockerVolumes: cleanupDockerVolumes,
|
||||
cleanupBackups: cleanupBackups
|
||||
};
|
||||
|
||||
var NOOP_CALLBACK = function () { };
|
||||
@@ -101,3 +104,37 @@ function cleanupDockerVolumes(callback) {
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function cleanupBackups(callback) {
|
||||
assert(!callback || typeof callback === 'function'); // callback is null when called from cronjob
|
||||
|
||||
callback = callback || NOOP_CALLBACK;
|
||||
|
||||
debug('Cleaning backups');
|
||||
|
||||
settings.getBackupConfig(function (error, backupConfig) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// nothing to do here
|
||||
if (backupConfig.provider !== 'filesystem') return callback();
|
||||
|
||||
backups.getPaged(1, 1000, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// sort with latest backups first in the array and slice 2
|
||||
var toCleanup = result.sort(function (a, b) { return b.creationTime.getTime() - a.creationTime.getTime(); }).slice(2);
|
||||
|
||||
debug('cleanupBackups: about to clean: ', toCleanup);
|
||||
|
||||
async.each(toCleanup, function (backup, callback) {
|
||||
backups.removeBackup(backup.id, backup.dependsOn, function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
debug('cleanupBackups: %s, %s done', backup.id, backup.dependsOn.join(', '));
|
||||
|
||||
callback();
|
||||
});
|
||||
}, callback);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
+12
-24
@@ -6,7 +6,6 @@ exports = module.exports = {
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
appdb = require('./appdb.js'),
|
||||
apps = require('./apps.js'),
|
||||
async = require('async'),
|
||||
config = require('./config.js'),
|
||||
@@ -319,28 +318,18 @@ function authenticateMailbox(req, res, next) {
|
||||
if (error) return next(new ldap.OperationsError(error.message));
|
||||
|
||||
if (mailbox.ownerType === mailboxdb.TYPE_APP) {
|
||||
var addonId = req.dn.rdns[1].attrs.ou.value.toLowerCase(); // 'sendmail' or 'recvmail'
|
||||
var name;
|
||||
if (addonId === 'sendmail') name = 'MAIL_SMTP_PASSWORD';
|
||||
else if (addonId === 'recvmail') name = 'MAIL_IMAP_PASSWORD';
|
||||
else return next(new ldap.OperationsError('Invalid DN'));
|
||||
|
||||
appdb.getAddonConfigByName(mailbox.ownerId, addonId, name, function (error, value) {
|
||||
if (error) return next(new ldap.OperationsError(error.message));
|
||||
if (req.credentials !== value) return next(new ldap.InvalidCredentialsError(req.dn.toString()));
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_LOGIN, { authType: 'ldap', mailboxId: name }, { appId: mailbox.ownerId, addonId: addonId });
|
||||
return res.end();
|
||||
});
|
||||
} else if (mailbox.ownerType === mailboxdb.TYPE_USER) {
|
||||
authenticateUser(req, res, function (error) {
|
||||
if (error) return next(error);
|
||||
eventlog.add(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: name }, { userId: req.user.username });
|
||||
res.end();
|
||||
});
|
||||
} else {
|
||||
return next(new ldap.OperationsError('Unknown ownerType for mailbox'));
|
||||
if (req.credentials !== mailbox.ownerId) return next(new ldap.NoSuchObjectError(req.dn.toString()));
|
||||
eventlog.add(eventlog.ACTION_APP_LOGIN, { authType: 'ldap', mailboxId: name }, { appId: mailbox.ownerId });
|
||||
return res.end();
|
||||
}
|
||||
|
||||
assert.strictEqual(mailbox.ownerType, mailboxdb.TYPE_USER);
|
||||
|
||||
authenticateUser(req, res, function (error) {
|
||||
if (error) return next(error);
|
||||
eventlog.add(eventlog.ACTION_USER_LOGIN, { authType: 'ldap', mailboxId: name }, { userId: req.user.username });
|
||||
res.end();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -367,8 +356,7 @@ function start(callback) {
|
||||
gServer.search('ou=mailaliases,dc=cloudron', mailAliasSearch);
|
||||
gServer.search('ou=mailinglists,dc=cloudron', mailingListSearch);
|
||||
|
||||
gServer.bind('ou=recvmail,dc=cloudron', authenticateMailbox);
|
||||
gServer.bind('ou=sendmail,dc=cloudron', authenticateMailbox);
|
||||
gServer.bind('ou=mailboxes,dc=cloudron', authenticateMailbox);
|
||||
|
||||
// this is the bind for addons (after bind, they might search and authenticate)
|
||||
gServer.bind('ou=addons,dc=cloudron', function(req, res, next) {
|
||||
|
||||
@@ -6,6 +6,7 @@ exports = module.exports = {
|
||||
|
||||
listAliases: listAliases,
|
||||
listMailboxes: listMailboxes,
|
||||
// listGroups: listGroups, // this is beyond my SQL skillz
|
||||
|
||||
getMailbox: getMailbox,
|
||||
getGroup: getGroup,
|
||||
|
||||
+1
-3
@@ -95,9 +95,7 @@ function mailConfig() {
|
||||
|
||||
// keep this in sync with the cloudron.js dns changes
|
||||
function checkDns() {
|
||||
if (process.env.BOX_ENV === 'test') return;
|
||||
|
||||
subdomains.waitForDns(config.fqdn(), new RegExp('^"v=spf1 .*a:' + config.adminFqdn().replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '.*'), 'TXT', { interval: 60000, times: Infinity }, function (error) {
|
||||
subdomains.waitForDns(config.fqdn(), new RegExp('^v=spf1 .*a:' + config.adminFqdn().replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '.*'), 'TXT', { interval: 60000, times: Infinity }, function (error) {
|
||||
if (error) return debug(error); // can never happen
|
||||
|
||||
debug('checkDns: SPF check passed. commencing mail processing');
|
||||
|
||||
+1
-1
@@ -2,7 +2,7 @@
|
||||
|
||||
var assert = require('assert'),
|
||||
config = require('./config.js'),
|
||||
debug = require('debug')('box:nginx'),
|
||||
debug = require('debug')('box:src/nginx'),
|
||||
ejs = require('ejs'),
|
||||
fs = require('fs'),
|
||||
path = require('path'),
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
<% include header %>
|
||||
|
||||
<!-- tester -->
|
||||
|
||||
<script>
|
||||
|
||||
'use strict';
|
||||
|
||||
// very basic angular app
|
||||
var app = angular.module('Application', []);
|
||||
app.controller('Controller', ['$scope', function ($scope) {
|
||||
$scope.success = <%= success %>;
|
||||
$scope.error = '<%= error %>';
|
||||
}]);
|
||||
|
||||
</script>
|
||||
|
||||
<div class="container" ng-app="Application" ng-controller="Controller" ng-cloak>
|
||||
<div class="row">
|
||||
<div class="col-md-12 text-center">
|
||||
<br/>
|
||||
<h4 ng-hide="success">Hello there, welcome to <%= cloudronName %>.</h4>
|
||||
<h2 ng-hide="success">Sign up with your email address.</h2>
|
||||
<h3 ng-show="success">You have received an email invitation to this Cloudron to finish the signup.</h3>
|
||||
<br/><br/>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row">
|
||||
<div class="col-md-6 col-md-offset-3" ng-show="!success">
|
||||
<form action="/api/v1/session/account/create" method="post" name="createForm" autocomplete="off" role="form" novalidate>
|
||||
<input type="password" style="display: none;">
|
||||
<input type="hidden" name="_csrf" value="<%= csrf %>"/>
|
||||
|
||||
<div class="form-group" ng-class="{ 'has-error': (createForm.email.$dirty && createForm.email.$invalid) || (!createForm.email.$dirty && error) }">
|
||||
<label class="control-label" for="inputEmail">Email</label>
|
||||
<input type="email" class="form-control" id="inputEmail" ng-model="email" name="email" autofocus required>
|
||||
<div class="control-label" ng-show="(createForm.email.$dirty && createForm.email.$invalid) || (!createForm.email.$dirty && error)">
|
||||
<small ng-show="createForm.email.$dirty && createForm.email.$invalid">Must be a valid email address</small>
|
||||
<small ng-show="!createForm.email.$dirty && error">{{ error }}</small>
|
||||
</div>
|
||||
</div>
|
||||
<input class="btn btn-primary btn-outline pull-right" type="submit" value="Create" ng-disabled="createForm.$invalid"/>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<% include footer %>
|
||||
@@ -32,19 +32,19 @@ app.controller('Controller', ['$scope', function ($scope) {
|
||||
<center><p class="has-error"><%= error %></p></center>
|
||||
|
||||
<% if (user && user.username) { %>
|
||||
<div class="form-group"">
|
||||
<div class="form-group">
|
||||
<label class="control-label">Username</label>
|
||||
<input type="text" class="form-control" ng-model="username" name="username" readonly required>
|
||||
</div>
|
||||
<% } else { %>
|
||||
<div class="form-group" ng-class="{ 'has-error': (setupForm.username.$dirty && setupForm.username.$invalid) }">
|
||||
<label class="control-label">Username</label>
|
||||
<input type="text" class="form-control" ng-model="username" name="username" required autofocus>
|
||||
<div class="control-label" ng-show="setupForm.username.$dirty && setupForm.username.$invalid">
|
||||
<small ng-show="setupForm.username.$error.minlength">The username is too short</small>
|
||||
<small ng-show="setupForm.username.$error.maxlength">The username is too long</small>
|
||||
<small ng-show="setupForm.username.$dirty && setupForm.username.$invalid">Not a valid username</small>
|
||||
</div>
|
||||
<input type="text" class="form-control" ng-model="username" name="username" required autofocus>
|
||||
</div>
|
||||
<% } %>
|
||||
|
||||
@@ -55,18 +55,18 @@ app.controller('Controller', ['$scope', function ($scope) {
|
||||
|
||||
<div class="form-group" ng-class="{ 'has-error': (setupForm.password.$dirty && setupForm.password.$invalid) }">
|
||||
<label class="control-label">New Password</label>
|
||||
<input type="password" class="form-control" ng-model="password" name="password" ng-pattern="/^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,30}$/" required>
|
||||
<div class="control-label" ng-show="setupForm.password.$dirty && setupForm.password.$invalid">
|
||||
<small ng-show="setupForm.password.$dirty && setupForm.password.$invalid">Password must be 8-30 character with at least one uppercase, one numeric and one special character</small>
|
||||
</div>
|
||||
<input type="password" class="form-control" ng-model="password" name="password" ng-pattern="/^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,30}$/" required>
|
||||
</div>
|
||||
|
||||
<div class="form-group" ng-class="{ 'has-error': (setupForm.passwordRepeat.$dirty && (password !== passwordRepeat)) }">
|
||||
<label class="control-label">Repeat Password</label>
|
||||
<input type="password" class="form-control" ng-model="passwordRepeat" name="passwordRepeat" required>
|
||||
<div class="control-label" ng-show="setupForm.passwordRepeat.$dirty && (password !== passwordRepeat)">
|
||||
<small ng-show="setupForm.passwordRepeat.$dirty && (password !== passwordRepeat)">Passwords don't match</small>
|
||||
</div>
|
||||
<input type="password" class="form-control" ng-model="passwordRepeat" name="passwordRepeat" required>
|
||||
</div>
|
||||
|
||||
<input class="btn btn-primary btn-outline pull-right" type="submit" value="Create" ng-disabled="setupForm.$invalid || password !== passwordRepeat"/>
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="user-scalable=no, initial-scale=1, maximum-scale=1, minimum-scale=1, width=device-width, height=device-height" />
|
||||
<meta http-equiv="Content-Security-Policy" content="default-src 'unsafe-inline' 'unsafe-eval' 'self'; img-src 'self'" />
|
||||
|
||||
<title> <%= title %> </title>
|
||||
|
||||
|
||||
@@ -26,17 +26,17 @@ app.controller('Controller', [function () {}]);
|
||||
|
||||
<div class="form-group" ng-class="{ 'has-error': resetForm.password.$dirty && resetForm.password.$invalid }">
|
||||
<label class="control-label" for="inputPassword">New Password</label>
|
||||
<input type="password" class="form-control" id="inputPassword" ng-model="password" name="password" ng-pattern="/^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,30}$/" autofocus required>
|
||||
<div class="control-label" ng-show="resetForm.password.$dirty && resetForm.password.$invalid">
|
||||
<small ng-show="resetForm.password.$dirty && resetForm.password.$invalid">Password must be 8-30 character with at least one uppercase, one numeric and one special character</small>
|
||||
</div>
|
||||
<input type="password" class="form-control" id="inputPassword" ng-model="password" name="password" ng-pattern="/^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,30}$/" autofocus required>
|
||||
</div>
|
||||
<div class="form-group" ng-class="{ 'has-error': resetForm.passwordRepeat.$dirty && (password !== passwordRepeat) }">
|
||||
<label class="control-label" for="inputPasswordRepeat">Repeat Password</label>
|
||||
<input type="password" class="form-control" id="inputPasswordRepeat" ng-model="passwordRepeat" name="passwordRepeat" required>
|
||||
<div class="control-label" ng-show="resetForm.passwordRepeat.$dirty && (password !== passwordRepeat)">
|
||||
<small ng-show="resetForm.passwordRepeat.$dirty && (password !== passwordRepeat)">Passwords don't match</small>
|
||||
</div>
|
||||
<input type="password" class="form-control" id="inputPasswordRepeat" ng-model="passwordRepeat" name="passwordRepeat" required>
|
||||
</div>
|
||||
<input class="btn btn-primary btn-outline pull-right" type="submit" value="Create" ng-disabled="resetForm.$invalid || password !== passwordRepeat"/>
|
||||
</form>
|
||||
|
||||
+9
-12
@@ -6,21 +6,18 @@ var config = require('./config.js'),
|
||||
// keep these values in sync with start.sh
|
||||
exports = module.exports = {
|
||||
CLOUDRON_DEFAULT_AVATAR_FILE: path.join(__dirname + '/../assets/avatar.png'),
|
||||
INFRA_VERSION_FILE: path.join(config.baseDir(), 'platformdata/INFRA_VERSION'),
|
||||
BACKUP_RESULT_FILE: path.join(config.baseDir(), 'platformdata/backupresult'),
|
||||
INFRA_VERSION_FILE: path.join(config.baseDir(), 'data/INFRA_VERSION'),
|
||||
|
||||
OLD_DATA_DIR: path.join(config.baseDir(), 'data'),
|
||||
PLATFORM_DATA_DIR: path.join(config.baseDir(), 'platformdata'),
|
||||
APPS_DATA_DIR: path.join(config.baseDir(), 'appsdata'),
|
||||
DATA_DIR: path.join(config.baseDir(), 'data'),
|
||||
BOX_DATA_DIR: path.join(config.baseDir(), 'boxdata'),
|
||||
|
||||
ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'platformdata/acme'),
|
||||
ADDON_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/addons'),
|
||||
COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/collectd/collectd.conf.d'),
|
||||
MAIL_DATA_DIR: path.join(config.baseDir(), 'platformdata/mail'),
|
||||
NGINX_CONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx'),
|
||||
NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'platformdata/nginx/applications'),
|
||||
NGINX_CERT_DIR: path.join(config.baseDir(), 'platformdata/nginx/cert'),
|
||||
ACME_CHALLENGES_DIR: path.join(config.baseDir(), 'data/acme'),
|
||||
ADDON_CONFIG_DIR: path.join(config.baseDir(), 'data/addons'),
|
||||
COLLECTD_APPCONFIG_DIR: path.join(config.baseDir(), 'data/collectd/collectd.conf.d'),
|
||||
MAIL_DATA_DIR: path.join(config.baseDir(), 'data/mail'),
|
||||
NGINX_CONFIG_DIR: path.join(config.baseDir(), 'data/nginx'),
|
||||
NGINX_APPCONFIG_DIR: path.join(config.baseDir(), 'data/nginx/applications'),
|
||||
NGINX_CERT_DIR: path.join(config.baseDir(), 'data/nginx/cert'),
|
||||
|
||||
// this is not part of appdata because an icon may be set before install
|
||||
ACME_ACCOUNT_KEY_FILE: path.join(config.baseDir(), 'boxdata/acme/acme.key'),
|
||||
|
||||
+41
-45
@@ -1,15 +1,20 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
start: start,
|
||||
stop: stop,
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
|
||||
createMailConfig: createMailConfig
|
||||
start: start,
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_READY: 'ready'
|
||||
};
|
||||
|
||||
var apps = require('./apps.js'),
|
||||
assert = require('assert'),
|
||||
async = require('async'),
|
||||
cloudron = require('./cloudron.js'),
|
||||
config = require('./config.js'),
|
||||
certificates = require('./certificates.js'),
|
||||
debug = require('debug')('box:platform'),
|
||||
@@ -20,11 +25,9 @@ var apps = require('./apps.js'),
|
||||
os = require('os'),
|
||||
paths = require('./paths.js'),
|
||||
safe = require('safetydance'),
|
||||
semver = require('semver'),
|
||||
settings = require('./settings.js'),
|
||||
shell = require('./shell.js'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
taskmanager = require('./taskmanager.js'),
|
||||
user = require('./user.js'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
@@ -33,6 +36,13 @@ var gPlatformReadyTimer = null;
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
return callback();
|
||||
}
|
||||
|
||||
function start(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -46,6 +56,8 @@ function start(callback) {
|
||||
if (domain === '*.' + config.fqdn() || domain === config.adminFqdn()) startMail(NOOP_CALLBACK);
|
||||
});
|
||||
|
||||
cloudron.events.on(cloudron.EVENT_ACTIVATED, function () { createMailConfig(NOOP_CALLBACK); });
|
||||
|
||||
var existingInfra = { version: 'none' };
|
||||
if (fs.existsSync(paths.INFRA_VERSION_FILE)) {
|
||||
existingInfra = safe.JSON.parse(fs.readFileSync(paths.INFRA_VERSION_FILE, 'utf8'));
|
||||
@@ -76,22 +88,24 @@ function start(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function stop(callback) {
|
||||
function uninitialize(callback) {
|
||||
clearTimeout(gPlatformReadyTimer);
|
||||
gPlatformReadyTimer = null;
|
||||
|
||||
exports.events = null;
|
||||
taskmanager.pauseTasks(callback);
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function emitPlatformReady() {
|
||||
// give some time for the platform to "settle". For example, mysql might still be initing the
|
||||
// give 30 seconds for the platform to "settle". For example, mysql might still be initing the
|
||||
// database dir and we cannot call service scripts until that's done.
|
||||
// TODO: make this smarter to not wait for 15secs for the crash-restart case
|
||||
// TODO: make this smarter to not wait for 30secs for the crash-restart case
|
||||
gPlatformReadyTimer = setTimeout(function () {
|
||||
debug('emitting platform ready');
|
||||
gPlatformReadyTimer = null;
|
||||
taskmanager.resumeTasks();
|
||||
}, 15000);
|
||||
exports.events.emit(exports.EVENT_READY);
|
||||
}, 30000);
|
||||
}
|
||||
|
||||
function removeOldImages(callback) {
|
||||
@@ -111,8 +125,7 @@ function removeOldImages(callback) {
|
||||
function stopContainers(existingInfra, callback) {
|
||||
// TODO: be nice and stop addons cleanly (example, shutdown commands)
|
||||
|
||||
// always stop addons to restart them on any infra change, regardless of minor or major update
|
||||
if (existingInfra.version !== infra.version) {
|
||||
if (existingInfra.version !== infra.version) { // infra upgrade
|
||||
debug('stopping all containers for infra upgrade');
|
||||
shell.execSync('stopContainers', 'docker ps -qa | xargs --no-run-if-empty docker rm -f');
|
||||
} else {
|
||||
@@ -133,15 +146,13 @@ function stopContainers(existingInfra, callback) {
|
||||
|
||||
function startGraphite(callback) {
|
||||
const tag = infra.images.graphite.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const dataDir = paths.DATA_DIR;
|
||||
|
||||
const cmd = `docker run --restart=always -d --name="graphite" \
|
||||
--net cloudron \
|
||||
--net-alias graphite \
|
||||
-m 75m \
|
||||
--memory-swap 150m \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-p 127.0.0.1:2003:2003 \
|
||||
-p 127.0.0.1:2004:2004 \
|
||||
-p 127.0.0.1:8000:8000 \
|
||||
@@ -155,11 +166,11 @@ function startGraphite(callback) {
|
||||
|
||||
function startMysql(callback) {
|
||||
const tag = infra.images.mysql.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const dataDir = paths.DATA_DIR;
|
||||
const rootPassword = hat(8 * 128);
|
||||
const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256;
|
||||
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mysql_vars.sh',
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mysql_vars.sh',
|
||||
'MYSQL_ROOT_PASSWORD=' + rootPassword +'\nMYSQL_ROOT_HOST=172.18.0.1', 'utf8')) {
|
||||
return callback(new Error('Could not create mysql var file:' + safe.error.message));
|
||||
}
|
||||
@@ -169,24 +180,22 @@ function startMysql(callback) {
|
||||
--net-alias mysql \
|
||||
-m ${memoryLimit}m \
|
||||
--memory-swap ${memoryLimit * 2}m \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-v "${dataDir}/mysql:/var/lib/mysql" \
|
||||
-v "${dataDir}/addons/mysql_vars.sh:/etc/mysql/mysql_vars.sh:ro" \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
shell.execSync('startMysql', cmd);
|
||||
|
||||
setTimeout(callback, 5000);
|
||||
callback();
|
||||
}
|
||||
|
||||
function startPostgresql(callback) {
|
||||
const tag = infra.images.postgresql.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const dataDir = paths.DATA_DIR;
|
||||
const rootPassword = hat(8 * 128);
|
||||
const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256;
|
||||
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/postgresql_vars.sh', 'POSTGRESQL_ROOT_PASSWORD=' + rootPassword, 'utf8')) {
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/postgresql_vars.sh', 'POSTGRESQL_ROOT_PASSWORD=' + rootPassword, 'utf8')) {
|
||||
return callback(new Error('Could not create postgresql var file:' + safe.error.message));
|
||||
}
|
||||
|
||||
@@ -195,24 +204,22 @@ function startPostgresql(callback) {
|
||||
--net-alias postgresql \
|
||||
-m ${memoryLimit}m \
|
||||
--memory-swap ${memoryLimit * 2}m \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-v "${dataDir}/postgresql:/var/lib/postgresql" \
|
||||
-v "${dataDir}/addons/postgresql_vars.sh:/etc/postgresql/postgresql_vars.sh:ro" \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
shell.execSync('startPostgresql', cmd);
|
||||
|
||||
setTimeout(callback, 5000);
|
||||
callback();
|
||||
}
|
||||
|
||||
function startMongodb(callback) {
|
||||
const tag = infra.images.mongodb.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const dataDir = paths.DATA_DIR;
|
||||
const rootPassword = hat(8 * 128);
|
||||
const memoryLimit = (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 200;
|
||||
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mongodb_vars.sh', 'MONGODB_ROOT_PASSWORD=' + rootPassword, 'utf8')) {
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mongodb_vars.sh', 'MONGODB_ROOT_PASSWORD=' + rootPassword, 'utf8')) {
|
||||
return callback(new Error('Could not create mongodb var file:' + safe.error.message));
|
||||
}
|
||||
|
||||
@@ -221,15 +228,13 @@ function startMongodb(callback) {
|
||||
--net-alias mongodb \
|
||||
-m ${memoryLimit}m \
|
||||
--memory-swap ${memoryLimit * 2}m \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
-v "${dataDir}/mongodb:/var/lib/mongodb" \
|
||||
-v "${dataDir}/addons/mongodb_vars.sh:/etc/mongodb_vars.sh:ro" \
|
||||
--read-only -v /tmp -v /run "${tag}"`;
|
||||
|
||||
shell.execSync('startMongodb', cmd);
|
||||
|
||||
setTimeout(callback, 5000);
|
||||
callback();
|
||||
}
|
||||
|
||||
function createMailConfig(callback) {
|
||||
@@ -243,7 +248,7 @@ function createMailConfig(callback) {
|
||||
var alertsTo = config.provider() === 'caas' ? [ 'support@cloudron.io' ] : [ ];
|
||||
alertsTo.concat(error ? [] : owner.email).join(',');
|
||||
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/mail.ini',
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/mail.ini',
|
||||
`mail_domain=${fqdn}\nmail_server_name=${mailFqdn}\nalerts_from=${alertsFrom}\nalerts_to=${alertsTo}`, 'utf8')) {
|
||||
return callback(new Error('Could not create mail var file:' + safe.error.message));
|
||||
}
|
||||
@@ -259,15 +264,15 @@ function startMail(callback) {
|
||||
// mail container uses /app/data for backed up data and /run for restart-able data
|
||||
|
||||
const tag = infra.images.mail.tag;
|
||||
const dataDir = paths.PLATFORM_DATA_DIR;
|
||||
const dataDir = paths.DATA_DIR;
|
||||
const memoryLimit = Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256);
|
||||
|
||||
// admin and mail share the same certificate
|
||||
certificates.getAdminCertificate(function (error, cert, key) {
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(paths.ADDON_CONFIG_DIR + '/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_cert.pem', cert)) return callback(new Error('Could not create cert file:' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(paths.DATA_DIR + '/addons/mail/tls_key.pem', key)) return callback(new Error('Could not create key file:' + safe.error.message));
|
||||
|
||||
settings.getMailConfig(function (error, mailConfig) {
|
||||
if (error) return callback(error);
|
||||
@@ -284,9 +289,6 @@ function startMail(callback) {
|
||||
--net-alias mail \
|
||||
-m ${memoryLimit}m \
|
||||
--memory-swap ${memoryLimit * 2}m \
|
||||
--dns 172.18.0.1 \
|
||||
--dns-search=. \
|
||||
--env ENABLE_MDA=${mailConfig.enabled} \
|
||||
-v "${dataDir}/mail:/app/data" \
|
||||
-v "${dataDir}/addons/mail:/etc/mail" \
|
||||
${ports} \
|
||||
@@ -314,7 +316,6 @@ function startMail(callback) {
|
||||
function startAddons(existingInfra, callback) {
|
||||
var startFuncs = [ ];
|
||||
|
||||
// always start addons on any infra change, regardless of minor or major update
|
||||
if (existingInfra.version !== infra.version) {
|
||||
debug('startAddons: no existing infra or infra upgrade. starting all addons');
|
||||
startFuncs.push(startGraphite, startMysql, startPostgresql, startMongodb, startMail);
|
||||
@@ -334,15 +335,10 @@ function startAddons(existingInfra, callback) {
|
||||
}
|
||||
|
||||
function startApps(existingInfra, callback) {
|
||||
// Infra version change strategy:
|
||||
// * no existing version - restore apps
|
||||
// * major versions - restore apps
|
||||
// * minor versions - reconfigure apps
|
||||
|
||||
if (existingInfra.version === infra.version) {
|
||||
debug('startApp: apps are already uptodate');
|
||||
callback();
|
||||
} else if (existingInfra.version === 'none' || !semver.valid(existingInfra.version) || semver.major(existingInfra.version) !== semver.major(infra.version)) {
|
||||
} else if (existingInfra.version === 'none') {
|
||||
debug('startApps: restoring installed apps');
|
||||
apps.restoreInstalledApps(callback);
|
||||
} else {
|
||||
|
||||
+2
-15
@@ -113,8 +113,6 @@ function installApp(req, res, next) {
|
||||
if (('portBindings' in data) && typeof data.portBindings !== 'object') return next(new HttpError(400, 'portBindings must be an object'));
|
||||
if ('icon' in data && typeof data.icon !== 'string') return next(new HttpError(400, 'icon is not a string'));
|
||||
|
||||
if (data.backupId && typeof data.backupId !== 'string') return next(new HttpError(400, 'backupId must be string or null'));
|
||||
|
||||
// falsy values in cert and key unset the cert
|
||||
if (data.key && typeof data.cert !== 'string') return next(new HttpError(400, 'cert must be a string'));
|
||||
if (data.cert && typeof data.key !== 'string') return next(new HttpError(400, 'key must be a string'));
|
||||
@@ -329,12 +327,7 @@ function getLogStream(req, res, next) {
|
||||
|
||||
if (req.headers.accept !== 'text/event-stream') return next(new HttpError(400, 'This API call requires EventStream'));
|
||||
|
||||
var options = {
|
||||
lines: lines,
|
||||
follow: true
|
||||
};
|
||||
|
||||
apps.getLogs(req.params.id, options, function (error, logStream) {
|
||||
apps.getLogs(req.params.id, lines, true /* follow */, function (error, logStream) {
|
||||
if (error && error.reason === AppsError.NOT_FOUND) return next(new HttpError(404, 'No such app'));
|
||||
if (error && error.reason === AppsError.BAD_STATE) return next(new HttpError(412, error.message));
|
||||
if (error) return next(new HttpError(500, error));
|
||||
@@ -365,13 +358,7 @@ function getLogs(req, res, next) {
|
||||
|
||||
debug('Getting logs of app id:%s', req.params.id);
|
||||
|
||||
var options = {
|
||||
lines: lines,
|
||||
follow: false,
|
||||
format: req.query.format
|
||||
};
|
||||
|
||||
apps.getLogs(req.params.id, options, function (error, logStream) {
|
||||
apps.getLogs(req.params.id, lines, false /* follow */, function (error, logStream) {
|
||||
if (error && error.reason === AppsError.NOT_FOUND) return next(new HttpError(404, 'No such app'));
|
||||
if (error && error.reason === AppsError.BAD_STATE) return next(new HttpError(412, error.message));
|
||||
if (error) return next(new HttpError(500, error));
|
||||
|
||||
+25 -3
@@ -2,10 +2,12 @@

exports = module.exports = {
get: get,
create: create
create: create,
createDownloadUrl: createDownloadUrl,
download: download
};

var backupdb = require('../backupdb.js'),
var assert = require('assert'),
backups = require('../backups.js'),
BackupsError = require('../backups.js').BackupsError,
HttpError = require('connect-lastmile').HttpError,
@@ -23,7 +25,7 @@ function get(req, res, next) {
var perPage = typeof req.query.per_page !== 'undefined'? parseInt(req.query.per_page) : 25;
if (!perPage || perPage < 0) return next(new HttpError(400, 'per_page query param has to be a postive number'));

backups.getByStatePaged(backupdb.BACKUP_STATE_NORMAL, page, perPage, function (error, result) {
backups.getPaged(page, perPage, function (error, result) {
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return next(new HttpError(503, error.message));
if (error) return next(new HttpError(500, error));

@@ -41,3 +43,23 @@ function create(req, res, next) {
next(new HttpSuccess(202, {}));
});
}

function createDownloadUrl(req, res, next) {
assert.strictEqual(typeof req.params.backupId, 'string');

backups.getRestoreUrl(req.params.backupId, function (error, result) {
if (error) return next(new HttpError(500, error));

next(new HttpSuccess(200, result));
});
}

function download(req, res, next) {
assert.strictEqual(typeof req.params.backupId, 'string');

backups.getLocalDownloadPath(req.params.backupId, function (error, result) {
if (error) return next(new HttpError(500, error));

res.sendFile(result);
});
}
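A usage sketch for the paginated listing handled by `get()` above; the `/api/v1/backups` path and the `SERVER_URL`/`token` variables are assumptions borrowed from the test files further down in this diff, and the response field name is likewise assumed:

```javascript
var superagent = require('superagent');

superagent.get(SERVER_URL + '/api/v1/backups')          // route path assumed, POST variant appears in the tests
    .query({ access_token: token, page: 1, per_page: 25 })
    .end(function (error, result) {
        if (error) return console.error(error);
        console.log(result.body);                        // paged backup list; exact shape is an assumption
    });
```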
+5 -52
@@ -4,17 +4,14 @@ exports = module.exports = {
activate: activate,
dnsSetup: dnsSetup,
setupTokenAuth: setupTokenAuth,
providerTokenAuth: providerTokenAuth,
getStatus: getStatus,
reboot: reboot,
migrate: migrate,
getProgress: getProgress,
getConfig: getConfig,
getDisks: getDisks,
update: update,
feedback: feedback,
checkForUpdates: checkForUpdates,
getLogs: getLogs
checkForUpdates: checkForUpdates
};

var assert = require('assert'),
@@ -80,7 +77,7 @@ function dnsSetup(req, res, next) {
if (typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
if (typeof req.body.domain !== 'string' || !req.body.domain) return next(new HttpError(400, 'domain is required'));

cloudron.dnsSetup(req.body, req.body.domain.toLowerCase(), function (error) {
cloudron.dnsSetup(req.body, req.body.domain, function (error) {
if (error && error.reason === CloudronError.ALREADY_SETUP) return next(new HttpError(409, error.message));
if (error && error.reason === CloudronError.BAD_FIELD) return next(new HttpError(400, error.message));
if (error) return next(new HttpError(500, error));
@@ -105,22 +102,14 @@ function setupTokenAuth(req, res, next) {

next();
});
} else {
next();
}
}

function providerTokenAuth(req, res, next) {
assert.strictEqual(typeof req.body, 'object');

if (config.provider() === 'ami') {
if (typeof req.body.providerToken !== 'string' || !req.body.providerToken) return next(new HttpError(400, 'providerToken must be a non empty string'));
} else if (config.provider() === 'ami') {
if (typeof req.query.setupToken !== 'string' || !req.query.setupToken) return next(new HttpError(400, 'setupToken must be a non empty string'));

superagent.get('http://169.254.169.254/latest/meta-data/instance-id').timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return next(new HttpError(500, error));
if (result.statusCode !== 200) return next(new HttpError(500, 'Unable to get meta data'));

if (result.text !== req.body.providerToken) return next(new HttpError(403, 'Invalid providerToken'));
if (result.text !== req.query.setupToken) return next(new HttpError(403, 'Invalid token'));

next();
});
@@ -164,8 +153,6 @@ function migrate(req, res, next) {
var options = _.pick(req.body, 'domain', 'size', 'region');
if (Object.keys(options).length === 0) return next(new HttpError(400, 'no migrate option provided'));

if (options.domain) options.domain = options.domain.toLowerCase();

cloudron.migrate(req.body, function (error) { // pass req.body because 'domain' can have arbitrary options
if (error && error.reason === CloudronError.BAD_STATE) return next(new HttpError(409, error.message));
if (error && error.reason === CloudronError.BAD_FIELD) return next(new HttpError(400, error.message));
@@ -183,13 +170,6 @@ function getConfig(req, res, next) {
});
}

function getDisks(req, res, next) {
cloudron.getDisks(function (error, result) {
if (error) return next(new HttpError(500, error));
next(new HttpSuccess(200, result));
});
}

function update(req, res, next) {
// this only initiates the update, progress can be checked via the progress route
cloudron.updateToLatest(auditSource(req), function (error) {
@@ -226,30 +206,3 @@ function feedback(req, res, next) {

next(new HttpSuccess(201, {}));
}

function getLogs(req, res, next) {
var lines = req.query.lines ? parseInt(req.query.lines, 10) : 100;
if (isNaN(lines)) return next(new HttpError(400, 'lines must be a number'));

var units = req.query.units || 'all';

var options = {
lines: lines,
follow: false,
units: units.split(','),
format: req.query.format
};

cloudron.getLogs(options, function (error, logStream) {
if (error && error.reason === CloudronError.BAD_FIELD) return next(new HttpError(404, 'Invalid type'));
if (error) return next(new HttpError(500, error));

res.writeHead(200, {
'Content-Type': 'application/x-logs',
'Content-Disposition': 'attachment; filename="log.txt"',
'Cache-Control': 'no-cache',
'X-Accel-Buffering': 'no' // disable nginx buffering
});
logStream.pipe(res);
});
}
@@ -11,6 +11,7 @@ var appdb = require('../appdb'),
DatabaseError = require('../databaseerror'),
debug = require('debug')('box:routes/oauth2'),
eventlog = require('../eventlog.js'),
generatePassword = require('../password.js').generate,
hat = require('hat'),
HttpError = require('connect-lastmile').HttpError,
middleware = require('../middleware/index.js'),
@@ -357,6 +358,87 @@ function accountSetup(req, res, next) {
});
}

// -> POST /api/v1/session/account/setup
function accountSetup(req, res, next) {
assert.strictEqual(typeof req.body, 'object');

if (typeof req.body.resetToken !== 'string') return next(new HttpError(400, 'Missing resetToken'));
if (typeof req.body.password !== 'string') return next(new HttpError(400, 'Missing password'));
if (typeof req.body.username !== 'string') return next(new HttpError(400, 'Missing username'));
if (typeof req.body.displayName !== 'string') return next(new HttpError(400, 'Missing displayName'));

debug('accountSetup: with token %s.', req.body.resetToken);

user.getByResetToken(req.body.resetToken, function (error, userObject) {
if (error) return sendError(req, res, 'Invalid Reset Token');

var data = _.pick(req.body, 'username', 'displayName');
user.update(userObject.id, data, auditSource(req), function (error) {
if (error && error.reason === UserError.ALREADY_EXISTS) return renderAccountSetupSite(res, req, userObject, 'Username already exists');
if (error && error.reason === UserError.BAD_FIELD) return renderAccountSetupSite(res, req, userObject, error.message);
if (error && error.reason === UserError.NOT_FOUND) return renderAccountSetupSite(res, req, userObject, 'No such user');
if (error) return next(new HttpError(500, error));

userObject.username = req.body.username;
userObject.displayName = req.body.displayName;

// setPassword clears the resetToken
user.setPassword(userObject.id, req.body.password, function (error, result) {
if (error && error.reason === UserError.BAD_FIELD) return renderAccountSetupSite(res, req, userObject, error.message);

if (error) return next(new HttpError(500, error));

res.redirect(util.format('%s?accessToken=%s&expiresAt=%s', config.adminOrigin(), result.token, result.expiresAt));
});
});
});
}

function renderAccountCreateSite(res, req, error, success) {
renderTemplate(res, 'account_create', {
error: error,
success: !!success,
csrf: req.csrfToken(),
title: 'Account Create'
});
}

// -> GET /api/v1/session/account/create.html
function accountCreateSite(req, res, next) {
settings.getOpenRegistration(function (error, enabled) {
if (error) return next(new HttpError(500, error));
if (!enabled) return sendError(req, res, 'User creation is not allowed on this Cloudron');

renderAccountCreateSite(res, req, '', '');
});
}

// -> POST /api/v1/session/account/create
function accountCreate(req, res, next) {
assert.strictEqual(typeof req.body, 'object');

if (typeof req.body.email !== 'string') return next(new HttpError(400, 'Missing email'));

debug('accountCreate: with email %s.', req.body.email);

settings.getOpenRegistration(function (error, enabled) {
if (error) return next(new HttpError(500, error));
if (!enabled) return sendError(req, res, 'User signup is not allowed on this Cloudron');

var ip = req.headers['x-forwarded-for'] || req.connection.remoteAddress || null;
var auditSource = { ip: ip, username: req.body.email, userId: null };

user.create(null, generatePassword(), req.body.email, '', auditSource, { sendInvite: true }, function (error, result) {
if (error && error.reason === UserError.ALREADY_EXISTS) return renderAccountCreateSite(res, req, 'User with this email address already exists');
if (error) return sendError(req, res, 'Internal Error');
debug('accountCreate: success for email %s now with id %s', req.body.email, result.id);

renderAccountCreateSite(res, req, '', true);
});
});
}

// -> GET /api/v1/session/password/reset.html
function passwordResetSite(req, res, next) {
if (!req.query.reset_token) return next(new HttpError(400, 'Missing reset_token'));
@@ -555,6 +637,8 @@ exports = module.exports = {
passwordReset: passwordReset,
accountSetupSite: accountSetupSite,
accountSetup: accountSetup,
accountCreateSite: accountCreateSite,
accountCreate: accountCreate,
authorization: authorization,
token: token,
validateRequestedScopes: validateRequestedScopes,
+24 -1
@@ -27,6 +27,9 @@ exports = module.exports = {
getAppstoreConfig: getAppstoreConfig,
setAppstoreConfig: setAppstoreConfig,

getOpenRegistration: getOpenRegistration,
setOpenRegistration: setOpenRegistration,

setFallbackCertificate: setFallbackCertificate,
setAdminCertificate: setAdminCertificate
};
@@ -191,7 +194,6 @@ function setBackupConfig(req, res, next) {
assert.strictEqual(typeof req.body, 'object');

if (typeof req.body.provider !== 'string') return next(new HttpError(400, 'provider is required'));
if (typeof req.body.retentionSecs !== 'number') return next(new HttpError(400, 'retentionSecs is required'));
if ('key' in req.body && typeof req.body.key !== 'string') return next(new HttpError(400, 'key must be a string'));

settings.setBackupConfig(req.body, function (error) {
@@ -235,6 +237,27 @@ function setAppstoreConfig(req, res, next) {
});
}

function getOpenRegistration(req, res, next) {
settings.getOpenRegistration(function (error, enabled) {
if (error) return next(new HttpError(500, error));

next(new HttpSuccess(200, { enabled: enabled }));
});
}

function setOpenRegistration(req, res, next) {
assert.strictEqual(typeof req.body, 'object');

if (typeof req.body.enabled !== 'boolean') return next(new HttpError(400, 'enabled is required'));

settings.setOpenRegistration(req.body.enabled, function (error) {
if (error && error.reason === SettingsError.BAD_FIELD) return next(new HttpError(400, error.message));
if (error) return next(new HttpError(500, error));

next(new HttpSuccess(200));
});
}

// default fallback cert
function setFallbackCertificate(req, res, next) {
assert.strictEqual(typeof req.body, 'object');
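A minimal usage sketch for the new open-registration setting; it mirrors the `/api/v1/settings/open_registration` tests that appear further down in this diff, and assumes `SERVER_URL` and an admin `token` as in those tests:

```javascript
var superagent = require('superagent');

superagent.post(SERVER_URL + '/api/v1/settings/open_registration')
    .query({ access_token: token })      // admin token, as in the tests
    .send({ enabled: true })
    .end(function (error, result) {
        if (error) return console.error(error);
        console.log(result.statusCode);  // 200 on success, 400 when 'enabled' is missing
    });
```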
@@ -41,7 +41,7 @@ var SERVER_URL = 'http://localhost:' + config.get('port');

// Test image information
var TEST_IMAGE_REPO = 'cloudron/test';
var TEST_IMAGE_TAG = '23.0.0';
var TEST_IMAGE_TAG = '20.0.0';
var TEST_IMAGE = TEST_IMAGE_REPO + ':' + TEST_IMAGE_TAG;
// var TEST_IMAGE_ID = child_process.execSync('docker inspect --format={{.Id}} ' + TEST_IMAGE).toString('utf8').trim();

@@ -747,7 +747,7 @@ describe('App installation', function () {
});

it('installation - volume created', function (done) {
expect(fs.existsSync(paths.APPS_DATA_DIR + '/' + APP_ID));
expect(fs.existsSync(paths.DATA_DIR + '/' + APP_ID));
done();
});

@@ -758,10 +758,7 @@ describe('App installation', function () {
superagent.get('http://localhost:' + appEntry.httpPort + appResult.manifest.healthCheckPath)
.end(function (err, res) {
if (err || res.statusCode !== 200) {
if (--tryCount === 0) {
console.log('Unable to curl http://localhost:' + appEntry.httpPort + appResult.manifest.healthCheckPath);
return done(new Error('Timedout'));
}
if (--tryCount === 0) return done(new Error('Timedout'));
return setTimeout(healthCheck, 2000);
}

@@ -787,9 +784,9 @@ describe('App installation', function () {

// support newer docker versions
if (data.Volumes) {
expect(data.Volumes['/app/data']).to.eql(paths.APPS_DATA_DIR + '/' + APP_ID + '/data');
expect(data.Volumes['/app/data']).to.eql(paths.DATA_DIR + '/' + APP_ID + '/data');
} else {
expect(data.Mounts.filter(function (mount) { return mount.Destination === '/app/data'; })[0].Source).to.eql(paths.APPS_DATA_DIR + '/' + APP_ID + '/data');
expect(data.Mounts.filter(function (mount) { return mount.Destination === '/app/data'; })[0].Source).to.eql(paths.DATA_DIR + '/' + APP_ID + '/data');
}

done();
@@ -1143,7 +1140,7 @@ describe('App installation', function () {
});

it('uninstalled - volume destroyed', function (done) {
expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP_ID));
expect(!fs.existsSync(paths.DATA_DIR + '/' + APP_ID));
done();
});
@@ -10,13 +10,10 @@ var appdb = require('../../appdb.js'),
config = require('../../config.js'),
database = require('../../database.js'),
expect = require('expect.js'),
hock = require('hock'),
http = require('http'),
nock = require('nock'),
superagent = require('superagent'),
server = require('../../server.js'),
settings = require('../../settings.js'),
url = require('url');
nock = require('nock');

var SERVER_URL = 'http://localhost:' + config.get('port');

@@ -25,10 +22,7 @@ var token = null;

var server;
function setup(done) {
nock.cleanAll();
config._reset();
config.setVersion('1.2.3');
config.set('fqdn', 'localhost');

async.series([
server.start.bind(server),
@@ -75,21 +69,7 @@ function cleanup(done) {
}

describe('Backups API', function () {
var apiHockInstance = hock.createHock({ throwOnUnmatched: false }), apiHockServer;

before(setup);
before(function (done) {
apiHockInstance
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } }, { 'Content-Type': 'application/json' });
var port = parseInt(url.parse(config.apiServerOrigin()).port, 10);
apiHockServer = http.createServer(apiHockInstance.handler).listen(port, done);
});

after(function (done) {
apiHockServer.close();
done();
});
after(cleanup);

describe('create', function () {
@@ -111,17 +91,21 @@ describe('Backups API', function () {
});

it('succeeds', function (done) {
var scope = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } });

superagent.post(SERVER_URL + '/api/v1/backups')
.query({ access_token: token })
.end(function (error, result) {
expect(result.statusCode).to.equal(202);

function checkAppstoreServerCalled() {
apiHockInstance.done(function (error) {
if (!error) return done();
if (scope.isDone()) {
return done();
}

setTimeout(checkAppstoreServerCalled, 100);
});
setTimeout(checkAppstoreServerCalled, 100);
}

checkAppstoreServerCalled();
@@ -268,7 +268,7 @@ describe('Cloudron', function () {

});

xdescribe('migrate', function () {
describe('migrate', function () {
before(function (done) {
async.series([
setup,
@@ -463,7 +463,7 @@ describe('Cloudron', function () {
after(cleanup);

it('fails without token', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
@@ -472,7 +472,7 @@ describe('Cloudron', function () {
});

it('fails without type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -482,7 +482,7 @@ describe('Cloudron', function () {
});

it('fails with empty type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: '', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -492,7 +492,7 @@ describe('Cloudron', function () {
});

it('fails with unknown type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'foobar', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -502,7 +502,7 @@ describe('Cloudron', function () {
});

it('succeeds with ticket type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -512,7 +512,7 @@ describe('Cloudron', function () {
});

it('succeeds with app type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'app_missing', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -522,7 +522,7 @@ describe('Cloudron', function () {
});

it('fails without description', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', subject: 'some subject' })
.query({ access_token: token })
.end(function (error, result) {
@@ -532,7 +532,7 @@ describe('Cloudron', function () {
});

it('fails with empty subject', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', subject: '', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -542,7 +542,7 @@ describe('Cloudron', function () {
});

it('fails with empty description', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', subject: 'some subject', description: '' })
.query({ access_token: token })
.end(function (error, result) {
@@ -552,7 +552,7 @@ describe('Cloudron', function () {
});

it('succeeds with feedback type', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'feedback', subject: 'some subject', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -562,7 +562,7 @@ describe('Cloudron', function () {
});

it('fails without subject', function (done) {
superagent.post(SERVER_URL + '/api/v1/feedback')
superagent.post(SERVER_URL + '/api/v1/cloudron/feedback')
.send({ type: 'ticket', description: 'some description' })
.query({ access_token: token })
.end(function (error, result) {
@@ -87,7 +87,7 @@ describe('Eventlog API', function () {

describe('get', function () {
it('fails due to wrong token', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token.toUpperCase() })
.end(function (error, result) {
expect(result.statusCode).to.equal(401);
@@ -96,7 +96,7 @@ describe('Eventlog API', function () {
});

it('fails for non-admin', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token_1, page: 1, per_page: 10 })
.end(function (error, result) {
expect(result.statusCode).to.equal(403);
@@ -106,7 +106,7 @@ describe('Eventlog API', function () {
});

it('succeeds for admin', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token, page: 1, per_page: 10 })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
@@ -117,7 +117,7 @@ describe('Eventlog API', function () {
});

it('succeeds with action', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token, page: 1, per_page: 10, action: 'cloudron.activate' })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
@@ -128,7 +128,7 @@ describe('Eventlog API', function () {
});

it('succeeds with search', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token, page: 1, per_page: 10, search: EMAIL })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
@@ -139,7 +139,7 @@ describe('Eventlog API', function () {
});

it('succeeds with search', function (done) {
superagent.get(SERVER_URL + '/api/v1/cloudron/eventlog')
superagent.get(SERVER_URL + '/api/v1/eventlog')
.query({ access_token: token, page: 1, per_page: 10, search: EMAIL, action: 'cloudron.activate' })
.end(function (error, result) {
expect(result.statusCode).to.equal(200);
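A query sketch for the relocated eventlog route exercised by the tests above; `SERVER_URL`, `token` and `EMAIL` are the same test fixtures, and the response field name is an assumption:

```javascript
var superagent = require('superagent');

superagent.get(SERVER_URL + '/api/v1/eventlog')
    .query({ access_token: token, page: 1, per_page: 10, action: 'cloudron.activate', search: EMAIL })
    .end(function (error, result) {
        if (error) return console.error(error);
        console.log(result.body);   // paged event list; exact shape is an assumption
    });
```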
+151 -114
@@ -533,14 +533,12 @@ describe('Settings API', function () {
var dnsAnswerQueue = [];
var dkimDomain, spfDomain, mxDomain, dmarcDomain;

this.timeout(10000);

before(function (done) {
var dig = require('../../dig.js');
var dns = require('native-dns');

// replace dns resolveTxt()
resolve = dig.resolve;
dig.resolve = function (hostname, type, options, callback) {
resolve = dns.resolve;
dns.resolve = function (hostname, type, callback) {
expect(hostname).to.be.a('string');
expect(callback).to.be.a('function');

@@ -558,15 +556,15 @@ describe('Settings API', function () {
});

after(function (done) {
var dig = require('../../dig.js');
var dns = require('native-dns');

dig.resolve = resolve;
dns.resolve = resolve;

done();
});

it('does not fail when dns errors', function (done) {
superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);
@@ -585,42 +583,42 @@ describe('Settings API', function () {
it('succeeds with dns errors', function (done) {
clearDnsAnswerQueue();

superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);

expect(res.body.dns.dkim).to.be.an('object');
expect(res.body.dns.dkim.domain).to.eql(dkimDomain);
expect(res.body.dns.dkim.type).to.eql('TXT');
expect(res.body.dns.dkim.value).to.eql(null);
expect(res.body.dns.dkim.expected).to.eql('"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dns.dkim.status).to.eql(false);
expect(res.body.dkim).to.be.an('object');
expect(res.body.dkim.domain).to.eql(dkimDomain);
expect(res.body.dkim.type).to.eql('TXT');
expect(res.body.dkim.value).to.eql(null);
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
expect(res.body.dkim.status).to.eql(false);

expect(res.body.dns.spf).to.be.an('object');
expect(res.body.dns.spf.domain).to.eql(spfDomain);
expect(res.body.dns.spf.type).to.eql('TXT');
expect(res.body.dns.spf.value).to.eql(null);
expect(res.body.dns.spf.expected).to.eql('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
expect(res.body.dns.spf.status).to.eql(false);
expect(res.body.spf).to.be.an('object');
expect(res.body.spf.domain).to.eql(spfDomain);
expect(res.body.spf.type).to.eql('TXT');
expect(res.body.spf.value).to.eql(null);
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
expect(res.body.spf.status).to.eql(false);

expect(res.body.dns.dmarc).to.be.an('object');
expect(res.body.dns.dmarc.type).to.eql('TXT');
expect(res.body.dns.dmarc.value).to.eql(null);
expect(res.body.dns.dmarc.expected).to.eql('"v=DMARC1; p=reject; pct=100"');
expect(res.body.dns.dmarc.status).to.eql(false);
expect(res.body.dmarc).to.be.an('object');
expect(res.body.dmarc.type).to.eql('TXT');
expect(res.body.dmarc.value).to.eql(null);
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
expect(res.body.dmarc.status).to.eql(false);

expect(res.body.dns.mx).to.be.an('object');
expect(res.body.dns.mx.type).to.eql('MX');
expect(res.body.dns.mx.value).to.eql(null);
expect(res.body.dns.mx.expected).to.eql('10 ' + config.mailFqdn() + '.');
expect(res.body.dns.mx.status).to.eql(false);
expect(res.body.mx).to.be.an('object');
expect(res.body.mx.type).to.eql('MX');
expect(res.body.mx.value).to.eql(null);
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
expect(res.body.mx.status).to.eql(false);

expect(res.body.dns.ptr).to.be.an('object');
expect(res.body.dns.ptr.type).to.eql('PTR');
expect(res.body.ptr).to.be.an('object');
expect(res.body.ptr.type).to.eql('PTR');
// expect(res.body.ptr.value).to.eql(null); this will be anything random
expect(res.body.dns.ptr.expected).to.eql(config.mailFqdn() + '.');
expect(res.body.dns.ptr.status).to.eql(false);
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
expect(res.body.ptr.status).to.eql(false);

done();
});
@@ -634,34 +632,34 @@ describe('Settings API', function () {
dnsAnswerQueue[mxDomain].MX = null;
dnsAnswerQueue[dmarcDomain].TXT = null;

superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);

expect(res.body.dns.spf).to.be.an('object');
expect(res.body.dns.spf.expected).to.eql('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
expect(res.body.dns.spf.status).to.eql(false);
expect(res.body.dns.spf.value).to.eql(null);
expect(res.body.spf).to.be.an('object');
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
expect(res.body.spf.status).to.eql(false);
expect(res.body.spf.value).to.eql(null);

expect(res.body.dns.dkim).to.be.an('object');
expect(res.body.dns.dkim.expected).to.eql('"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dns.dkim.status).to.eql(false);
expect(res.body.dns.dkim.value).to.eql(null);
expect(res.body.dkim).to.be.an('object');
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
expect(res.body.dkim.status).to.eql(false);
expect(res.body.dkim.value).to.eql(null);

expect(res.body.dns.dmarc).to.be.an('object');
expect(res.body.dns.dmarc.expected).to.eql('"v=DMARC1; p=reject; pct=100"');
expect(res.body.dns.dmarc.status).to.eql(false);
expect(res.body.dns.dmarc.value).to.eql(null);
expect(res.body.dmarc).to.be.an('object');
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
expect(res.body.dmarc.status).to.eql(false);
expect(res.body.dmarc.value).to.eql(null);

expect(res.body.dns.mx).to.be.an('object');
expect(res.body.dns.mx.status).to.eql(false);
expect(res.body.dns.mx.expected).to.eql('10 ' + config.mailFqdn() + '.');
expect(res.body.dns.mx.value).to.eql(null);
expect(res.body.mx).to.be.an('object');
expect(res.body.mx.status).to.eql(false);
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
expect(res.body.mx.value).to.eql(null);

expect(res.body.dns.ptr).to.be.an('object');
expect(res.body.dns.ptr.expected).to.eql(config.mailFqdn() + '.');
expect(res.body.dns.ptr.status).to.eql(false);
expect(res.body.ptr).to.be.an('object');
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
expect(res.body.ptr.status).to.eql(false);
// expect(res.body.ptr.value).to.eql(null); this will be anything random

done();
@@ -671,43 +669,41 @@ describe('Settings API', function () {
it('succeeds with all different spf, dkim, dmarc, mx, ptr records', function (done) {
clearDnsAnswerQueue();

dnsAnswerQueue[mxDomain].MX = [ { priority: '20', exchange: config.mailFqdn() + '.' }, { priority: '30', exchange: config.mailFqdn() + '.'} ];
dnsAnswerQueue[dmarcDomain].TXT = ['"v=DMARC2; p=reject; pct=100"'];
dnsAnswerQueue[dkimDomain].TXT = ['"v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync() + '"'];
dnsAnswerQueue[spfDomain].TXT = ['"v=spf1 a:random.com ~all"'];
dnsAnswerQueue[mxDomain].MX = [ { priority: '20', exchange: config.mailFqdn() }, { priority: '30', exchange: config.mailFqdn() } ];
dnsAnswerQueue[dmarcDomain].TXT = [['v=DMARC2; p=reject; pct=100']];
dnsAnswerQueue[dkimDomain].TXT = [['v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync()]];
dnsAnswerQueue[spfDomain].TXT = [['v=spf1 a:random.com ~all']];

superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);

expect(res.body.dns.spf).to.be.an('object');
expect(res.body.dns.spf.expected).to.eql('"v=spf1 a:' + config.adminFqdn() + ' a:random.com ~all"');
expect(res.body.dns.spf.status).to.eql(false);
expect(res.body.dns.spf.value).to.eql('"v=spf1 a:random.com ~all"');
expect(res.body.spf).to.be.an('object');
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' a:random.com ~all');
expect(res.body.spf.status).to.eql(false);
expect(res.body.spf.value).to.eql('v=spf1 a:random.com ~all');

expect(res.body.dns.dkim).to.be.an('object');
expect(res.body.dns.dkim.expected).to.eql('"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dns.dkim.status).to.eql(false);
expect(res.body.dns.dkim.value).to.eql('"v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dkim).to.be.an('object');
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
expect(res.body.dkim.status).to.eql(false);
expect(res.body.dkim.value).to.eql('v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync());

expect(res.body.dns.dmarc).to.be.an('object');
expect(res.body.dns.dmarc.expected).to.eql('"v=DMARC1; p=reject; pct=100"');
expect(res.body.dns.dmarc.status).to.eql(false);
expect(res.body.dns.dmarc.value).to.eql('"v=DMARC2; p=reject; pct=100"');
expect(res.body.dmarc).to.be.an('object');
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
expect(res.body.dmarc.status).to.eql(false);
expect(res.body.dmarc.value).to.eql('v=DMARC2; p=reject; pct=100');

expect(res.body.dns.mx).to.be.an('object');
expect(res.body.dns.mx.status).to.eql(false);
expect(res.body.dns.mx.expected).to.eql('10 ' + config.mailFqdn() + '.');
expect(res.body.dns.mx.value).to.eql('20 ' + config.mailFqdn() + '. 30 ' + config.mailFqdn() + '.');
expect(res.body.mx).to.be.an('object');
expect(res.body.mx.status).to.eql(false);
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
expect(res.body.mx.value).to.eql('20 ' + config.mailFqdn() + ' 30 ' + config.mailFqdn());

expect(res.body.dns.ptr).to.be.an('object');
expect(res.body.dns.ptr.expected).to.eql(config.mailFqdn() + '.');
expect(res.body.dns.ptr.status).to.eql(false);
expect(res.body.ptr).to.be.an('object');
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
expect(res.body.ptr.status).to.eql(false);
// expect(res.body.ptr.value).to.eql(null); this will be anything random

expect(res.body.outboundPort25).to.be.an('object');

done();
});
});
@@ -715,19 +711,19 @@ describe('Settings API', function () {
it('succeeds with existing embedded spf', function (done) {
clearDnsAnswerQueue();

dnsAnswerQueue[spfDomain].TXT = ['"v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all"'];
dnsAnswerQueue[spfDomain].TXT = [['v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all']];

superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);

expect(res.body.dns.spf).to.be.an('object');
expect(res.body.dns.spf.domain).to.eql(spfDomain);
expect(res.body.dns.spf.type).to.eql('TXT');
expect(res.body.dns.spf.value).to.eql('"v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all"');
expect(res.body.dns.spf.expected).to.eql('"v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all"');
expect(res.body.dns.spf.status).to.eql(true);
expect(res.body.spf).to.be.an('object');
expect(res.body.spf.domain).to.eql(spfDomain);
expect(res.body.spf.type).to.eql('TXT');
expect(res.body.spf.value).to.eql('v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all');
expect(res.body.spf.expected).to.eql('v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all');
expect(res.body.spf.status).to.eql(true);

done();
});
@@ -736,42 +732,83 @@ describe('Settings API', function () {
it('succeeds with all correct records', function (done) {
clearDnsAnswerQueue();

dnsAnswerQueue[mxDomain].MX = [ { priority: '10', exchange: config.mailFqdn() + '.' } ];
dnsAnswerQueue[dmarcDomain].TXT = ['"v=DMARC1; p=reject; pct=100"'];
dnsAnswerQueue[dkimDomain].TXT = ['"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"'];
dnsAnswerQueue[spfDomain].TXT = ['"v=spf1 a:' + config.adminFqdn() + ' ~all"'];
dnsAnswerQueue[mxDomain].MX = [ { priority: '10', exchange: config.mailFqdn() } ];
dnsAnswerQueue[dmarcDomain].TXT = [['v=DMARC1; p=reject; pct=100']];
dnsAnswerQueue[dkimDomain].TXT = [['v=DKIM1;', 't=s;', 'p=' + cloudron.readDkimPublicKeySync()]];
dnsAnswerQueue[spfDomain].TXT = [['v=spf1', ' a:' + config.adminFqdn(), ' ~all']];

superagent.get(SERVER_URL + '/api/v1/settings/email_status')
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);

expect(res.body.dns.dkim).to.be.an('object');
expect(res.body.dns.dkim.domain).to.eql(dkimDomain);
expect(res.body.dns.dkim.type).to.eql('TXT');
expect(res.body.dns.dkim.value).to.eql('"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dns.dkim.expected).to.eql('"v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync() + '"');
expect(res.body.dns.dkim.status).to.eql(true);
expect(res.body.dkim).to.be.an('object');
expect(res.body.dkim.domain).to.eql(dkimDomain);
expect(res.body.dkim.type).to.eql('TXT');
expect(res.body.dkim.value).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
expect(res.body.dkim.status).to.eql(true);

expect(res.body.dns.spf).to.be.an('object');
expect(res.body.dns.spf.domain).to.eql(spfDomain);
expect(res.body.dns.spf.type).to.eql('TXT');
expect(res.body.dns.spf.value).to.eql('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
expect(res.body.dns.spf.expected).to.eql('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
expect(res.body.dns.spf.status).to.eql(true);
expect(res.body.spf).to.be.an('object');
expect(res.body.spf.domain).to.eql(spfDomain);
expect(res.body.spf.type).to.eql('TXT');
expect(res.body.spf.value).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
expect(res.body.spf.status).to.eql(true);

expect(res.body.dns.dmarc).to.be.an('object');
expect(res.body.dns.dmarc.expected).to.eql('"v=DMARC1; p=reject; pct=100"');
expect(res.body.dns.dmarc.status).to.eql(true);
expect(res.body.dns.dmarc.value).to.eql('"v=DMARC1; p=reject; pct=100"');
expect(res.body.dmarc).to.be.an('object');
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
expect(res.body.dmarc.status).to.eql(true);
expect(res.body.dmarc.value).to.eql('v=DMARC1; p=reject; pct=100');

expect(res.body.dns.mx).to.be.an('object');
expect(res.body.dns.mx.status).to.eql(true);
expect(res.body.dns.mx.expected).to.eql('10 ' + config.mailFqdn() + '.');
expect(res.body.dns.mx.value).to.eql('10 ' + config.mailFqdn() + '.');
expect(res.body.mx).to.be.an('object');
expect(res.body.mx.status).to.eql(true);
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
expect(res.body.mx.value).to.eql('10 ' + config.mailFqdn());

done();
});
});
});

describe('open_registration', function () {
it('get open_registration succeeds without being set', function (done) {
superagent.get(SERVER_URL + '/api/v1/settings/open_registration')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);
expect(res.body.enabled).to.equal(false);
done();
});
});

it('cannot set without data', function (done) {
superagent.post(SERVER_URL + '/api/v1/settings/open_registration')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(400);
done();
});
});

it('set succeeds', function (done) {
superagent.post(SERVER_URL + '/api/v1/settings/open_registration')
.query({ access_token: token })
.send({ enabled: true })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);
done();
});
});

it('get succeeds', function (done) {
superagent.get(SERVER_URL + '/api/v1/settings/open_registration')
.query({ access_token: token })
.end(function (err, res) {
expect(res.statusCode).to.equal(200);
expect(res.body.enabled).to.equal(true);
done();
});
});
});
});
@@ -10,13 +10,10 @@ var appdb = require('../../appdb.js'),
config = require('../../config.js'),
database = require('../../database.js'),
expect = require('expect.js'),
hock = require('hock'),
http = require('http'),
nock = require('nock'),
superagent = require('superagent'),
server = require('../../server.js'),
settings = require('../../settings.js'),
url = require('url');
nock = require('nock');

var SERVER_URL = 'http://localhost:' + config.get('port');

@@ -75,33 +72,22 @@ describe('Internal API', function () {
before(setup);
after(cleanup);

var apiHockInstance = hock.createHock({ throwOnUnmatched: false }), apiHockServer;

before(function (done) {
apiHockInstance
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } });
var port = parseInt(url.parse(config.apiServerOrigin()).port, 10);
apiHockServer = http.createServer(apiHockInstance.handler).listen(port, done);
});

after(function (done) {
apiHockServer.close();
done();
});

describe('backup', function () {
it('succeeds', function (done) {
var scope = nock(config.apiServerOrigin())
.post('/api/v1/boxes/' + config.fqdn() + '/awscredentials?token=BACKUP_TOKEN')
.reply(201, { credentials: { AccessKeyId: 'accessKeyId', SecretAccessKey: 'secretAccessKey' } });

superagent.post(config.sysadminOrigin() + '/api/v1/backup')
.end(function (error, result) {
expect(result.statusCode).to.equal(202);

function checkAppstoreServerCalled() {
apiHockInstance.done(function (error) {
if (!error) return done();
if (scope.isDone()) {
return done();
}

setTimeout(checkAppstoreServerCalled, 100);
});
setTimeout(checkAppstoreServerCalled, 100);
}

checkAppstoreServerCalled();
@@ -153,8 +153,6 @@ function verifyPassword(req, res, next) {
if (error && error.reason === UserError.NOT_FOUND) return next(new HttpError(403, 'Password incorrect'));
if (error) return next(new HttpError(500, error));

req.body.password = '<redacted>'; // this will prevent logs from displaying plain text password

next();
});
}
+2 -2
@@ -10,7 +10,7 @@ var appdb = require('./appdb.js'),
async = require('async'),
config = require('./config.js'),
CronJob = require('cron').CronJob,
debug = require('debug')('box:scheduler'),
debug = require('debug')('box:src/scheduler'),
docker = require('./docker.js'),
_ = require('underscore');

@@ -43,7 +43,7 @@ function sync(callback) {
debug('sync: checking apps %j', allAppIds);
async.eachSeries(allApps, function (app, iteratorDone) {
var appState = gState[app.id] || null;
var schedulerConfig = app.manifest.addons ? app.manifest.addons.scheduler : null;
var schedulerConfig = app.manifest.addons.scheduler || null;

if (!appState && !schedulerConfig) return iteratorDone(); // nothing changed
@@ -24,9 +24,6 @@ if [[ $# -lt 3 ]]; then
fi

if [[ -f "$2" ]]; then
# on some vanilla ubuntu installs, the .ssh directory does not exist
mkdir -p "$(dirname $3)"

cp "$2" "$3"
chown "$1":"$1" "$3"
fi
Executable +117
@@ -0,0 +1,117 @@
#!/bin/bash

set -eu -o pipefail

if [[ $EUID -ne 0 ]]; then
echo "This script should be run as root." >&2
exit 1
fi

if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi

readonly DATA_DIR="${HOME}/data"

# verify argument count
if [[ "$1" == "s3" && $# -lt 9 ]]; then
echo "Usage: backupapp.sh s3 <appId> <s3 config url> <s3 data url> <access key id> <access key> <region> <endpoint> <password> [session token]"
exit 1
fi

if [[ "$1" == "filesystem" && $# -lt 6 ]]; then
echo "Usage: backupapp.sh filesystem <appId> <backupFolder> <configFileName> <dataFileName> <password>"
exit 1
fi

# extract arguments
readonly app_id="$2"

if [[ "$1" == "s3" ]]; then
# env vars used by the awscli
readonly s3_config_url="$3"
readonly s3_data_url="$4"
export AWS_ACCESS_KEY_ID="$5"
export AWS_SECRET_ACCESS_KEY="$6"
export AWS_DEFAULT_REGION="$7"
readonly endpoint_url="$8"
readonly password="$9"

if [ $# -gt 9 ]; then
export AWS_SESSION_TOKEN="${10}"
fi
elif [[ "$1" == "filesystem" ]]; then
readonly backup_folder="$3"
readonly backup_config_fileName="$4"
readonly backup_data_fileName="$5"
readonly password="$6"
fi

# perform backup
readonly now=$(date "+%Y-%m-%d-%H%M%S")
readonly app_data_dir="${DATA_DIR}/${app_id}"
readonly app_data_snapshot="${DATA_DIR}/snapshots/${app_id}-${now}"

btrfs subvolume snapshot -r "${app_data_dir}" "${app_data_snapshot}"

# will be checked at the end
try=0

if [[ "$1" == "s3" ]]; then
# may be empty
optional_args=""
if [ -n "${endpoint_url}" ]; then
optional_args="--endpoint-url ${endpoint_url}"
fi

# Upload config.json first because uploading tarball might take a lot of time, leading to token expiry
for try in `seq 1 5`; do
echo "Uploading config.json to ${s3_config_url} (try ${try})"
error_log=$(mktemp)

# use aws instead of curl because curl will always read entire stream memory to set Content-Length
# aws will do multipart upload
if cat "${app_data_snapshot}/config.json" \
| aws ${optional_args} s3 cp - "${s3_config_url}" 2>"${error_log}"; then
break
fi
cat "${error_log}" && rm "${error_log}"
done

if [[ ${try} -eq 5 ]]; then
echo "Backup failed uploading config.json"
btrfs subvolume delete "${app_data_snapshot}"
exit 3
fi

for try in `seq 1 5`; do
echo "Uploading backup to ${s3_data_url} (try ${try})"
error_log=$(mktemp)

if tar -czf - -C "${app_data_snapshot}" . \
| openssl aes-256-cbc -e -pass "pass:${password}" \
| aws ${optional_args} s3 cp - "${s3_data_url}" 2>"${error_log}"; then
break
fi
cat "${error_log}" && rm "${error_log}"
done
elif [[ "$1" == "filesystem" ]]; then
mkdir -p $(dirname "${backup_folder}/${backup_config_fileName}")

echo "Storing backup config to ${backup_folder}/${backup_config_fileName}"
cat "${app_data_snapshot}/config.json" > "${backup_folder}/${backup_config_fileName}"

echo "Storing backup data to ${backup_folder}/${backup_data_fileName}"
tar -czf - -C "${app_data_snapshot}" . | openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_data_fileName}"
fi

btrfs subvolume delete "${app_data_snapshot}"

if [[ ${try} -eq 5 ]]; then
echo "Backup failed uploading backup tarball"
exit 3
else
echo "Backup successful"
fi
Executable +98
@@ -0,0 +1,98 @@
#!/bin/bash

set -eu -o pipefail

if [[ $EUID -ne 0 ]]; then
echo "This script should be run as root." >&2
exit 1
fi

if [[ $# == 1 && "$1" == "--check" ]]; then
echo "OK"
exit 0
fi

# verify argument count
if [[ "$1" == "s3" && $# -lt 7 ]]; then
echo "Usage: backupbox.sh s3 <s3 url> <access key id> <access key> <region> <endpoint> <password> [session token]"
exit 1
fi

if [[ "$1" == "filesystem" && $# -lt 4 ]]; then
echo "Usage: backupbox.sh filesystem <backupFolder> <fileName> <password>"
exit 1
fi

# extract arguments
if [[ "$1" == "s3" ]]; then
# env vars used by the awscli
readonly s3_url="$2"
export AWS_ACCESS_KEY_ID="$3"
export AWS_SECRET_ACCESS_KEY="$4"
export AWS_DEFAULT_REGION="$5"
readonly endpoint_url="$6"
readonly password="$7"

if [ $# -gt 7 ]; then
export AWS_SESSION_TOKEN="$8"
fi
elif [[ "$1" == "filesystem" ]]; then
readonly backup_folder="$2"
readonly backup_fileName="$3"
readonly password="$4"
fi

# perform backup
BOX_DATA_DIR="${HOME}/boxdata"
MAIL_DATA_DIR="${HOME}/data/mail"
mail_snapshot_dir="${HOME}/data/snapshots/mail"

echo "Creating MySQL dump"
mysqldump -u root -ppassword --single-transaction --routines --triggers box > "${BOX_DATA_DIR}/box.mysqldump"

echo "Snapshotting mail"
btrfs subvolume delete "${mail_snapshot_dir}" &> /dev/null || true
btrfs subvolume snapshot -r "${MAIL_DATA_DIR}" "${mail_snapshot_dir}"

# will be checked at the end
try=0

if [[ "$1" == "s3" ]]; then
for try in `seq 1 5`; do
echo "Uploading backup to ${s3_url} (try ${try})"
error_log=$(mktemp)

# may be empty
optional_args=""
if [ -n "${endpoint_url}" ]; then
optional_args="--endpoint-url ${endpoint_url}"
fi

# use aws instead of curl because curl will always read entire stream memory to set Content-Length
# aws will do multipart upload
if tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
| openssl aes-256-cbc -e -pass "pass:${password}" \
| aws ${optional_args} s3 cp - "${s3_url}" 2>"${error_log}"; then
break
fi
cat "${error_log}" && rm "${error_log}"
done
elif [[ "$1" == "filesystem" ]]; then
echo "Storing backup to ${backup_folder}/${backup_fileName}"

mkdir -p $(dirname "${backup_folder}/${backup_fileName}")

tar -czf - -C "${HOME}" --transform="s,^boxdata/\?,box/," --transform="s,^data/mail/\?,mail/," --show-transformed-names boxdata data/mail \
| openssl aes-256-cbc -e -pass "pass:${password}" > "${backup_folder}/${backup_fileName}"
fi

echo "Deleting backup snapshot"
btrfs subvolume delete "${mail_snapshot_dir}"

if [[ ${try} -eq 5 ]]; then
echo "Backup failed"
exit 3
else
echo "Backup successful"
fi
Executable +19
@@ -0,0 +1,19 @@
#!/bin/bash

set -eu -o pipefail

readonly s3_url="$1"
export AWS_ACCESS_KEY_ID="$2"
export AWS_SECRET_ACCESS_KEY="$3"
export AWS_DEFAULT_REGION="$4"
readonly endpoint_url="$5"

optional_args=""

if [ -n "${endpoint_url}" ]; then
optional_args="--endpoint-url ${endpoint_url}"
fi

echo "Test Content" | aws ${optional_args} s3 cp - "${s3_url}"

aws ${optional_args} s3 rm "${s3_url}"
@@ -18,15 +18,20 @@ if [[ "$1" == "--check" ]]; then
fi

if [[ "${BOX_ENV}" == "cloudron" ]]; then
readonly app_data_dir="${HOME}/appsdata/$1"
readonly app_data_dir="${HOME}/data/$1"

# Only create subvolume if it does not exist
if [[ ! -d "${app_data_dir}" ]]; then
btrfs subvolume create "${app_data_dir}"
fi

mkdir -p "${app_data_dir}/data"
# only the top level ownership is changed because containers own the subdirectories
# and will chown them as necessary
chown yellowtent:yellowtent "${app_data_dir}"
chown yellowtent:yellowtent "${app_data_dir}/data"
else
readonly app_data_dir="${HOME}/.cloudron_test/appsdata/$1"
readonly app_data_dir="${HOME}/.cloudron_test/data/$1"
mkdir -p "${app_data_dir}/data"
chown ${SUDO_USER}:${SUDO_USER} "${app_data_dir}"
fi
Executable
+49
@@ -0,0 +1,49 @@
#!/bin/bash

set -eu -o pipefail

if [[ $EUID -ne 0 ]]; then
    echo "This script should be run as root." >&2
    exit 1
fi

if [[ $# == 1 && "$1" == "--check" ]]; then
    echo "OK"
    exit 0
fi

if [ $# -lt 3 ]; then
    echo "Usage: restoreapp.sh <appid> <url> <key> [aws session token]"
    exit 1
fi

readonly DATA_DIR="${HOME}/data"
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"

app_id="$1"
restore_url="$2"
restore_key="$3"
session_token="$4" # unused since it seems to be part of the url query param in v4 signature

echo "Downloading backup: ${restore_url} and key: ${restore_key}"

for try in `seq 1 5`; do
    echo "Download backup from ${restore_url} (try ${try})"
    error_log=$(mktemp)

    if $curl -L "${restore_url}" \
        | openssl aes-256-cbc -d -pass "pass:${restore_key}" \
        | tar -zxf - -C "${DATA_DIR}/${app_id}" 2>"${error_log}"; then
        chown -R yellowtent:yellowtent "${DATA_DIR}/${app_id}"
        break
    fi
    cat "${error_log}" && rm "${error_log}"
done

if [[ ${try} -eq 5 ]]; then
    echo "restore failed"
    exit 3
else
    echo "restore successful"
fi
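A hedged example of invoking this restore script; the app id, signed URL and key below are placeholders rather than real values, and the optional session token is omitted:

sudo ./restoreapp.sh 521f9cea-3a14-42b5-92fd-6ae1db4ff816 "https://s3.amazonaws.com/example-bucket/app.tar.gz.enc?X-Amz-Signature=PLACEHOLDER" examplebackupkey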
@@ -18,9 +18,13 @@ if [[ "$1" == "--check" ]]; then
fi

if [[ "${BOX_ENV}" == "cloudron" ]]; then
    readonly app_data_dir="${HOME}/appsdata/$1"
    rm -rf "${app_data_dir}"
    readonly app_data_dir="${HOME}/data/$1"
    if [[ -d "${app_data_dir}" ]]; then
        find "${app_data_dir}" -mindepth 1 -delete
        rm -rf "${app_data_dir}" || btrfs subvolume delete "${app_data_dir}"
    fi
else
    readonly app_data_dir="${HOME}/.cloudron_test/appsdata/$1"
    readonly app_data_dir="${HOME}/.cloudron_test/data/$1"
    rm -rf "${app_data_dir}"
fi
@@ -17,7 +17,5 @@ if [[ "$1" == "--check" ]]; then
exit 0
fi

echo "Running node with memory constraints"

# note BOX_ENV and NODE_ENV are derived from parent process
exec env "DEBUG=box*,connect-lastmile" /usr/bin/node --max_old_space_size=200 "$@"
# remove the file
rm -rf "${1}"
@@ -25,6 +25,7 @@ readonly sourceTarballUrl="${1}"
readonly data="${2}"

echo "Updating Cloudron with ${sourceTarballUrl}"
echo "${data}"

# TODO: pre-download tarball
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
+24
-28
@@ -14,11 +14,11 @@ var assert = require('assert'),
    database = require('./database.js'),
    eventlog = require('./eventlog.js'),
    express = require('express'),
    hat = require('hat'),
    http = require('http'),
    middleware = require('./middleware'),
    passport = require('passport'),
    path = require('path'),
    RateLimit = require('express-rate-limit'),
    routes = require('./routes/index.js');

var gHttpServer = null;
@@ -44,34 +44,25 @@ function initializeExpressSync() {
// for rate limiting
app.enable('trust proxy');

if (process.env.BOX_ENV !== 'test') {
    app.use(middleware.morgan('Box :method :url :status :response-time ms - :res[content-length]', {
        immediate: false,
        // only log failed requests by default
        skip: function (req, res) { return res.statusCode < 400; }
    }));
}
var limiter = new RateLimit({
    windowMs: 60*1000, // 1 minute
    max: 200, // limit each IP to 200 requests per windowMs
    delayMs: 0 // disable delaying - full speed until the max limit is reached
});

if (process.env.BOX_ENV !== 'test') app.use(middleware.morgan('Box :method :url :status :response-time ms - :res[content-length]', { immediate: false }));

var router = new express.Router();
router.del = router.delete; // amend router.del for readability further on

app
    .use(limiter)
    .use(middleware.timeout(REQUEST_TIMEOUT))
    .use(json)
    .use(urlencoded)
    .use(middleware.cookieParser())
    .use(middleware.cors({ origins: [ '*' ], allowCredentials: false }))
    .use(middleware.session({
        secret: hat(128), // we only use the session during oauth, and already have an in-memory session store, so we can safely change that during restarts
        resave: true,
        saveUninitialized: true,
        cookie: {
            path: '/',
            httpOnly: true,
            secure: process.env.BOX_ENV !== 'test',
            maxAge: 600000
        }
    }))
    .use(middleware.session({ secret: 'yellow is blue', resave: true, saveUninitialized: true, cookie: { path: '/', httpOnly: true, secure: false, maxAge: 600000 } }))
    .use(passport.initialize())
    .use(passport.session())
    .use(router)
@@ -96,7 +87,7 @@ function initializeExpressSync() {

// public routes
router.post('/api/v1/cloudron/activate', routes.cloudron.setupTokenAuth, routes.cloudron.activate);
router.post('/api/v1/cloudron/dns_setup', routes.cloudron.providerTokenAuth, routes.cloudron.dnsSetup); // only available until no-domain
router.post('/api/v1/cloudron/dns_setup', routes.cloudron.dnsSetup); // only available until no-domain
router.get ('/api/v1/cloudron/progress', routes.cloudron.getProgress);
router.get ('/api/v1/cloudron/status', routes.cloudron.getStatus);
router.get ('/api/v1/cloudron/avatar', routes.settings.getCloudronAvatar); // this is a public alias for /api/v1/settings/cloudron_avatar
@@ -111,16 +102,16 @@ function initializeExpressSync() {
router.get ('/api/v1/cloudron/config', cloudronScope, routes.cloudron.getConfig);
router.post('/api/v1/cloudron/update', cloudronScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.cloudron.update);
router.post('/api/v1/cloudron/check_for_updates', cloudronScope, routes.user.requireAdmin, routes.cloudron.checkForUpdates);
router.post('/api/v1/cloudron/reboot', cloudronScope, routes.user.requireAdmin, routes.cloudron.reboot);
router.post('/api/v1/cloudron/reboot', cloudronScope, routes.cloudron.reboot);
router.post('/api/v1/cloudron/migrate', cloudronScope, routes.user.requireAdmin, routes.user.verifyPassword, routes.cloudron.migrate);
router.get ('/api/v1/cloudron/graphs', cloudronScope, routes.user.requireAdmin, routes.graphs.getGraphs);
router.get ('/api/v1/cloudron/disks', cloudronScope, routes.user.requireAdmin, routes.cloudron.getDisks);
router.get ('/api/v1/cloudron/logs', cloudronScope, routes.user.requireAdmin, routes.cloudron.getLogs);
router.get ('/api/v1/cloudron/graphs', cloudronScope, routes.graphs.getGraphs);
router.get ('/api/v1/cloudron/ssh/authorized_keys', cloudronScope, routes.user.requireAdmin, routes.ssh.getAuthorizedKeys);
router.put ('/api/v1/cloudron/ssh/authorized_keys', cloudronScope, routes.user.requireAdmin, routes.ssh.addAuthorizedKey);
router.get ('/api/v1/cloudron/ssh/authorized_keys/:identifier', cloudronScope, routes.user.requireAdmin, routes.ssh.getAuthorizedKey);
router.del ('/api/v1/cloudron/ssh/authorized_keys/:identifier', cloudronScope, routes.user.requireAdmin, routes.ssh.delAuthorizedKey);
router.get ('/api/v1/cloudron/eventlog', settingsScope, routes.user.requireAdmin, routes.eventlog.get);

// feedback
router.post('/api/v1/cloudron/feedback', usersScope, routes.cloudron.feedback);

// profile api, working off the user behind the provided token
router.get ('/api/v1/profile', profileScope, routes.profile.get);
@@ -157,6 +148,8 @@ function initializeExpressSync() {
router.post('/api/v1/session/password/reset', csrf, routes.oauth2.passwordReset);
router.get ('/api/v1/session/account/setup.html', csrf, routes.oauth2.accountSetupSite);
router.post('/api/v1/session/account/setup', csrf, routes.oauth2.accountSetup);
router.get ('/api/v1/session/account/create.html', csrf, routes.oauth2.accountCreateSite);
router.post('/api/v1/session/account/create', csrf, routes.oauth2.accountCreate);

// oauth2 routes
router.get ('/api/v1/oauth/dialog/authorize', routes.oauth2.authorization);
@@ -203,7 +196,6 @@ function initializeExpressSync() {
router.get ('/api/v1/settings/backup_config', settingsScope, routes.user.requireAdmin, routes.settings.getBackupConfig);
router.post('/api/v1/settings/backup_config', settingsScope, routes.user.requireAdmin, routes.settings.setBackupConfig);
router.post('/api/v1/settings/certificate', settingsScope, routes.user.requireAdmin, routes.settings.setFallbackCertificate);

router.post('/api/v1/settings/admin_certificate', settingsScope, routes.user.requireAdmin, routes.settings.setAdminCertificate);
router.get ('/api/v1/settings/time_zone', settingsScope, routes.user.requireAdmin, routes.settings.getTimeZone);
router.post('/api/v1/settings/time_zone', settingsScope, routes.user.requireAdmin, routes.settings.setTimeZone);
@@ -211,13 +203,17 @@ function initializeExpressSync() {
router.post('/api/v1/settings/appstore_config', settingsScope, routes.user.requireAdmin, routes.settings.setAppstoreConfig);
router.get ('/api/v1/settings/mail_config', settingsScope, routes.user.requireAdmin, routes.settings.getMailConfig);
router.post('/api/v1/settings/mail_config', settingsScope, routes.user.requireAdmin, routes.settings.setMailConfig);
router.get ('/api/v1/settings/open_registration', settingsScope, routes.user.requireAdmin, routes.settings.getOpenRegistration);
router.post('/api/v1/settings/open_registration', settingsScope, routes.user.requireAdmin, routes.settings.setOpenRegistration);

// feedback
router.post('/api/v1/feedback', usersScope, routes.cloudron.feedback);
// eventlog route
router.get('/api/v1/eventlog', settingsScope, routes.user.requireAdmin, routes.eventlog.get);

// backup routes
router.get ('/api/v1/backups', settingsScope, routes.user.requireAdmin, routes.backups.get);
router.post('/api/v1/backups', settingsScope, routes.user.requireAdmin, routes.backups.create);
router.post('/api/v1/backups/:backupId/download_url', appsScope, routes.user.requireAdmin, routes.backups.createDownloadUrl);
router.get ('/api/v1/backups/:backupId/download', appsScope, routes.user.requireAdmin, routes.backups.download);

// disable server socket "idle" timeout. we use the timeout middleware to handle timeouts on a route level
// we rely on nginx for timeouts on the TCP level (see client_header_timeout)
+55
-33
@@ -44,6 +44,9 @@ exports = module.exports = {
|
||||
getMailConfig: getMailConfig,
|
||||
setMailConfig: setMailConfig,
|
||||
|
||||
getOpenRegistration: getOpenRegistration,
|
||||
setOpenRegistration: setOpenRegistration,
|
||||
|
||||
getDefaultSync: getDefaultSync,
|
||||
getAll: getAll,
|
||||
|
||||
@@ -58,6 +61,7 @@ exports = module.exports = {
|
||||
UPDATE_CONFIG_KEY: 'update_config',
|
||||
APPSTORE_CONFIG_KEY: 'appstore_config',
|
||||
MAIL_CONFIG_KEY: 'mail_config',
|
||||
OPEN_REGISTRATION_KEY: 'open_registration',
|
||||
|
||||
events: null
|
||||
};
|
||||
@@ -65,13 +69,12 @@ exports = module.exports = {
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
BackupsError = backups.BackupsError,
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
CronJob = require('cron').CronJob,
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
debug = require('debug')('box:settings'),
|
||||
dig = require('./dig.js'),
|
||||
dns = require('native-dns'),
|
||||
cloudron = require('./cloudron.js'),
|
||||
CloudronError = cloudron.CloudronError,
|
||||
moment = require('moment-timezone'),
|
||||
@@ -97,19 +100,17 @@ var gDefaults = (function () {
|
||||
result[exports.BACKUP_CONFIG_KEY] = {
|
||||
provider: 'filesystem',
|
||||
key: '',
|
||||
backupFolder: '/var/backups',
|
||||
retentionSecs: 172800
|
||||
backupFolder: '/var/backups'
|
||||
};
|
||||
result[exports.TLS_CONFIG_KEY] = { provider: 'letsencrypt-prod' };
|
||||
result[exports.UPDATE_CONFIG_KEY] = { prerelease: false };
|
||||
result[exports.APPSTORE_CONFIG_KEY] = {};
|
||||
result[exports.MAIL_CONFIG_KEY] = { enabled: false };
|
||||
result[exports.OPEN_REGISTRATION_KEY] = false;
|
||||
|
||||
return result;
|
||||
})();
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function SettingsError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
@@ -151,8 +152,6 @@ function uninitialize(callback) {
|
||||
function getEmailStatus(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var digOptions = { server: '127.0.0.1', port: 53, timeout: 5000 };
|
||||
|
||||
var records = {}, outboundPort25 = {};
|
||||
|
||||
var dkimKey = cloudron.readDkimPublicKeySync();
|
||||
@@ -162,17 +161,17 @@ function getEmailStatus(callback) {
|
||||
records.dkim = {
|
||||
domain: constants.DKIM_SELECTOR + '._domainkey.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
expected: '"v=DKIM1; t=s; p=' + dkimKey + '"',
|
||||
expected: 'v=DKIM1; t=s; p=' + dkimKey,
|
||||
value: null,
|
||||
status: false
|
||||
};
|
||||
|
||||
dig.resolve(records.dkim.domain, records.dkim.type, digOptions, function (error, txtRecords) {
|
||||
dns.resolve(records.dkim.domain, records.dkim.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
records.dkim.value = txtRecords[0];
|
||||
records.dkim.value = txtRecords[0].join(' ');
|
||||
records.dkim.status = (records.dkim.value === records.dkim.expected);
|
||||
}
|
||||
|
||||
@@ -185,12 +184,12 @@ function getEmailStatus(callback) {
|
||||
domain: config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: '"v=spf1 a:' + config.adminFqdn() + ' ~all"',
|
||||
expected: 'v=spf1 a:' + config.adminFqdn() + ' ~all',
|
||||
status: false
|
||||
};
|
||||
|
||||
// https://agari.zendesk.com/hc/en-us/articles/202952749-How-long-can-my-SPF-record-be-
|
||||
dig.resolve(records.spf.domain, records.spf.type, digOptions, function (error, txtRecords) {
|
||||
dns.resolve(records.spf.domain, records.spf.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
@@ -198,8 +197,8 @@ function getEmailStatus(callback) {
|
||||
|
||||
var i;
|
||||
for (i = 0; i < txtRecords.length; i++) {
|
||||
if (txtRecords[i].indexOf('"v=spf1 ') !== 0) continue; // not SPF
|
||||
records.spf.value = txtRecords[i];
|
||||
if (txtRecords[i].join('').indexOf('v=spf1 ') !== 0) continue; // not SPF
|
||||
records.spf.value = txtRecords[i].join('');
|
||||
records.spf.status = records.spf.value.indexOf(' a:' + config.adminFqdn()) !== -1;
|
||||
break;
|
||||
}
|
||||
@@ -207,7 +206,7 @@ function getEmailStatus(callback) {
|
||||
if (records.spf.status) {
|
||||
records.spf.expected = records.spf.value;
|
||||
} else if (i !== txtRecords.length) {
|
||||
records.spf.expected = '"v=spf1 a:' + config.adminFqdn() + ' ' + records.spf.value.slice('"v=spf1 '.length);
|
||||
records.spf.expected = 'v=spf1 a:' + config.adminFqdn() + ' ' + records.spf.value.slice('v=spf1 '.length);
|
||||
}
|
||||
|
||||
callback();
|
||||
@@ -219,16 +218,16 @@ function getEmailStatus(callback) {
|
||||
domain: config.fqdn(),
|
||||
type: 'MX',
|
||||
value: null,
|
||||
expected: '10 ' + config.mailFqdn() + '.',
|
||||
expected: '10 ' + config.mailFqdn(),
|
||||
status: false
|
||||
};
|
||||
|
||||
dig.resolve(records.mx.domain, records.mx.type, digOptions, function (error, mxRecords) {
|
||||
dns.resolve(records.mx.domain, records.mx.type, function (error, mxRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(mxRecords) && mxRecords.length !== 0) {
|
||||
records.mx.status = mxRecords.length == 1 && mxRecords[0].exchange === (config.mailFqdn() + '.');
|
||||
records.mx.status = mxRecords.length == 1 && mxRecords[0].exchange === config.mailFqdn();
|
||||
records.mx.value = mxRecords.map(function (r) { return r.priority + ' ' + r.exchange; }).join(' ');
|
||||
}
|
||||
|
||||
@@ -241,16 +240,16 @@ function getEmailStatus(callback) {
|
||||
domain: '_dmarc.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: '"v=DMARC1; p=reject; pct=100"',
|
||||
expected: 'v=DMARC1; p=reject; pct=100',
|
||||
status: false
|
||||
};
|
||||
|
||||
dig.resolve(records.dmarc.domain, records.dmarc.type, digOptions, function (error, txtRecords) {
|
||||
dns.resolve(records.dmarc.domain, records.dmarc.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
records.dmarc.value = txtRecords[0];
|
||||
records.dmarc.value = txtRecords[0].join(' ');
|
||||
records.dmarc.status = (records.dmarc.value === records.dmarc.expected);
|
||||
}
|
||||
|
||||
@@ -263,7 +262,7 @@ function getEmailStatus(callback) {
|
||||
domain: null,
|
||||
type: 'PTR',
|
||||
value: null,
|
||||
expected: config.mailFqdn() + '.',
|
||||
expected: config.mailFqdn(),
|
||||
status: false
|
||||
};
|
||||
|
||||
@@ -272,13 +271,13 @@ function getEmailStatus(callback) {
|
||||
|
||||
records.ptr.domain = ip.split('.').reverse().join('.') + '.in-addr.arpa';
|
||||
|
||||
dig.resolve(ip, 'PTR', digOptions, function (error, ptrRecords) {
|
||||
dns.reverse(ip, function (error, ptrRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(ptrRecords) && ptrRecords.length !== 0) {
|
||||
records.ptr.value = ptrRecords.join(' ');
|
||||
records.ptr.status = ptrRecords.some(function (v) { return v === records.ptr.expected; });
|
||||
records.ptr.status = ptrRecords.some(function (v) { return v === config.mailFqdn(); });
|
||||
}
|
||||
|
||||
return callback();
|
||||
@@ -287,8 +286,6 @@ function getEmailStatus(callback) {
|
||||
}
|
||||
|
||||
function checkOutbound25(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var smtpServer = _.sample([
|
||||
'smtp.gmail.com',
|
||||
'smtp.live.com',
|
||||
@@ -309,7 +306,7 @@ function getEmailStatus(callback) {
|
||||
client.on('connect', function () {
|
||||
outboundPort25.status = true;
|
||||
outboundPort25.value = 'OK';
|
||||
client.destroy(); // do not use end() because it still triggers timeout
|
||||
client.end();
|
||||
callback();
|
||||
});
|
||||
client.on('timeout', function () {
|
||||
@@ -336,6 +333,10 @@ function getEmailStatus(callback) {
|
||||
};
|
||||
}
|
||||
|
||||
dns.platform.timeout = 5000; // hack so that each query finishes in 5 seconds. this applies to _each_ ns
|
||||
dns.platform.name_servers = [ { address: '127.0.0.1', port: 53 } ];
|
||||
dns.platform.attempts = 1;
|
||||
|
||||
async.parallel([
|
||||
ignoreError('mx', checkMx),
|
||||
ignoreError('spf', checkSpf),
|
||||
@@ -513,8 +514,6 @@ function setDnsConfig(dnsConfig, domain, callback) {
|
||||
|
||||
exports.events.emit(exports.DNS_CONFIG_KEY, dnsConfig);
|
||||
|
||||
cloudron.configureWebadmin(NOOP_CALLBACK); // do not block
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
@@ -591,9 +590,7 @@ function setBackupConfig(backupConfig, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
backups.testConfig(backupConfig, function (error) {
|
||||
if (error && error.reason === BackupsError.BAD_FIELD) return callback(new SettingsError(SettingsError.BAD_FIELD, error.message));
|
||||
if (error && error.reason === BackupsError.EXTERNAL_ERROR) return callback(new SettingsError(SettingsError.EXTERNAL_ERROR, error.message));
|
||||
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
settingsdb.set(exports.BACKUP_CONFIG_KEY, JSON.stringify(backupConfig), function (error) {
|
||||
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
|
||||
@@ -653,6 +650,31 @@ function setMailConfig(mailConfig, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getOpenRegistration(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settingsdb.get(exports.OPEN_REGISTRATION_KEY, function (error, value) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(null, gDefaults[exports.OPEN_REGISTRATION_KEY]);
|
||||
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
|
||||
|
||||
// settingsdb holds string values only
|
||||
callback(null, !!value);
|
||||
});
|
||||
}
|
||||
|
||||
function setOpenRegistration(enabled, callback) {
|
||||
assert.strictEqual(typeof enabled, 'boolean');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
settingsdb.set(exports.OPEN_REGISTRATION_KEY, enabled ? 'enabled' : '', function (error) {
|
||||
if (error) return callback(new SettingsError(SettingsError.INTERNAL_ERROR, error));
|
||||
|
||||
exports.events.emit(exports.OPEN_REGISTRATION_KEY, enabled);
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
function getAppstoreConfig(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
+2
-7
@@ -37,7 +37,7 @@ function exec(tag, file, args, callback) {

    callback = once(callback); // exit may or may not be called after an 'error'

    debug(tag + ' execFile: %s', file); // do not dump args as it might have sensitive info
    debug(tag + ' execFile: %s %s', file, args.join(' '));

    var cp = child_process.spawn(file, args);
    cp.stdout.on('data', function (data) {
@@ -50,12 +50,8 @@ function exec(tag, file, args, callback) {

    cp.on('exit', function (code, signal) {
        if (code || signal) debug(tag + ' code: %s, signal: %s', code, signal);
        if (code === 0) return callback(null);

        var e = new Error(util.format(tag + ' exited with error %s signal %s', code, signal));
        e.code = code;
        e.signal = signal;
        callback(e);
        callback(code === 0 ? null : new Error(util.format(tag + ' exited with error %s signal %s', code, signal)));
    });

    cp.on('error', function (error) {
@@ -74,7 +70,6 @@ function sudo(tag, args, callback) {
    // -S makes sudo read stdin for password
    var cp = exec(tag, SUDO, [ '-S' ].concat(args), callback);
    cp.stdin.end();
    return cp;
}

function sudoSync(tag, cmd, callback) {
+6
-8
@@ -13,7 +13,6 @@ exports = module.exports = {

var assert = require('assert'),
    config = require('./config.js'),
    debug = require('debug')('box:ssh'),
    fs = require('fs'),
    path = require('path'),
    safe = require('safetydance'),
@@ -51,16 +50,17 @@ SshError.INTERNAL_ERROR = 'Internal Error';

function clear(callback) {
    assert.strictEqual(typeof callback, 'function');

    safe.fs.unlinkSync(AUTHORIZED_KEYS_FILEPATH);
    callback();
    fs.unlink(AUTHORIZED_KEYS_FILEPATH, function (error) {
        if (error && error.code !== 'ENOENT') return callback(error);
        callback();
    });
}

function saveKeys(keys) {
    assert(Array.isArray(keys));

    if (!safe.fs.writeFileSync(AUTHORIZED_KEYS_TMP_FILEPATH, keys.map(function (k) { return k.key; }).join('\n'))) {
        debug('Error writing to temporary file', safe.error);
        console.error(safe.error);
        return false;
    }

@@ -68,7 +68,7 @@ function saveKeys(keys) {
        // 600 = rw-------
        fs.chmodSync(AUTHORIZED_KEYS_TMP_FILEPATH, '600');
    } catch (e) {
        debug('Failed to adjust permissions of %s %j', AUTHORIZED_KEYS_TMP_FILEPATH, e);
        console.error('Failed to adjust permissions of %s', AUTHORIZED_KEYS_TMP_FILEPATH, e);
        return false;
    }

@@ -89,8 +89,6 @@ function getKeys() {
        .map(function (k) { return { identifier: k.split(' ')[2], key: k }; })
        .filter(function (k) { return k.identifier && k.key; });

    safe.fs.unlinkSync(AUTHORIZED_KEYS_TMP_FILEPATH);

    return keys;
}
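As a reference for the parsing in getKeys() above, an authorized_keys entry is expected to look like the hypothetical line below; the third space-separated field (the key comment, "user@laptop" here) is what the code treats as the key identifier:

# hypothetical entry; key material and path are placeholders
echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABPLACEHOLDER user@laptop' >> /path/to/authorized_keys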
+143
-158
@@ -1,31 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
backup: backup,
|
||||
restore: restore,
|
||||
copyBackup: copyBackup,
|
||||
removeBackups: removeBackups,
|
||||
getBoxBackupDetails: getBoxBackupDetails,
|
||||
getAppBackupDetails: getAppBackupDetails,
|
||||
|
||||
getRestoreUrl: getRestoreUrl,
|
||||
getAppRestoreConfig: getAppRestoreConfig,
|
||||
getLocalFilePath: getLocalFilePath,
|
||||
|
||||
copyObject: copyObject,
|
||||
removeBackup: removeBackup,
|
||||
|
||||
backupDone: backupDone,
|
||||
|
||||
testConfig: testConfig,
|
||||
testConfig: testConfig
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
AWS = require('aws-sdk'),
|
||||
BackupsError = require('../backups.js').BackupsError,
|
||||
config = require('../config.js'),
|
||||
debug = require('debug')('box:storage/caas'),
|
||||
once = require('once'),
|
||||
PassThrough = require('stream').PassThrough,
|
||||
path = require('path'),
|
||||
S3BlockReadStream = require('s3-block-read-stream'),
|
||||
superagent = require('superagent'),
|
||||
targz = require('./targz.js');
|
||||
safe = require('safetydance'),
|
||||
SettingsError = require('../settings.js').SettingsError,
|
||||
superagent = require('superagent');
|
||||
|
||||
var FILE_TYPE = '.tar.gz.enc';
|
||||
|
||||
// internal only
|
||||
function getBackupCredentials(apiConfig, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -35,7 +33,7 @@ function getBackupCredentials(apiConfig, callback) {
|
||||
superagent.post(url).query({ token: apiConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 201) return callback(new Error(result.text));
|
||||
if (!result.body || !result.body.credentials) return callback(new Error('Unexpected response: ' + JSON.stringify(result.headers)));
|
||||
if (!result.body || !result.body.credentials) return callback(new Error('Unexpected response'));
|
||||
|
||||
var credentials = {
|
||||
signatureVersion: 'v4',
|
||||
@@ -51,178 +49,165 @@ function getBackupCredentials(apiConfig, callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function getBackupFilePath(apiConfig, backupId) {
|
||||
function getBoxBackupDetails(apiConfig, id, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var s3Url = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + id;
|
||||
var region = apiConfig.region || 'us-east-1';
|
||||
|
||||
var details = {
|
||||
backupScriptArguments: [ 's3', s3Url, result.accessKeyId, result.secretAccessKey, region, '', apiConfig.key, result.sessionToken ]
|
||||
};
|
||||
|
||||
callback(null, details);
|
||||
});
|
||||
}
|
||||
|
||||
function getAppBackupDetails(apiConfig, appId, dataId, configId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof dataId, 'string');
|
||||
assert.strictEqual(typeof configId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var s3DataUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + dataId;
|
||||
var s3ConfigUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + configId;
|
||||
var region = apiConfig.region || 'us-east-1';
|
||||
|
||||
var details = {
|
||||
backupScriptArguments: [ 's3', appId, s3ConfigUrl, s3DataUrl, result.accessKeyId, result.secretAccessKey, region, '', apiConfig.key, result.sessionToken ]
|
||||
};
|
||||
|
||||
callback(null, details);
|
||||
});
|
||||
}
|
||||
|
||||
function getRestoreUrl(apiConfig, filename, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (!apiConfig.bucket || !apiConfig.prefix) return new Error('Invalid configuration'); // prevent error in s3
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
credentials.region = apiConfig.region; // use same region as where we uploaded
|
||||
var s3 = new AWS.S3(credentials);
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
Key: apiConfig.prefix + '/' + filename,
|
||||
Expires: 60 * 60 * 24 /* 1 day */
|
||||
};
|
||||
|
||||
var url = s3.getSignedUrl('getObject', params);
|
||||
|
||||
callback(null, { url: url });
|
||||
});
|
||||
}
|
||||
|
||||
function getAppRestoreConfig(apiConfig, backupId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
const FILE_TYPE = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
|
||||
var configFilename = backupId.replace(/\.tar\.gz$/, '.json');
|
||||
|
||||
return path.join(apiConfig.prefix, backupId.endsWith(FILE_TYPE) ? backupId : backupId+FILE_TYPE);
|
||||
getRestoreUrl(apiConfig, configFilename, function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
superagent.get(result.url).buffer(true).timeout(30 * 1000).end(function (error, response) {
|
||||
if (error && !error.response) return callback(new Error(error.message));
|
||||
if (response.statusCode !== 200) return callback(new Error('Invalid response code when getting config.json : ' + response.statusCode));
|
||||
|
||||
var config = safe.JSON.parse(response.text);
|
||||
if (!config) return callback(new Error('Error in config:' + safe.error.message));
|
||||
|
||||
return callback(null, config);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// storage api
|
||||
function backup(apiConfig, backupId, sourceDirectories, callback) {
|
||||
function getLocalFilePath(apiConfig, filename, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback(new Error('not supported'));
|
||||
}
|
||||
|
||||
function copyObject(apiConfig, from, to, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof from, 'string');
|
||||
assert.strictEqual(typeof to, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (!apiConfig.bucket || !apiConfig.prefix) return new Error('Invalid configuration'); // prevent error in s3
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket, // target bucket
|
||||
Key: apiConfig.prefix + '/' + to, // target file
|
||||
CopySource: apiConfig.bucket + '/' + apiConfig.prefix + '/' + from, // source
|
||||
};
|
||||
|
||||
var s3 = new AWS.S3(credentials);
|
||||
s3.copyObject(params, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function removeBackup(apiConfig, backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(Array.isArray(sourceDirectories));
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback = once(callback);
|
||||
// Result: none
|
||||
|
||||
var backupFilePath = getBackupFilePath(apiConfig, backupId);
|
||||
|
||||
debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var passThrough = new PassThrough();
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
Key: backupFilePath,
|
||||
Body: passThrough
|
||||
};
|
||||
|
||||
var s3 = new AWS.S3(credentials);
|
||||
// s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
|
||||
s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
|
||||
if (error) {
|
||||
debug('[%s] backup: s3 upload error.', backupId, error);
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
|
||||
targz.create(sourceDirectories, apiConfig.key || null, passThrough, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function restore(apiConfig, backupId, destination, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof destination, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback = once(callback);
|
||||
|
||||
var backupFilePath = getBackupFilePath(apiConfig, backupId);
|
||||
|
||||
debug('[%s] restore: %s -> %s', backupId, backupFilePath, destination);
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
Key: backupFilePath
|
||||
};
|
||||
|
||||
var s3 = new AWS.S3(credentials);
|
||||
var multipartDownload = new S3BlockReadStream(s3, params, { blockSize: 64 * 1024 * 1024, logCallback: debug });
|
||||
|
||||
multipartDownload.on('error', function (error) {
|
||||
// TODO ENOENT for the mock, fix upstream!
|
||||
if (error.code === 'NoSuchKey' || error.code === 'ENOENT') return callback(new BackupsError(BackupsError.NOT_FOUND));
|
||||
|
||||
debug('[%s] restore: s3 stream error.', backupId, error);
|
||||
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
targz.extract(multipartDownload, destination, apiConfig.key || null, callback);
|
||||
});
|
||||
}
|
||||
|
||||
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof oldBackupId, 'string');
|
||||
assert.strictEqual(typeof newBackupId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
Key: getBackupFilePath(apiConfig, newBackupId),
|
||||
CopySource: path.join(apiConfig.bucket, getBackupFilePath(apiConfig, oldBackupId))
|
||||
};
|
||||
|
||||
var s3 = new AWS.S3(credentials);
|
||||
s3.copyObject(params, function (error) {
|
||||
if (error && error.code === 'NoSuchKey') return callback(new BackupsError(BackupsError.NOT_FOUND));
|
||||
if (error) {
|
||||
debug('copyBackup: s3 copy error.', error);
|
||||
return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
}
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function removeBackups(apiConfig, backupIds, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert(Array.isArray(backupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
getBackupCredentials(apiConfig, function (error, credentials) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var params = {
|
||||
Bucket: apiConfig.bucket,
|
||||
Delete: {
|
||||
Objects: [ ] // { Key }
|
||||
}
|
||||
};
|
||||
|
||||
backupIds.forEach(function (backupId) {
|
||||
params.Delete.Objects.push({ Key: getBackupFilePath(apiConfig, backupId) });
|
||||
});
|
||||
|
||||
var s3 = new AWS.S3(credentials);
|
||||
s3.deleteObjects(params, function (error, data) {
|
||||
if (error) debug('Unable to remove %s. Not fatal.', params.Key, error);
|
||||
else debug('removeBackups: Deleted: %j Errors: %j', data.Deleted, data.Errors);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
});
|
||||
callback(new Error('not implemented'));
|
||||
}
|
||||
|
||||
function testConfig(apiConfig, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (config.provider() !== 'caas') return callback(new BackupsError(BackupsError.BAD_FIELD, 'instance provider must be caas'));
|
||||
if (config.provider() !== 'caas') return callback(new SettingsError(SettingsError.BAD_FIELD, 'instance provider must be caas'));
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function backupDone(backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(Array.isArray(appBackupIds));
|
||||
function backupDone(filename, app, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert(!app || typeof app === 'object');
|
||||
assert(!appBackupIds || Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Caas expects filenames instead of backupIds; this means no prefix, but a file type extension
|
||||
var boxBackupFilename = backupId + FILE_TYPE;
|
||||
var appBackupFilenames = appBackupIds.map(function (id) { return id + FILE_TYPE; });
|
||||
|
||||
debug('[%s] backupDone: %s apps %j', backupId, boxBackupFilename, appBackupFilenames);
|
||||
debug('backupDone %s', filename);
|
||||
|
||||
var url = config.apiServerOrigin() + '/api/v1/boxes/' + config.fqdn() + '/backupDone';
|
||||
var data = {
|
||||
boxVersion: config.version(),
|
||||
restoreKey: boxBackupFilename,
|
||||
appId: null, // now unused
|
||||
appVersion: null, // now unused
|
||||
appBackupIds: appBackupFilenames
|
||||
restoreKey: filename,
|
||||
appId: app ? app.id : null,
|
||||
appVersion: app ? app.manifest.version : null,
|
||||
appBackupIds: appBackupIds
|
||||
};
|
||||
|
||||
superagent.post(url).send(data).query({ token: config.token() }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode !== 200) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, result.text));
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode !== 200) return callback(new Error(result.text));
|
||||
if (!result.body) return callback(new Error('Unexpected response'));
|
||||
|
||||
return callback(null);
|
||||
});
|
||||
|
||||
+96
-115
@@ -1,10 +1,15 @@
|
||||
'use strict';
|
||||
|
||||
exports = module.exports = {
|
||||
backup: backup,
|
||||
restore: restore,
|
||||
copyBackup: copyBackup,
|
||||
removeBackups: removeBackups,
|
||||
getBoxBackupDetails: getBoxBackupDetails,
|
||||
getAppBackupDetails: getAppBackupDetails,
|
||||
|
||||
getRestoreUrl: getRestoreUrl,
|
||||
getAppRestoreConfig: getAppRestoreConfig,
|
||||
getLocalFilePath: getLocalFilePath,
|
||||
|
||||
copyObject: copyObject,
|
||||
removeBackup: removeBackup,
|
||||
|
||||
backupDone: backupDone,
|
||||
|
||||
@@ -14,143 +19,129 @@ exports = module.exports = {
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
BackupsError = require('../backups.js').BackupsError,
|
||||
config = require('../config.js'),
|
||||
debug = require('debug')('box:storage/filesystem'),
|
||||
checksum = require('checksum'),
|
||||
fs = require('fs'),
|
||||
mkdirp = require('mkdirp'),
|
||||
once = require('once'),
|
||||
path = require('path'),
|
||||
safe = require('safetydance'),
|
||||
targz = require('./targz.js');
|
||||
SettingsError = require('../settings.js').SettingsError,
|
||||
shell = require('../shell.js'),
|
||||
util = require('util');
|
||||
|
||||
var FALLBACK_BACKUP_FOLDER = '/var/backups';
|
||||
var BACKUP_USER = config.TEST ? process.env.USER : 'yellowtent';
|
||||
var RMBACKUP_CMD = path.join(__dirname, '../scripts/rmbackup.sh');
|
||||
|
||||
// internal only
|
||||
function getBackupFilePath(apiConfig, backupId) {
|
||||
function getBoxBackupDetails(apiConfig, id, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
|
||||
const FILE_TYPE = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
|
||||
|
||||
return path.join(apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER, backupId.endsWith(FILE_TYPE) ? backupId : backupId+FILE_TYPE);
|
||||
}
|
||||
|
||||
// storage api
|
||||
function backup(apiConfig, backupId, sourceDirectories, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(Array.isArray(sourceDirectories));
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback = once(callback);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
|
||||
var backupFilePath = getBackupFilePath(apiConfig, backupId);
|
||||
var details = {
|
||||
backupScriptArguments: [ 'filesystem', backupFolder, id, apiConfig.key ]
|
||||
};
|
||||
|
||||
debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);
|
||||
callback(null, details);
|
||||
}
|
||||
|
||||
mkdirp(path.dirname(backupFilePath), function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
function getAppBackupDetails(apiConfig, appId, dataId, configId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof dataId, 'string');
|
||||
assert.strictEqual(typeof configId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var fileStream = fs.createWriteStream(backupFilePath);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
|
||||
fileStream.on('error', function (error) {
|
||||
debug('[%s] backup: out stream error.', backupId, error);
|
||||
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
var details = {
|
||||
backupScriptArguments: [ 'filesystem', appId, backupFolder, configId, dataId, apiConfig.key ]
|
||||
};
|
||||
|
||||
fileStream.on('close', function () {
|
||||
debug('[%s] backup: changing ownership.', backupId);
|
||||
callback(null, details);
|
||||
}
|
||||
|
||||
if (!safe.child_process.execSync('chown -R ' + BACKUP_USER + ':' + BACKUP_USER + ' ' + path.dirname(backupFilePath))) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, safe.error.message));
|
||||
function getRestoreUrl(apiConfig, filename, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
debug('[%s] backup: done.', backupId);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
var restoreUrl = 'file://' + path.join(backupFolder, filename);
|
||||
|
||||
callback(null);
|
||||
});
|
||||
checksum.file(path.join(backupFolder, filename), function (error, result) {
|
||||
if (error) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, util.format('Failed to calculate checksum:', error)));
|
||||
|
||||
targz.create(sourceDirectories, apiConfig.key || null, fileStream, callback);
|
||||
callback(null, { url: restoreUrl, sha1: result });
|
||||
});
|
||||
}
|
||||
|
||||
function restore(apiConfig, backupId, destination, callback) {
|
||||
function getAppRestoreConfig(apiConfig, backupId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert.strictEqual(typeof destination, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback = once(callback);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
var configFilename = backupId.replace(/\.tar\.gz$/, '.json');
|
||||
|
||||
var sourceFilePath = getBackupFilePath(apiConfig, backupId);
|
||||
var restoreConfig = safe.require(path.join(backupFolder, configFilename));
|
||||
if (!restoreConfig) return callback(new BackupsError(BackupsError.NOT_FOUND, 'No app backup config found for ' + configFilename));
|
||||
|
||||
debug('[%s] restore: %s -> %s', backupId, sourceFilePath, destination);
|
||||
|
||||
if (!fs.existsSync(sourceFilePath)) return callback(new BackupsError(BackupsError.NOT_FOUND, 'backup file does not exist'));
|
||||
|
||||
var fileStream = fs.createReadStream(sourceFilePath);
|
||||
|
||||
fileStream.on('error', function (error) {
|
||||
debug('restore: file stream error.', error);
|
||||
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
|
||||
targz.extract(fileStream, destination, apiConfig.key || null, callback);
|
||||
callback(null, restoreConfig);
|
||||
}
|
||||
|
||||
function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
|
||||
function getLocalFilePath(apiConfig, filename, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof oldBackupId, 'string');
|
||||
assert.strictEqual(typeof newBackupId, 'string');
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback = once(callback);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
|
||||
var oldFilePath = getBackupFilePath(apiConfig, oldBackupId);
|
||||
var newFilePath = getBackupFilePath(apiConfig, newBackupId);
|
||||
callback(null, { filePath: path.join(backupFolder, filename) });
|
||||
}
|
||||
|
||||
debug('copyBackup: %s -> %s', oldFilePath, newFilePath);
|
||||
function copyObject(apiConfig, from, to, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof from, 'string');
|
||||
assert.strictEqual(typeof to, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
mkdirp(path.dirname(newFilePath), function (error) {
|
||||
if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
var calledBack = false;
|
||||
function done (error) {
|
||||
if (!calledBack) callback(error);
|
||||
calledBack = true;
|
||||
}
|
||||
|
||||
var readStream = fs.createReadStream(oldFilePath);
|
||||
var writeStream = fs.createWriteStream(newFilePath);
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
var readStream = fs.createReadStream(path.join(backupFolder, from));
|
||||
var writeStream = fs.createWriteStream(path.join(backupFolder, to));
|
||||
|
||||
readStream.on('error', function (error) {
|
||||
debug('copyBackup: read stream error.', error);
|
||||
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
readStream.on('error', done);
|
||||
writeStream.on('error', done);
|
||||
|
||||
writeStream.on('error', function (error) {
|
||||
debug('copyBackup: write stream error.', error);
|
||||
callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
|
||||
});
|
||||
writeStream.on('close', function () {
|
||||
// avoid passing arguments
|
||||
done(null);
|
||||
});
|
||||
|
||||
writeStream.on('close', function () {
|
||||
if (!safe.child_process.execSync('chown -R ' + BACKUP_USER + ':' + BACKUP_USER + ' ' + path.dirname(newFilePath))) return callback(new BackupsError(BackupsError.INTERNAL_ERROR, safe.error.message));
|
||||
readStream.pipe(writeStream);
|
||||
}
|
||||
|
||||
function removeBackup(apiConfig, backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var backupFolder = apiConfig.backupFolder || FALLBACK_BACKUP_FOLDER;
|
||||
var appBackupJSONFiles = appBackupIds.map(function (id) { return id.replace(/\.tar\.gz$/, '.json'); });
|
||||
|
||||
async.each([backupId].concat(appBackupIds).concat(appBackupJSONFiles), function (id, callback) {
|
||||
var filePath = path.join(backupFolder, id);
|
||||
|
||||
shell.sudo('deleteBackup', [ RMBACKUP_CMD, filePath ], function (error) {
|
||||
if (error) console.error('Unable to remove %s. Not fatal.', filePath, safe.error);
|
||||
callback();
|
||||
});
|
||||
|
||||
readStream.pipe(writeStream);
|
||||
});
|
||||
}
|
||||
|
||||
function removeBackups(apiConfig, backupIds, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert(Array.isArray(backupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
async.eachSeries(backupIds, function (id, iteratorCallback) {
|
||||
var filePath = getBackupFilePath(apiConfig, id);
|
||||
|
||||
if (!safe.fs.unlinkSync(filePath)) {
|
||||
debug('removeBackups: Unable to remove %s : %s', filePath, safe.error.message);
|
||||
}
|
||||
|
||||
safe.fs.rmdirSync(path.dirname(filePath)); // try to cleanup empty directories
|
||||
|
||||
iteratorCallback();
|
||||
}, callback);
|
||||
}
|
||||
|
||||
@@ -158,27 +149,17 @@ function testConfig(apiConfig, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if ('backupFolder' in apiConfig && typeof apiConfig.backupFolder !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'backupFolder must be string'));
|
||||
if (typeof apiConfig.backupFolder !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'backupFolder must be string'));
|
||||
|
||||
// default value will be used
|
||||
if (!apiConfig.backupFolder) return callback();
|
||||
|
||||
fs.stat(apiConfig.backupFolder, function (error, result) {
|
||||
if (error) {
|
||||
debug('testConfig: %s', apiConfig.backupFolder, error);
|
||||
return callback(new BackupsError(BackupsError.BAD_FIELD, 'Directory does not exist or cannot be accessed'));
|
||||
}
|
||||
|
||||
if (!result.isDirectory()) return callback(new BackupsError(BackupsError.BAD_FIELD, 'Backup location is not a directory'));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
callback();
|
||||
}
|
||||
|
||||
function backupDone(backupId, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof backupId, 'string');
|
||||
assert(Array.isArray(appBackupIds));
|
||||
function backupDone(filename, app, appBackupIds, callback) {
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert(!app || typeof app === 'object');
|
||||
assert(!appBackupIds || Array.isArray(appBackupIds));
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
|
||||
+77
-33
@@ -7,10 +7,15 @@
|
||||
// -------------------------------------------
|
||||
|
||||
exports = module.exports = {
|
||||
backup: backup,
|
||||
restore: restore,
|
||||
copyBackup: copyBackup,
|
||||
removeBackups: removeBackups,
|
||||
getBoxBackupDetails: getBoxBackupDetails,
|
||||
getAppBackupDetails: getAppBackupDetails,
|
||||
|
||||
getRestoreUrl: getRestoreUrl,
|
||||
getAppRestoreConfig: getAppRestoreConfig,
|
||||
getLocalFilePath: getLocalFilePath,
|
||||
|
||||
copyObject: copyObject,
|
||||
removeBackup: removeBackup,
|
||||
|
||||
backupDone: backupDone,
|
||||
|
||||
@@ -19,10 +24,69 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert');
|
||||
|
||||
function backup(apiConfig, backupId, sourceDirectories, callback) {
|
||||
function getBoxBackupDetails(apiConfig, id, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof id, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: { backupScriptArguments: [] }
|
||||
// The resulting array consists of strings passed 1:1 to backupbox.sh
|
||||
|
||||
callback(new Error('not implemented'));
|
||||
}
|
||||
|
||||
function getAppBackupDetails(apiConfig, appId, dataId, configId, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof appId, 'string');
|
||||
assert.strictEqual(typeof dataId, 'string');
|
||||
assert.strictEqual(typeof configId, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: { backupScriptArguments: [] }
|
||||
// The resulting array consists of strings passed 1:1 to backupapp.sh
|
||||
|
||||
callback(new Error('not implemented'));
|
||||
}
|
||||
|
||||
function getRestoreUrl(apiConfig, filename, callback) {
|
||||
assert.strictEqual(typeof apiConfig, 'object');
|
||||
assert.strictEqual(typeof filename, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// Result: { url: <restoreUrl>, sha1: <optional> }
|
||||
// The resulting url must work with curl as it is passed into start.sh and restoreapp.sh
|
||||
|
||||
callback(new Error('not implemented'));
|
||||
}
function getAppRestoreConfig(apiConfig, backupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(sourceDirectories));
    assert.strictEqual(typeof callback, 'function');

    // var configFilename = backupId.replace(/\.tar\.gz$/, '.json');

    // Result: {} <- Backup config object from .json file
    // The resulting url must work with curl as it is passed into start.sh and restoreapp.sh

    callback(new Error('not implemented'));
}

function getLocalFilePath(apiConfig, filename, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: { filePath: <localFilePath> }
    // The resulting filePath is a local path to the backup file

    callback(new Error('not implemented'));
}

function copyObject(apiConfig, from, to, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof from, 'string');
    assert.strictEqual(typeof to, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: none
@@ -30,31 +94,10 @@ function backup(apiConfig, backupId, sourceDirectories, callback) {
    callback(new Error('not implemented'));
}

function restore(apiConfig, backupId, destination, callback) {
function removeBackup(apiConfig, backupId, appBackupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof destination, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: none

    callback(new Error('not implemented'));
}

function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldBackupId, 'string');
    assert.strictEqual(typeof newBackupId, 'string');
    assert.strictEqual(typeof callback, 'function');

    // Result: none

    callback(new Error('not implemented'));
}

function removeBackups(apiConfig, backupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert(Array.isArray(backupIds));
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    // Result: none
@@ -66,14 +109,15 @@ function testConfig(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    // Result: none - first callback argument error if config does not pass the test
    // Result: none

    callback(new Error('not implemented'));
}

function backupDone(backupId, appBackupIds, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
function backupDone(filename, app, appBackupIds, callback) {
    assert.strictEqual(typeof filename, 'string');
    assert(!app || typeof app === 'object');
    assert(!appBackupIds || Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    callback(new Error('not implemented'));
@@ -1,73 +0,0 @@
'use strict';

exports = module.exports = {
    backup: backup,
    restore: restore,
    copyBackup: copyBackup,
    removeBackups: removeBackups,

    backupDone: backupDone,

    testConfig: testConfig
};

var assert = require('assert'),
    debug = require('debug')('box:storage/noop');

function backup(apiConfig, backupId, sourceDirectories, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(sourceDirectories));
    assert.strictEqual(typeof callback, 'function');

    debug('backup: %s %j', backupId, sourceDirectories);

    callback();
}

function restore(apiConfig, backupId, destination, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof destination, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('restore: %s %s', backupId, destination);

    callback(new Error('Cannot restore from noop backend'));
}

function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldBackupId, 'string');
    assert.strictEqual(typeof newBackupId, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('copyBackup: %s -> %s', oldBackupId, newBackupId);

    callback();
}

function removeBackups(apiConfig, backupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert(Array.isArray(backupIds));
    assert.strictEqual(typeof callback, 'function');

    debug('removeBackups: %j', backupIds);

    callback();
}

function testConfig(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    callback();
}

function backupDone(backupId, appBackupIds, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    callback();
}
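As a rough usage sketch (not part of this diff), a storage backend like the noop module above is driven with an apiConfig object and a list of source directories. The { source, destination } shape of sourceDirectories is inferred from the targz.js map() function shown later in this diff; the require path, backup id and paths below are made up for illustration.

// Illustration only: calling the noop backend directly.
var noop = require('./src/storage/noop.js'); // assumed location of the module above

var apiConfig = { provider: 'noop' }; // placeholder config, the noop backend ignores it
var sourceDirectories = [
    { source: '/home/yellowtent/appsdata/abc123/data', destination: 'data' } // hypothetical app data dir
];

noop.backup(apiConfig, 'app_abc123_2017-06-01', sourceDirectories, function (error) {
    if (error) return console.error(error);
    console.log('backup skipped (noop backend)');
});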
+144 -171
@@ -1,42 +1,28 @@
'use strict';

exports = module.exports = {
    backup: backup,
    restore: restore,
    copyBackup: copyBackup,
    removeBackups: removeBackups,
    getBoxBackupDetails: getBoxBackupDetails,
    getAppBackupDetails: getAppBackupDetails,

    getRestoreUrl: getRestoreUrl,
    getAppRestoreConfig: getAppRestoreConfig,
    getLocalFilePath: getLocalFilePath,

    copyObject: copyObject,
    removeBackup: removeBackup,

    backupDone: backupDone,

    testConfig: testConfig,

    // Used to mock AWS
    _mockInject: mockInject,
    _mockRestore: mockRestore
    testConfig: testConfig
};

var assert = require('assert'),
    AWS = require('aws-sdk'),
    BackupsError = require('../backups.js').BackupsError,
    debug = require('debug')('box:storage/s3'),
    once = require('once'),
    PassThrough = require('stream').PassThrough,
    path = require('path'),
    S3BlockReadStream = require('s3-block-read-stream'),
    targz = require('./targz.js');
    safe = require('safetydance'),
    SettingsError = require('../settings.js').SettingsError,
    shell = require('../shell.js'),
    superagent = require('superagent');

// test only
var originalAWS;
function mockInject(mock) {
    originalAWS = AWS;
    AWS = mock;
}

function mockRestore() {
    AWS = originalAWS;
}

// internal only
function getBackupCredentials(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');
@@ -44,7 +30,7 @@ function getBackupCredentials(apiConfig, callback) {
    assert(apiConfig.accessKeyId && apiConfig.secretAccessKey);

    var credentials = {
        signatureVersion: apiConfig.signatureVersion || 'v4',
        signatureVersion: 'v4',
        s3ForcePathStyle: true,
        accessKeyId: apiConfig.accessKeyId,
        secretAccessKey: apiConfig.secretAccessKey,
@@ -56,190 +42,177 @@ function getBackupCredentials(apiConfig, callback) {
    callback(null, credentials);
}
function getBackupFilePath(apiConfig, backupId) {
function getBoxBackupDetails(apiConfig, id, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof id, 'string');
    assert.strictEqual(typeof callback, 'function');

    var s3Url = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + id;
    var region = apiConfig.region || 'us-east-1';

    var details = {
        backupScriptArguments: [ 's3', s3Url, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.endpoint || '', apiConfig.key ]
    };

    callback(null, details);
}

function getAppBackupDetails(apiConfig, appId, dataId, configId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof appId, 'string');
    assert.strictEqual(typeof dataId, 'string');
    assert.strictEqual(typeof configId, 'string');
    assert.strictEqual(typeof callback, 'function');

    var s3DataUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + dataId;
    var s3ConfigUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/' + configId;
    var region = apiConfig.region || 'us-east-1';

    var details = {
        backupScriptArguments: [ 's3', appId, s3ConfigUrl, s3DataUrl, apiConfig.accessKeyId, apiConfig.secretAccessKey, region, apiConfig.endpoint || '', apiConfig.key ]
    };

    callback(null, details);
}

function getRestoreUrl(apiConfig, filename, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var s3 = new AWS.S3(credentials);

        var params = {
            Bucket: apiConfig.bucket,
            Key: apiConfig.prefix + '/' + filename,
            Expires: 60 * 60 * 24 /* 1 day */
        };

        var url = s3.getSignedUrl('getObject', params);

        callback(null, { url: url });
    });
}

function getAppRestoreConfig(apiConfig, backupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof callback, 'function');

    const FILE_TYPE = apiConfig.key ? '.tar.gz.enc' : '.tar.gz';
    var configFilename = backupId.replace(/\.tar\.gz$/, '.json');

    return path.join(apiConfig.prefix, backupId.endsWith(FILE_TYPE) ? backupId : backupId+FILE_TYPE);
    getRestoreUrl(apiConfig, configFilename, function (error, result) {
        if (error) return callback(error);

        superagent.get(result.url).buffer(true).timeout(30 * 1000).end(function (error, response) {
            if (error && !error.response) return callback(new Error(error.message));
            if (response.statusCode !== 200) return callback(new Error('Invalid response code when getting config.json : ' + response.statusCode));

            var config = safe.JSON.parse(response.text);
            if (!config) return callback(new Error('Error in config:' + safe.error.message));

            return callback(null, config);
        });
    });
}
// storage api
function backup(apiConfig, backupId, sourceDirectories, callback) {
function getLocalFilePath(apiConfig, filename, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof filename, 'string');
    assert.strictEqual(typeof callback, 'function');

    callback(new Error('not supported'));
}

function copyObject(apiConfig, from, to, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof from, 'string');
    assert.strictEqual(typeof to, 'string');
    assert.strictEqual(typeof callback, 'function');

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var params = {
            Bucket: apiConfig.bucket, // target bucket
            Key: apiConfig.prefix + '/' + to, // target file
            CopySource: apiConfig.bucket + '/' + apiConfig.prefix + '/' + from, // source
        };

        var s3 = new AWS.S3(credentials);
        s3.copyObject(params, callback);
    });
}

function removeBackup(apiConfig, backupId, appBackupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(sourceDirectories));
    assert(Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    callback = once(callback);
    // Result: none

    var backupFilePath = getBackupFilePath(apiConfig, backupId);

    debug('[%s] backup: %j -> %s', backupId, sourceDirectories, backupFilePath);

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var passThrough = new PassThrough();

        var params = {
            Bucket: apiConfig.bucket,
            Key: backupFilePath,
            Body: passThrough
        };

        var s3 = new AWS.S3(credentials);
        // s3.upload automatically does a multi-part upload. we set queueSize to 1 to reduce memory usage
        s3.upload(params, { partSize: 10 * 1024 * 1024, queueSize: 1 }, function (error) {
            if (error) {
                debug('[%s] backup: s3 upload error.', backupId, error);
                return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
            }

            callback(null);
        });

        targz.create(sourceDirectories, apiConfig.key || null, passThrough, callback);
    });
}
function restore(apiConfig, backupId, destination, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof backupId, 'string');
    assert.strictEqual(typeof destination, 'string');
    assert.strictEqual(typeof callback, 'function');

    callback = once(callback);

    var backupFilePath = getBackupFilePath(apiConfig, backupId);

    debug('[%s] restore: %s -> %s', backupId, backupFilePath, destination);

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var params = {
            Bucket: apiConfig.bucket,
            Key: backupFilePath
        };

        var s3 = new AWS.S3(credentials);

        var multipartDownload = new S3BlockReadStream(s3, params, { blockSize: 64 * 1024 * 1024, logCallback: debug });

        multipartDownload.on('error', function (error) {
            // TODO ENOENT for the mock, fix upstream!
            if (error.code === 'NoSuchKey' || error.code === 'ENOENT') return callback(new BackupsError(BackupsError.NOT_FOUND));

            debug('[%s] restore: s3 stream error.', backupId, error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });

        targz.extract(multipartDownload, destination, apiConfig.key || null, callback);
    });
}

function copyBackup(apiConfig, oldBackupId, newBackupId, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof oldBackupId, 'string');
    assert.strictEqual(typeof newBackupId, 'string');
    assert.strictEqual(typeof callback, 'function');

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var params = {
            Bucket: apiConfig.bucket,
            Key: getBackupFilePath(apiConfig, newBackupId),
            CopySource: path.join(apiConfig.bucket, getBackupFilePath(apiConfig, oldBackupId))
        };

        var s3 = new AWS.S3(credentials);
        s3.copyObject(params, function (error) {
            if (error && error.code === 'NoSuchKey') return callback(new BackupsError(BackupsError.NOT_FOUND, 'Old backup not found'));
            if (error) {
                debug('copyBackup: s3 copy error.', error);
                return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
            }

            callback(null);
        });
    });
}
function removeBackups(apiConfig, backupIds, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert(Array.isArray(backupIds));
    assert.strictEqual(typeof callback, 'function');

    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var params = {
            Bucket: apiConfig.bucket,
            Delete: {
                Objects: [ ] // { Key }
            }
        };

        backupIds.forEach(function (backupId) {
            params.Delete.Objects.push({ Key: getBackupFilePath(apiConfig, backupId) });
        });

        var s3 = new AWS.S3(credentials);
        s3.deleteObjects(params, function (error, data) {
            if (error) debug('removeBackups: Unable to remove %s. Not fatal.', params.Key, error);
            else debug('removeBackups: Deleted: %j Errors: %j', data.Deleted, data.Errors);

            callback(null);
        });
    });
    callback(new Error('not implemented'));
}

function testConfig(apiConfig, callback) {
    assert.strictEqual(typeof apiConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    if (typeof apiConfig.accessKeyId !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'accessKeyId must be a string'));
    if (typeof apiConfig.secretAccessKey !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'secretAccessKey must be a string'));
    if (typeof apiConfig.bucket !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'bucket must be a string'));
    if (typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'prefix must be a string'));
    if ('signatureVersion' in apiConfig && typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'signatureVersion must be a string'));
    if ('endpoint' in apiConfig && typeof apiConfig.prefix !== 'string') return callback(new BackupsError(BackupsError.BAD_FIELD, 'endpoint must be a string'));
    if (typeof apiConfig.accessKeyId !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'accessKeyId must be a string'));
    if (typeof apiConfig.secretAccessKey !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'secretAccessKey must be a string'));
    if (typeof apiConfig.bucket !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'bucket must be a string'));
    if (typeof apiConfig.prefix !== 'string') return callback(new SettingsError(SettingsError.BAD_FIELD, 'prefix must be a string'));

    // attempt to upload and delete a file with new credentials
    // First use the javascript api, to get better feedback, then use aws cli tool
    // The javascript api always autodetects the correct settings, regardless of the region provided, the cli tool does not
    getBackupCredentials(apiConfig, function (error, credentials) {
        if (error) return callback(error);

        var params = {
            Bucket: apiConfig.bucket,
            Key: apiConfig.prefix + '/cloudron-testfile',
            Key: apiConfig.prefix + '/testfile',
            Body: 'testcontent'
        };

        var s3 = new AWS.S3(credentials);
        s3.putObject(params, function (error) {
            if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
            if (error) return callback(new SettingsError(SettingsError.EXTERNAL_ERROR, error.message));

            var params = {
                Bucket: apiConfig.bucket,
                Key: apiConfig.prefix + '/cloudron-testfile'
                Key: apiConfig.prefix + '/testfile'
            };

            s3.deleteObject(params, function (error) {
                if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
                if (error) return callback(new SettingsError(SettingsError.EXTERNAL_ERROR, error.message));

                callback();
                // now perform the same as what we do in the backup shell scripts
                var BACKUP_TEST_CMD = require('path').join(__dirname, '../scripts/backuptests3.sh');
                var tmpUrl = 's3://' + apiConfig.bucket + '/' + apiConfig.prefix + '/testfile';
                var args = [ tmpUrl, credentials.accessKeyId, credentials.secretAccessKey, credentials.region, credentials.endpoint || '' ];

                // if this fails the region is wrong, otherwise we would have failed earlier.
                shell.exec('backupTestS3', BACKUP_TEST_CMD, args, function (error) {
                    if (error) return callback(new SettingsError(SettingsError.EXTERNAL_ERROR, 'Wrong region'));

                    callback();
                });
            });
        });
    });
}
function backupDone(backupId, appBackupIds, callback) {
    assert.strictEqual(typeof backupId, 'string');
    assert(Array.isArray(appBackupIds));
function backupDone(filename, app, appBackupIds, callback) {
    assert.strictEqual(typeof filename, 'string');
    assert(!app || typeof app === 'object');
    assert(!appBackupIds || Array.isArray(appBackupIds));
    assert.strictEqual(typeof callback, 'function');

    callback();
}
@@ -1,105 +0,0 @@
'use strict';

exports = module.exports = {
    create: create,
    extract: extract
};

var assert = require('assert'),
    BackupsError = require('../backups.js').BackupsError,
    crypto = require('crypto'),
    debug = require('debug')('box:storage/targz'),
    mkdirp = require('mkdirp'),
    progress = require('progress-stream'),
    tar = require('tar-fs'),
    zlib = require('zlib');

function create(sourceDirectories, key, outStream, callback) {
    assert(Array.isArray(sourceDirectories));
    assert(key === null || typeof key === 'string');
    assert.strictEqual(typeof callback, 'function');

    var pack = tar.pack('/', {
        dereference: false, // pack the symlink and not what it points to
        entries: sourceDirectories.map(function (m) { return m.source; }),
        map: function(header) {
            sourceDirectories.forEach(function (m) {
                header.name = header.name.replace(new RegExp('^' + m.source + '(/?)'), m.destination + '$1');
            });
            return header;
        },
        strict: false // do not error for unknown types (skip fifo, char/block devices)
    });

    var gzip = zlib.createGzip({});
    var progressStream = progress({ time: 10000 }); // display a progress every 10 seconds

    pack.on('error', function (error) {
        debug('backup: tar stream error.', error);
        callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
    });

    gzip.on('error', function (error) {
        debug('backup: gzip stream error.', error);
        callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
    });

    progressStream.on('progress', function(progress) {
        debug('backup: %s@%s', Math.round(progress.transferred/1024/1024) + 'M', Math.round(progress.speed/1024/1024) + 'Mbps');
    });

    if (key !== null) {
        var encrypt = crypto.createCipher('aes-256-cbc', key);
        encrypt.on('error', function (error) {
            debug('backup: encrypt stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });
        pack.pipe(gzip).pipe(encrypt).pipe(progressStream).pipe(outStream);
    } else {
        pack.pipe(gzip).pipe(progressStream).pipe(outStream);
    }
}

function extract(inStream, destination, key, callback) {
    assert.strictEqual(typeof destination, 'string');
    assert(key === null || typeof key === 'string');
    assert.strictEqual(typeof callback, 'function');

    mkdirp(destination, function (error) {
        if (error) return callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));

        var gunzip = zlib.createGunzip({});
        var progressStream = progress({ time: 10000 }); // display a progress every 10 seconds
        var extract = tar.extract(destination);

        progressStream.on('progress', function(progress) {
            debug('restore: %s@%s', Math.round(progress.transferred/1024/1024) + 'M', Math.round(progress.speed/1024/1024) + 'Mbps');
        });

        gunzip.on('error', function (error) {
            debug('restore: gunzip stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });

        extract.on('error', function (error) {
            debug('restore: extract stream error.', error);
            callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
        });

        extract.on('finish', function () {
            debug('restore: done.');
            callback(null);
        });

        if (key !== null) {
            var decrypt = crypto.createDecipher('aes-256-cbc', key);
            decrypt.on('error', function (error) {
                debug('restore: decrypt stream error.', error);
                callback(new BackupsError(BackupsError.EXTERNAL_ERROR, error.message));
            });
            inStream.pipe(progressStream).pipe(decrypt).pipe(gunzip).pipe(extract);
        } else {
            inStream.pipe(progressStream).pipe(gunzip).pipe(extract);
        }
    });
}
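A brief usage sketch (not from the diff) of how create() and extract() can be exercised on their own with plain file streams; the paths, key and require path below are made up, and in the real code the output and input streams come from the S3 upload and S3BlockReadStream shown above. Note that create() only reports errors through its callback; completion is signalled by the consumer of outStream.

// Illustration only: round-tripping targz.js through a local file instead of S3.
var fs = require('fs');
var targz = require('./src/storage/targz.js'); // assumed path to the module above

var sources = [ { source: '/home/yellowtent/appsdata/abc123', destination: 'data' } ]; // hypothetical
var out = fs.createWriteStream('/tmp/app.tar.gz.enc');

// tar the sources, gzip and (because a key is given) encrypt them into the file
targz.create(sources, 'my-backup-key', out, function (error) {
    // only errors arrive here; success is detected on the output stream
    if (error) console.error('backup failed', error);
});

out.on('finish', function () {
    // reverse the pipeline into a destination directory
    var inStream = fs.createReadStream('/tmp/app.tar.gz.enc');
    targz.extract(inStream, '/tmp/restored', 'my-backup-key', function (error) {
        if (error) return console.error('restore failed', error);
        console.log('round trip done');
    });
});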
+2 -2
@@ -17,14 +17,14 @@ function getPublicIp(callback) {
    superagent.get('http://169.254.169.254/metadata/v1.json').timeout(30 * 1000).end(function (error, result) {
        if (error || result.statusCode !== 200) {
            console.error('Error getting metadata', error);
            return callback(new SysInfoError(SysInfoError.INTERNAL_ERROR, 'Could not detect public IP from metadata'));
            return callback(new SysInfoError(SysInfoError.INTERNAL_ERROR, 'No IP found'));
        }

        // Note that we do not use a floating IP for 3 reasons:
        // The PTR record is not set to floating IP, the outbound interface is not changeable to floating IP
        // and there are reports that port 25 on floating IP is blocked.
        var ip = safe.query(result.body, 'interfaces.public[0].ipv4.ip_address');
        if (!ip) return callback(new SysInfoError(SysInfoError.INTERNAL_ERROR, 'Could not detect public IP from interface'));
        if (!ip) return callback(new SysInfoError(SysInfoError.INTERNAL_ERROR, 'No IP found'));

        callback(null, ip);
    });
@@ -30,7 +30,7 @@ var MANIFEST = {
    "contactEmail": "support@cloudron.io",
    "version": "0.1.0",
    "manifestVersion": 1,
    "dockerImage": "cloudron/test:23.0.0",
    "dockerImage": "cloudron/test:18.0.0",
    "healthCheckPath": "/",
    "httpPort": 7777,
    "tcpPorts": {
@@ -69,7 +69,6 @@ describe('apptask', function () {
    before(function (done) {
        config.set('version', '0.5.0');
        config.set('fqdn', 'foobar.com');
        config.set('provider', 'caas');

        awsHostedZones = {
            HostedZones: [{
@@ -131,7 +130,7 @@ describe('apptask', function () {

    it('create volume', function (done) {
        apptask._createVolume(APP, function (error) {
            expect(fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id + '/data')).to.be(true);
            expect(fs.existsSync(paths.DATA_DIR + '/' + APP.id + '/data')).to.be(true);
            expect(error).to.be(null);
            done();
        });
@@ -139,7 +138,7 @@ describe('apptask', function () {

    it('delete volume', function (done) {
        apptask._deleteVolume(APP, function (error) {
            expect(!fs.existsSync(paths.APPS_DATA_DIR + '/' + APP.id + '/data')).to.be(true);
            expect(!fs.existsSync(paths.DATA_DIR + '/' + APP.id + '/data')).to.be(true);
            expect(error).to.be(null);
            done();
        });
@@ -242,3 +241,5 @@ describe('apptask', function () {
        });
    });
});
Some files were not shown because too many files have changed in this diff.