Compare commits
142 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 7035b3c18a | |||
| 2108c61d97 | |||
| 2bdbb47286 | |||
| 333b8970b8 | |||
| 5673cfe2be | |||
| 4429239dbc | |||
| b6ab9aa9f5 | |||
| 84bde6327f | |||
| d6f49eb54f | |||
| 3c8c5e158b | |||
| b3045b796f | |||
| c0febacc30 | |||
| f8ada91dc5 | |||
| d0e2ce9a9e | |||
| e157608992 | |||
| 8dbe0ddaf3 | |||
| b0cb18539c | |||
| 97b6d76694 | |||
| 9de6c8ee2b | |||
| cd28b1106b | |||
| b3a5dafee0 | |||
| eb4ab8defd | |||
| 639744e9cb | |||
| 6a942ab27a | |||
| 278f1d6d24 | |||
| 563eeca1a9 | |||
| 7a9c954646 | |||
| d768c36afb | |||
| 36ae3b267d | |||
| cd60f394d3 | |||
| 9aba90a6f7 | |||
| 68a8155f49 | |||
| 16695fd4ec | |||
| 9b6c6dc709 | |||
| 7923ed4f0d | |||
| 0b3d1c855c | |||
| d8273719d2 | |||
| c6d2c39ff7 | |||
| 6960afdf0b | |||
| 3a5000ab1d | |||
| 98951bab9e | |||
| 96fc3b8612 | |||
| 2b345b6c2d | |||
| 504662b466 | |||
| f56e6edbe4 | |||
| 191b84d389 | |||
| 8a4350d22e | |||
| cc6dae0f9e | |||
| 58528450e2 | |||
| ebf3559e60 | |||
| 57d20b2b32 | |||
| fd27240b26 | |||
| cad69d335c | |||
| 1f08cca355 | |||
| 7f4f525551 | |||
| b0037b6141 | |||
| 7956c8f58d | |||
| 330c9054b4 | |||
| d444d8552e | |||
| 595bf583c7 | |||
| 3386b99a29 | |||
| 5fd667cdaf | |||
| 4217db9e18 | |||
| b4717e2edb | |||
| 1d5465f21e | |||
| 2f1998fa67 | |||
| a7e998c030 | |||
| 8cc15726ec | |||
| 62e59868b4 | |||
| a64027f4af | |||
| f5a02930ec | |||
| 530ca20ee2 | |||
| f3b84ece3d | |||
| ca2d5957e4 | |||
| 7837214276 | |||
| 994202ca94 | |||
| ff7ceb1442 | |||
| 56545b7f41 | |||
| 586e78dfea | |||
| 92ede4c242 | |||
| 5ca2c2d564 | |||
| 9692aa3c08 | |||
| 10ad1028ae | |||
| 7155856b08 | |||
| 69aa771d44 | |||
| d164b5ae3a | |||
| b34d09f547 | |||
| 9e2850ffad | |||
| 480cface63 | |||
| 85aba589b8 | |||
| e890140aa9 | |||
| 53d56ef3a0 | |||
| b91674799b | |||
| 4bb864e2ac | |||
| 7db091525e | |||
| 695923ed75 | |||
| 1b43ccca6f | |||
| 96a0bad149 | |||
| 243ade15e1 | |||
| 9d3cf990d1 | |||
| 02bcff2223 | |||
| 8f388c86a6 | |||
| 8dc929f0ff | |||
| 509bd7e79b | |||
| 19c665d747 | |||
| cb09086ae8 | |||
| fa915d0b23 | |||
| a383f01406 | |||
| 1a46e80403 | |||
| e8cd230c12 | |||
| 0711dc2c5a | |||
| 486e72457d | |||
| 450e017bdb | |||
| c6d9cfc0d7 | |||
| a0b073d881 | |||
| 4dde16f987 | |||
| f7d2e262f4 | |||
| 34fedb5835 | |||
| ff491be976 | |||
| 7635482191 | |||
| b23001e43f | |||
| 06c8e8f0cb | |||
| ce2cd00fbf | |||
| 651af185c8 | |||
| 6951383ae0 | |||
| 37596e89b4 | |||
| 711fe37dad | |||
| 7fee3d0da0 | |||
| 45a61e9541 | |||
| bd0be2affc | |||
| 7812c0e5c2 | |||
| 7efb6d60bc | |||
| cd31e12bec | |||
| 87755c6097 | |||
| 73f56efe2c | |||
| 20eaa60a97 | |||
| b80f0082e9 | |||
| 1ff800a842 | |||
| 5b0abb1b17 | |||
| 178aa4794a | |||
| 76583cb2fa | |||
| aa484dc5b4 |
@@ -736,4 +736,33 @@
|
||||
* Cleanup graphs UI
|
||||
* Polish webadmin UI
|
||||
* Fix bug where hard disk size was detected incorrectly
|
||||
* Use overlay2 as docker storage backend for scaleway
|
||||
|
||||
[0.99.1]
|
||||
* Fix bug with duplicate nginx configs
|
||||
|
||||
[0.100.0]
|
||||
* Improve DNS notifications for email
|
||||
* Do not enable HSTS for subdomains
|
||||
|
||||
[0.100.1]
|
||||
* Fix crash when fetching mail records
|
||||
* Fix crash in LDAP server when username and displayName are empty
|
||||
|
||||
[0.101.0]
|
||||
* New base image 0.10.0
|
||||
* Better error handling of unpurchase errors
|
||||
* Validate that cloudron domain name is a subdomain of public suffix list
|
||||
* Add canada and london to S3 backup regions
|
||||
* Bundle Font Awesome as part of webadmin
|
||||
* Fix crash in custom certificate validation
|
||||
* Get A+ rating in SSL Check
|
||||
* More robust detection and injection of SPF record
|
||||
* Add azure, lightsail, linode, ovh, vultr to provider list
|
||||
|
||||
[0.102.0]
|
||||
* Fix issue where SPF record check was only done 5 times (updated 'async')
|
||||
* Make auto-generated self-signed cert load quickly on Firefox
|
||||
* Ensure we download docker images and have an app data volume on app re-configure
|
||||
* Improve certificate renewal error message
|
||||
* Fix disk usage graph
|
||||
|
||||
|
||||
@@ -10,9 +10,7 @@ readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
|
||||
export JSON="${SOURCE_DIR}/node_modules/.bin/json"
|
||||
|
||||
IMAGE_ID="ami-5aee2235" # ubuntu 16.04 eu-central-1
|
||||
INSTANCE_TYPE="t2.micro"
|
||||
SECURITY_GROUP="sg-19f5a770" # everything open on eu-central-1
|
||||
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
|
||||
SSH_KEY_NAME="id_rsa_yellowtent"
|
||||
|
||||
@@ -22,8 +20,9 @@ server_id=""
|
||||
server_ip=""
|
||||
destroy_server="yes"
|
||||
deploy_env="prod"
|
||||
image_id=""
|
||||
|
||||
args=$(getopt -o "" -l "revision:,name:,no-destroy,env:" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "revision:,name:,no-destroy,env:,region:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -32,19 +31,35 @@ while true; do
|
||||
--revision) revision="$2"; shift 2;;
|
||||
--name) ami_name="$2"; shift 2;;
|
||||
--no-destroy) destroy_server="no"; shift 2;;
|
||||
--region)
|
||||
case "$2" in
|
||||
"us-east-1")
|
||||
image_id="ami-6edd3078"
|
||||
security_group="sg-a5e17fd9"
|
||||
subnet_id="subnet-b8fbc0f1"
|
||||
;;
|
||||
"eu-central-1")
|
||||
image_id="ami-5aee2235"
|
||||
security_group="sg-19f5a770" # everything open on eu-central-1
|
||||
subnet_id=""
|
||||
;;
|
||||
*)
|
||||
echo "Unknown aws region $2"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
export AWS_DEFAULT_REGION="$2" # used by the aws cli tool
|
||||
shift 2
|
||||
;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
export AWS_DEFAULT_REGION="eu-central-1" # we have to use us-east-1 to publish
|
||||
|
||||
# TODO fix this
|
||||
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
|
||||
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"
|
||||
|
||||
echo "=> Creating AMI"
|
||||
|
||||
readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
|
||||
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"
|
||||
|
||||
@@ -53,6 +68,11 @@ if [[ ! -f "${ssh_keys}" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${image_id}" ]]; then
|
||||
echo "--region is required"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function get_pretty_revision() {
|
||||
local git_rev="$1"
|
||||
local sha1=$(git rev-parse --short "${git_rev}" 2>/dev/null)
|
||||
@@ -60,21 +80,34 @@ function get_pretty_revision() {
|
||||
echo "${sha1}"
|
||||
}
|
||||
|
||||
function wait_for_ssh() {
|
||||
echo "=> Waiting for ssh connection"
|
||||
while true; do
|
||||
echo -n "."
|
||||
|
||||
if $SSH ubuntu@${server_ip} echo "hello"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
}
|
||||
|
||||
now=$(date "+%Y-%m-%d-%H%M%S")
|
||||
pretty_revision=$(get_pretty_revision "${revision}")
|
||||
|
||||
if [[ -z "${ami_name}" ]]; then
|
||||
# if you change this, change the regexp is appstore/janitor.js
|
||||
ami_name="box-${deploy_env}-${pretty_revision}-${now}" # remove slashes
|
||||
ami_name="box-${deploy_env}-${pretty_revision}-${now}"
|
||||
fi
|
||||
|
||||
echo "=> Create EC2 instance"
|
||||
id=$(aws ec2 run-instances --image-id "${IMAGE_ID}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${SECURITY_GROUP}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}"\
|
||||
id=$(aws ec2 run-instances --image-id "${image_id}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${security_group}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}" --subnet-id "${subnet_id}" --associate-public-ip-address \
|
||||
| $JSON Instances \
|
||||
| $JSON 0.InstanceId)
|
||||
|
||||
[[ -z "$id" ]] && exit 1
|
||||
echo "Instance created with ID $id"
|
||||
echo "Instance created ID $id"
|
||||
|
||||
echo "=> Waiting for instance to get a public IP"
|
||||
while true; do
|
||||
@@ -93,17 +126,7 @@ done
|
||||
|
||||
echo "Got public IP ${server_ip}"
|
||||
|
||||
echo "=> Waiting for ssh connection"
|
||||
while true; do
|
||||
echo -n "."
|
||||
|
||||
if $SSH ubuntu@${server_ip} echo "hello"; then
|
||||
echo ""
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Fetching cloudron-setup"
|
||||
while true; do
|
||||
@@ -118,7 +141,12 @@ while true; do
|
||||
done
|
||||
|
||||
echo "=> Running cloudron-setup"
|
||||
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ec2"
|
||||
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ec2" --skip-reboot
|
||||
|
||||
wait_for_ssh
|
||||
|
||||
echo "=> Removing ssh key"
|
||||
$SSH ubuntu@${server_ip} sudo rm /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys
|
||||
|
||||
echo "=> Creating AMI"
|
||||
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
|
||||
@@ -31,7 +31,7 @@ function create_droplet() {
|
||||
|
||||
local image_region="sfo1"
|
||||
local ubuntu_image_slug="ubuntu-16-04-x64"
|
||||
local box_size="512mb"
|
||||
local box_size="1gb"
|
||||
|
||||
local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ apt-get -y install \
|
||||
build-essential \
|
||||
cron \
|
||||
curl \
|
||||
dmsetup \
|
||||
iptables \
|
||||
logrotate \
|
||||
mysql-server-5.7 \
|
||||
@@ -57,7 +58,7 @@ apt-get -y update
|
||||
|
||||
# create systemd drop-in file
|
||||
mkdir -p /etc/systemd/system/docker.service.d
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper" > /etc/systemd/system/docker.service.d/cloudron.conf
|
||||
|
||||
apt-get -y --allow-downgrades install docker-engine=1.12.5-0~ubuntu-xenial # apt-cache madison docker-engine
|
||||
apt-mark hold docker-engine # do not update docker
|
||||
|
||||
@@ -449,7 +449,7 @@ POST `/api/v1/apps/:appId/configure` <scope>admin</scope>
|
||||
|
||||
Re-configures an existing app with id `appId`.
|
||||
|
||||
Configuring an app won't preserve existing data. Cloudron apps are written in a way to support reconfiguring
|
||||
Configuring an app preserves existing data. Cloudron apps are written in a way to support reconfiguring
|
||||
any of the parameters listed below without loss of data.
|
||||
|
||||
Request:
|
||||
@@ -1116,7 +1116,7 @@ POST `/api/v1/settings/mail_config` <scope>admin</scope> <scope>internal</scope>
|
||||
|
||||
Sets the email configuration. The Cloudron has a built-in email server for users.
|
||||
This configuration can be used to enable or disable the email server. Note that
|
||||
the Cloudron will always be able to send email on behalf of apps, regardless of
|
||||
the Cloudron will always be able to send email on behalf of apps, regardless of
|
||||
this setting.
|
||||
|
||||
Request:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Overview
|
||||
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.9.0`.
|
||||
The application's Dockerfile must specify the FROM base image to be `cloudron/base:0.10.0`.
|
||||
|
||||
The base image already contains most popular software packages including node, nginx, apache,
|
||||
ruby, PHP. Using the base image greatly reduces the size of app images.
|
||||
@@ -17,16 +17,16 @@ install it yourself.
|
||||
|
||||
* Apache 2.4.18
|
||||
* Composer 1.2.0
|
||||
* Go 1.5.4, 1.6.3
|
||||
* Go 1.6.4, 1.7.5 (install under `/usr/local/go-<version>`)
|
||||
* Gunicorn 19.4.5
|
||||
* Java 1.8
|
||||
* Maven 3.3.9
|
||||
* Mongo 2.6.10
|
||||
* MySQL Client 5.7.13
|
||||
* MySQL Client 5.7.17
|
||||
* nginx 1.10.0
|
||||
* Node 0.10.40, 0.12.7, 4.2.6, 4.4.7 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Node 0.10.48, 0.12.18, 4.7.3, 6.9.5 (installed under `/usr/local/node-<version>`) [more information](#node-js)
|
||||
* Perl 5.22.1
|
||||
* PHP 7.0.8
|
||||
* PHP 7.0.13
|
||||
* Postgresql client 9.5.4
|
||||
* Python 2.7.12
|
||||
* Redis 3.0.6
|
||||
@@ -41,16 +41,16 @@ The base image can be inspected by installing [Docker](https://docs.docker.com/i
|
||||
|
||||
Once installed, pull down the base image locally using the following command:
|
||||
```
|
||||
docker pull cloudron/base:0.9.0
|
||||
docker pull cloudron/base:0.10.0
|
||||
```
|
||||
|
||||
To inspect the base image:
|
||||
```
|
||||
docker run -ti cloudron/base:0.9.0 /bin/bash
|
||||
docker run -ti cloudron/base:0.10.0 /bin/bash
|
||||
```
|
||||
|
||||
*Note:* Please use `docker 1.9.0` or above to pull the base image. Doing otherwise results in a base
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.9.0` is `d038af182821`.
|
||||
image with an incorrect image id. The image id of `cloudron/base:0.10.0` is `5ec8ca8525be`.
|
||||
|
||||
# The `cloudron` user
|
||||
|
||||
|
||||
@@ -93,26 +93,6 @@ Example:
|
||||
"changelog": "* Add support for IE8 \n* New logo"
|
||||
```
|
||||
|
||||
## configurePath
|
||||
|
||||
Type: path string
|
||||
|
||||
Required: no
|
||||
|
||||
The `configurePath` can be used to specify the absolute path to the configuration / settings
|
||||
page of the app. When this path is present, an absolute URL is constructed from the app's
|
||||
install location and this path, and presented to the user in the configuration dialog of the app.
|
||||
|
||||
This is useful for apps that have a main page which does not display a configuration / settings
|
||||
url (i.e) it's hidden for aesthetic reasons. For example, a blogging app like wordpress might
|
||||
keep the admin page url hidden in the main page. Setting the configurationPath makes the
|
||||
configuration url discoverable by the user.
|
||||
|
||||
Example:
|
||||
```
|
||||
"configurePath": "/wp-admin"
|
||||
```
|
||||
|
||||
## contactEmail
|
||||
|
||||
Type: email
|
||||
|
||||
@@ -19,7 +19,7 @@ The Cloudron requires a domain name when it is installed. Apps are installed int
|
||||
The `my` subdomain is special and is the location of the Cloudron web interface. For this to
|
||||
work, the Cloudron requires a way to programmatically configure the DNS entries of the domain.
|
||||
Note that the Cloudron will never overwrite _existing_ DNS entries and refuse to install
|
||||
apps on existing subdomains.
|
||||
apps on existing subdomains (so, it is safe to reuse an existing domain that runs other services).
|
||||
|
||||
# Cloud Server
|
||||
|
||||
@@ -34,9 +34,10 @@ In addition to those, the Cloudron community has successfully installed the plat
|
||||
* [hosttech](https://www.hosttech.ch/?promocode=53619290)
|
||||
* [Linode](https://www.linode.com/?r=f68d816692c49141e91dd4cef3305da457ac0f75)
|
||||
* [OVH](https://www.ovh.com/)
|
||||
* [Rosehosting](https://secure.rosehosting.com/clientarea/?affid=661)
|
||||
* [Scaleway](https://www.scaleway.com/)
|
||||
* [So you Start](https://www.soyoustart.com/)
|
||||
* [Vultr](http://www.vultr.com/?ref=7063201)
|
||||
* [Vultr](http://www.vultr.com/?ref=7110116-3B)
|
||||
|
||||
Please let us know if any of them requires tweaks or adjustments.
|
||||
|
||||
@@ -47,6 +48,10 @@ Please let us know if any of them requires tweaks or adjustments.
|
||||
Create an `Ubuntu 16.04 (Xenial)` server with at-least `1gb` RAM. Do not make any changes
|
||||
to vanilla ubuntu. Be sure to allocate a static IPv4 address for your server.
|
||||
|
||||
Cloudron has a built-in firewall and ports are opened and closed dynamically, as and when
|
||||
apps are installed, re-configured or removed. For this reason, be sure to open all TCP and
|
||||
UDP traffic to the server.
|
||||
|
||||
### Linode
|
||||
|
||||
Since Linode does not manage SSH keys, be sure to add the public key to
|
||||
@@ -64,7 +69,7 @@ SSH into your server and run the following commands:
|
||||
```
|
||||
wget https://cloudron.io/cloudron-setup
|
||||
chmod +x cloudron-setup
|
||||
./cloudron-setup --provider <digitalocean|ec2|generic|scaleway>
|
||||
./cloudron-setup --provider <azure|digitalocean|ec2|lightsail|linode|ovh|scaleway|vultr|generic>
|
||||
```
|
||||
|
||||
The setup will take around 10-15 minutes.
|
||||
@@ -99,9 +104,9 @@ IP address (`https://ip`) to complete the installation.
|
||||
The setup website will show a certificate warning. Accept the self-signed certificate
|
||||
and proceed to the domain setup.
|
||||
|
||||
Currently, only Second Level Domains are supported. For example, `example.com`,
|
||||
`example.co.uk` will work fine. Choosing a domain name at any other level like
|
||||
`cloudron.example.com` will not work.
|
||||
Currently, only subdomains of the [Public Suffix List](https://publicsuffix.org/) are supported.
|
||||
For example, `example.com`, `example.co.uk` will work fine. Choosing other non-registrable
|
||||
domain names like `cloudron.example.com` will not work.
|
||||
|
||||
### Route 53
|
||||
|
||||
@@ -194,6 +199,9 @@ for most use-cases.
|
||||
}
|
||||
```
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
## Minio S3
|
||||
|
||||
[Minio](https://minio.io/) is a distributed object storage server, providing the same API as Amazon S3.
|
||||
@@ -219,6 +227,8 @@ The information to be copied to the Cloudron's backup settings form may look sim
|
||||
|
||||
<img src="/docs/img/minio_backup_config.png" class="shadow"><br/>
|
||||
|
||||
The `Encryption key` is an arbitrary passphrase used to encrypt the backups. Keep the passphrase safe; it is
|
||||
required to decrypt the backups when restoring the Cloudron.
|
||||
|
||||
# Email
|
||||
|
||||
@@ -235,13 +245,26 @@ reputation should be easy to get back.
|
||||
|
||||
## Checklist
|
||||
|
||||
* Once your Cloudron is ready, setup a Reverse DNS PTR record to be setup for the `my` subdomain.
|
||||
* If you are unable to receive mail, first thing to check is if your VPS provider lets you
|
||||
receive mail on port 25.
|
||||
|
||||
* AWS/EC2 - Fill the PTR [request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request.
|
||||
* Digital Ocean - New accounts frequently have port 25 blocked. Write to their support to
|
||||
unblock your server.
|
||||
|
||||
* EC2, Lightsail & Scaleway - Edit your security group to allow email.
|
||||
|
||||
* Setup a Reverse DNS PTR record to be setup for the `my` subdomain.
|
||||
**Note:** PTR records are a feature of your VPS provider and not your domain provider.
|
||||
|
||||
* You can verify the PTR record [here](https://mxtoolbox.com/ReverseLookup.aspx).
|
||||
|
||||
* AWS EC2 & Lightsail - Fill the [PTR request form](https://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request).
|
||||
|
||||
* Digital Ocean - Digital Ocean sets up a PTR record based on the droplet's name. So, simply rename
|
||||
your droplet to `my.<domain>`. Note that some new Digital Ocean accounts have [port 25 blocked](https://www.digitalocean.com/community/questions/port-25-smtp-external-access).
|
||||
|
||||
* Linode - Follow this [guide](https://www.linode.com/docs/networking/dns/setting-reverse-dns).
|
||||
|
||||
* Scaleway - Edit your security group to allow email. You can also set a PTR record on the interface with your
|
||||
`my.<domain>`.
|
||||
|
||||
@@ -324,9 +347,12 @@ Similar to the initial installation, a Cloudron upgrade looks like:
|
||||
$ ssh root@newserverip
|
||||
> wget https://cloudron.io/cloudron-setup
|
||||
> chmod +x cloudron-setup
|
||||
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --encryption-key <key> --restore-url <publicS3Url>
|
||||
> ./cloudron-setup --provider <digitalocean|ec2|generic|scaleway> --domain <example.com> --encryption-key <key> --restore-url <publicS3Url>
|
||||
```
|
||||
|
||||
Note: When upgrading an old version of Cloudron (<= 0.94.0), pass the `--version 0.94.1` flag and then continue updating
|
||||
from that.
|
||||
|
||||
* Finally, once you see the newest version being displayed in your Cloudron webinterface, you can safely delete the old server instance.
|
||||
|
||||
# Restore
|
||||
@@ -342,7 +368,7 @@ To restore a Cloudron from a specific backup:
|
||||
* `File system` - When storing backups in `/var/backups`, you have to make the box and the app backups available to the new Cloudron instance's `/var/backups`. This can be achieved in a variety of ways depending on the situation: like scp'ing the backup files to the new machine before Cloudron installation OR mounting an external backup hard drive into the new Cloudron's `/var/backup` OR downloading a copy of the backup using `cloudron machine backup download` and uploading them to the new machine. After doing so, pass `file:///var/backups/<path to box backup>` as the `--restore-url` below.
|
||||
|
||||
* Create a new Cloudron by following the [installing](/references/selfhosting.html#installing) section.
|
||||
When running the setup script, pass in the `version`, `encryption-key` and `restore-url` flags.
|
||||
When running the setup script, pass in the `version`, `encryption-key`, `domain` and `restore-url` flags.
|
||||
The `version` field is the version of the Cloudron that the backup corresponds to (it is embedded
|
||||
in the backup file name).
|
||||
|
||||
|
||||
@@ -75,7 +75,7 @@ A Dockerfile contains commands to assemble an image.
|
||||
Create a file named `tutorial/Dockerfile` with the following content:
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
@@ -171,7 +171,7 @@ Login successful.
|
||||
Build scheduled with id 76cebfdd-7822-4f3d-af17-b3eb393ae604
|
||||
Downloading source
|
||||
Building
|
||||
Step 0 : FROM cloudron/base:0.9.0
|
||||
Step 0 : FROM cloudron/base:0.10.0
|
||||
---> 97583855cc0c
|
||||
Step 1 : ADD server.js /app/code
|
||||
---> b09b97ecdfbc
|
||||
@@ -333,7 +333,7 @@ and modify our Dockerfile to look like this:
|
||||
File `tutorial/Dockerfile`
|
||||
|
||||
```dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
ADD package.json /app/code/package.json
|
||||
|
||||
@@ -79,7 +79,7 @@ console.log("Server running at port 8000");
|
||||
The Dockerfile contains instructions on how to create an image for your application.
|
||||
|
||||
```Dockerfile
|
||||
FROM cloudron/base:0.9.0
|
||||
FROM cloudron/base:0.10.0
|
||||
|
||||
ADD server.js /app/code/server.js
|
||||
|
||||
@@ -188,7 +188,7 @@ Build scheduled with id e7706847-f2e3-4ba2-9638-3f334a9453a5
|
||||
Waiting for build to begin, this may take a bit...
|
||||
Downloading source
|
||||
Building
|
||||
Step 1 : FROM cloudron/base:0.9.0
|
||||
Step 1 : FROM cloudron/base:0.10.0
|
||||
---> be9fc6312b2d
|
||||
Step 2 : ADD server.js /app/code/server.js
|
||||
---> 10513e428d7a
|
||||
@@ -389,6 +389,8 @@ field in the manifest.
|
||||
Design your application runtime for concurrent use by 50 users. The Cloudron is not designed for
|
||||
concurrent access by 100s or 1000s of users.
|
||||
|
||||
An app can determine it's memory limit by reading `/sys/fs/cgroup/memory/memory.limit_in_bytes`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Apps should integrate with one of the [authentication strategies](/references/authentication.html).
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
var dbm = require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var url = require('url');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var fs = require('fs'),
|
||||
async = require('async'),
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN resetToken VARCHAR(128) DEFAULT ""', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM tokens', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE authcodes ADD COLUMN expiresAt BIGINT NOT NULL', function (error) {
|
||||
@@ -13,4 +12,4 @@ exports.down = function(db, callback) {
|
||||
if (error) console.error(error);
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appPortBindings ADD COLUMN environmentVariable VARCHAR(128) NOT NULL', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE appPortBindings DROP COLUMN containerPort', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM tokens', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN version', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN healthy, ADD COLUMN health VARCHAR(128)', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN lastBackupId VARCHAR(128)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN createdAt TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
// everyday at 1am
|
||||
@@ -8,5 +7,4 @@ exports.up = function(db, callback) {
|
||||
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="autoupdate_pattern"', [ ], callback);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
var safe = require('safetydance');
|
||||
var type = dbm.dataType;
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var tz = safe.fs.readFileSync('/etc/timezone', 'utf8');
|
||||
@@ -12,4 +12,3 @@ exports.up = function(db, callback) {
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="time_zone"', [ ], callback);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN lastManifestJson VARCHAR(2048)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE lastManifestJson lastBackupConfigJson VARCHAR(2048)', [], function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN oldConfigJson VARCHAR(2048)', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('DELETE FROM settings', [ ], callback);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN oauthProxy BOOLEAN DEFAULT 0', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps CHANGE accessRestriction accessRestrictionJson VARCHAR(2048)', [], function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY manifestJson TEXT', [], function (error) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN displayName VARCHAR(512) DEFAULT ""', function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN memoryLimit BIGINT DEFAULT 0', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE groups(" +
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE IF NOT EXISTS groupMembers(" +
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var async = require('async');
|
||||
|
||||
var ADMIN_GROUP_ID = 'admin'; // see groups.js
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE backups(" +
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups ADD COLUMN configJson TEXT', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups DROP COLUMN configJson', function (error) {
|
||||
@@ -14,4 +13,3 @@ exports.down = function(db, callback) {
|
||||
callback(error);
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE backups CHANGE filename id VARCHAR(128)', [], function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users MODIFY username VARCHAR(254) UNIQUE', [], function (error) {
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = dbm || require('db-migrate');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN altDomain VARCHAR(256)', function (error) {
|
||||
if (error) console.error(error);
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = "CREATE TABLE eventlog(" +
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users ADD COLUMN showTutorial BOOLEAN DEFAULT 0', function (error) {
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
var cmd = 'CREATE TABLE mailboxes(' +
|
||||
'name VARCHAR(128) NOT NULL,' +
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
var async = require('async');
|
||||
|
||||
// imports mailbox entries for existing users
|
||||
exports.up = function(db, callback) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN lastBackupConfigJson', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps MODIFY installationProgress TEXT', [], function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN xFrameOptions VARCHAR(512)', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.all('SELECT id FROM users', function (error, results) {
|
||||
@@ -14,4 +13,3 @@ exports.up = function(db, callback) {
|
||||
exports.down = function(db, callback) {
|
||||
db.runSql('DELETE * FROM settings WHERE name="mail_config"', [ ], callback);
|
||||
};
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use strict';
|
||||
|
||||
var dbm = dbm || require('db-migrate');
|
||||
var async = require('async');
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
async.series([
|
||||
@@ -71,4 +71,3 @@ exports.down = function(db, callback) {
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN sso BOOLEAN DEFAULT 1', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps DROP COLUMN oauthProxy', function (error) {
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
var dbm = global.dbm || require('db-migrate');
|
||||
var type = dbm.dataType;
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE users DROP COLUMN showTutorial', function (error) {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
dbm = dbm || require('db-migrate');
|
||||
'use strict';
|
||||
|
||||
exports.up = function(db, callback) {
|
||||
db.runSql('ALTER TABLE apps ADD COLUMN debugModeJson TEXT', function (error) {
|
||||
|
||||
@@ -60,7 +60,7 @@ CREATE TABLE IF NOT EXISTS apps(
|
||||
manifestJson TEXT,
|
||||
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
|
||||
location VARCHAR(128) NOT NULL UNIQUE,
|
||||
dnsRecordId VARCHAR(512),
|
||||
dnsRecordId VARCHAR(512), // tracks any id that we got back to track dns updates (unused)
|
||||
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
|
||||
createdAt TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
memoryLimit BIGINT DEFAULT 0,
|
||||
|
||||
Generated
+209
-928
File diff suppressed because it is too large
Load Diff
+3
-2
@@ -13,7 +13,7 @@
|
||||
"node >=4.0.0 <=4.1.1"
|
||||
],
|
||||
"dependencies": {
|
||||
"async": "^1.2.1",
|
||||
"async": "^2.1.4",
|
||||
"aws-sdk": "^2.1.46",
|
||||
"body-parser": "^1.13.1",
|
||||
"checksum": "^0.1.1",
|
||||
@@ -25,7 +25,8 @@
|
||||
"cookie-session": "^1.1.0",
|
||||
"cron": "^1.0.9",
|
||||
"csurf": "^1.6.6",
|
||||
"db-migrate": "^0.9.2",
|
||||
"db-migrate": "^0.10.0-beta.20",
|
||||
"db-migrate-mysql": "^1.1.10",
|
||||
"debug": "^2.2.0",
|
||||
"dockerode": "^2.2.10",
|
||||
"ejs": "^2.2.4",
|
||||
|
||||
+43
-27
@@ -14,8 +14,11 @@ fi
|
||||
|
||||
# change this to a hash when we make a upgrade release
|
||||
readonly LOG_FILE="/var/log/cloudron-setup.log"
|
||||
readonly DATA_FILE="/root/cloudron-install-data.json"
|
||||
readonly MINIMUM_DISK_SIZE_GB="19" # this is the size of "/" and required to fit in docker images 19 is a safe bet for different reporting on 20GB min
|
||||
readonly MINIMUM_MEMORY="990" # this is mostly reported for 1GB main memory (DO 992, EC2 990)
|
||||
readonly MINIMUM_MEMORY="980" # this is mostly reported for 1GB main memory (DO 992, EC2 990, Linode 989)
|
||||
|
||||
readonly curl="curl --fail --connect-timeout 20 --retry 10 --retry-delay 2 --max-time 2400"
|
||||
|
||||
# copied from cloudron-resize-fs.sh
|
||||
readonly physical_memory=$(free -m | awk '/Mem:/ { print $2 }')
|
||||
@@ -46,9 +49,11 @@ versionsUrl="https://s3.amazonaws.com/prod-cloudron-releases/versions.json"
|
||||
requestedVersion="latest"
|
||||
apiServerOrigin="https://api.cloudron.io"
|
||||
dataJson=""
|
||||
prerelease=false
|
||||
prerelease="false"
|
||||
sourceTarballUrl=""
|
||||
rebootServer="true"
|
||||
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease" -n "$0" -- "$@")
|
||||
args=$(getopt -o "" -l "domain:,help,skip-baseimage-init,data:,provider:,encryption-key:,restore-url:,tls-provider:,version:,versions-url:,api-server:,dns-provider:,env:,prerelease,skip-reboot,source-url:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
while true; do
|
||||
@@ -77,8 +82,10 @@ while true; do
|
||||
--versions-url) versionsUrl="$2"; shift 2;;
|
||||
--api-server) apiServerOrigin="$2"; shift 2;;
|
||||
--skip-baseimage-init) initBaseImage="false"; shift;;
|
||||
--skip-reboot) rebootServer="false"; shift;;
|
||||
--data) dataJson="$2"; shift 2;;
|
||||
--prerelease) prerelease="true"; shift;;
|
||||
--source-url) sourceTarballUrl="$2"; version="0.0.1+custom"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -87,15 +94,21 @@ done
|
||||
# validate arguments in the absence of data
|
||||
if [[ -z "${dataJson}" ]]; then
|
||||
if [[ -z "${provider}" ]]; then
|
||||
echo "--provider is required (generic, scaleway, ec2, digitalocean)"
|
||||
echo "--provider is required (azure, digitalocean, ec2, lightsail, linode, ovh, scaleway, vultr or generic)"
|
||||
exit 1
|
||||
elif [[ \
|
||||
"${provider}" != "generic" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "azure" && \
|
||||
"${provider}" != "digitalocean" && \
|
||||
"${provider}" != "ec2" && \
|
||||
"${provider}" != "digitalocean" \
|
||||
"${provider}" != "lightsail" && \
|
||||
"${provider}" != "linode" && \
|
||||
"${provider}" != "ovh" && \
|
||||
"${provider}" != "rosehosting" && \
|
||||
"${provider}" != "scaleway" && \
|
||||
"${provider}" != "vultr" && \
|
||||
"${provider}" != "generic" \
|
||||
]]; then
|
||||
echo "--provider must be one of: generic, scaleway, ec2, digitalocean"
|
||||
echo "--provider must be one of: azure, digitalocean, ec2, lightsail, linode, ovh, rosehosting, scaleway, vultr or generic"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -125,7 +138,7 @@ echo " Join us at https://chat.cloudron.io for any questions."
|
||||
echo ""
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
echo "=> Updating apt and installing script dependancies"
|
||||
echo "=> Updating apt and installing script dependencies"
|
||||
if ! apt-get update &>> "${LOG_FILE}"; then
|
||||
echo "Could not update package repositories"
|
||||
exit 1
|
||||
@@ -138,16 +151,18 @@ if [[ "${initBaseImage}" == "true" ]]; then
|
||||
fi
|
||||
|
||||
echo "=> Checking version"
|
||||
releaseJson=$(curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
exit 1
|
||||
if [[ "${sourceTarballUrl}" == "" ]]; then
|
||||
releaseJson=$($curl -s "${versionsUrl}")
|
||||
if [[ "$requestedVersion" == "latest" ]]; then
|
||||
pre=$([[ "${prerelease}" == "true" ]] && echo "null" || echo "-pre")
|
||||
version=$(echo "${releaseJson}" | python3 -c "import json,sys,collections;obj=json.load(sys.stdin, object_pairs_hook=collections.OrderedDict);latest=list(v for v in obj if '${pre}' not in v)[-1];print(latest)")
|
||||
else
|
||||
version="${requestedVersion}"
|
||||
fi
|
||||
if ! sourceTarballUrl=$(echo "${releaseJson}" | python3 -c 'import json,sys;obj=json.load(sys.stdin);print(obj[sys.argv[1]]["sourceTarballUrl"])' "${version}"); then
|
||||
echo "No source code for version ${requestedVersion}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build data
|
||||
@@ -200,7 +215,7 @@ fi
|
||||
echo "=> Downloading version ${version} ..."
|
||||
box_src_tmp_dir=$(mktemp -dt box-src-XXXXXX)
|
||||
|
||||
if ! curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
if ! $curl -sL "${sourceTarballUrl}" | tar -zxf - -C "${box_src_tmp_dir}"; then
|
||||
echo "Could not download source tarball. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
@@ -215,29 +230,30 @@ if [[ "${initBaseImage}" == "true" ]]; then
|
||||
fi
|
||||
|
||||
echo "=> Installing version ${version} (this takes some time) ..."
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data "${data}" &>> "${LOG_FILE}"; then
|
||||
echo "${data}" > "${DATA_FILE}"
|
||||
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" --data-file "${DATA_FILE}" &>> "${LOG_FILE}"; then
|
||||
echo "Failed to install cloudron. See ${LOG_FILE} for details"
|
||||
exit 1
|
||||
fi
|
||||
rm "${DATA_FILE}"
|
||||
|
||||
echo -n "=> Waiting for cloudron to be ready (this takes some time) ..."
|
||||
while true; do
|
||||
echo -n "."
|
||||
if status=$(curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
if status=$($curl -q -f "http://localhost:3000/api/v1/cloudron/status" 2>/dev/null); then
|
||||
[[ -z "$domain" ]] && break # with no domain, we are up and running
|
||||
[[ "$status" == *"\"tls\": true"* ]] && break # with a domain, wait for the cert
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
|
||||
if [[ -n "${domain}" ]]; then
|
||||
echo -e "Visit https://my.${domain} to finish setup once the server has rebooted.\n"
|
||||
echo -e "\n\nVisit https://my.${domain} to finish setup once the server has rebooted.\n"
|
||||
else
|
||||
echo -e "Visit https://<IP> to finish setup once the server has rebooted.\n"
|
||||
echo -e "\n\nVisit https://<IP> to finish setup once the server has rebooted.\n"
|
||||
fi
|
||||
|
||||
if [[ "${initBaseImage}" == "true" ]]; then
|
||||
if [[ "${rebootServer}" == "true" ]]; then
|
||||
echo -e "\n\nRebooting this server now to let bootloader changes take effect.\n"
|
||||
systemctl reboot
|
||||
fi
|
||||
|
||||
@@ -2,30 +2,23 @@
|
||||
|
||||
set -eu
|
||||
|
||||
assertNotEmpty() {
|
||||
: "${!1:? "$1 is not set."}"
|
||||
}
|
||||
|
||||
# Only GNU getopt supports long options. OS X comes bundled with the BSD getopt
|
||||
# brew install gnu-getopt to get the GNU getopt on OS X
|
||||
[[ $(uname -s) == "Darwin" ]] && GNU_GETOPT="/usr/local/opt/gnu-getopt/bin/getopt" || GNU_GETOPT="getopt"
|
||||
readonly GNU_GETOPT
|
||||
|
||||
args=$(${GNU_GETOPT} -o "" -l "revision:,output:,no-upload" -n "$0" -- "$@")
|
||||
args=$(${GNU_GETOPT} -o "" -l "revision:,output:" -n "$0" -- "$@")
|
||||
eval set -- "${args}"
|
||||
|
||||
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
delete_bundle="yes"
|
||||
commitish="HEAD"
|
||||
upload="yes"
|
||||
bundle_file=""
|
||||
|
||||
while true; do
|
||||
case "$1" in
|
||||
--revision) commitish="$2"; shift 2;;
|
||||
--output) bundle_file="$2"; delete_bundle="no"; shift 2;;
|
||||
--no-upload) upload="no"; shift;;
|
||||
--output) bundle_file="$2"; shift 2;;
|
||||
--) break;;
|
||||
*) echo "Unknown option $1"; exit 1;;
|
||||
esac
|
||||
@@ -90,21 +83,5 @@ echo "Create final tarball"
|
||||
echo "Cleaning up ${bundle_dir}"
|
||||
rm -rf "${bundle_dir}"
|
||||
|
||||
if [[ "${upload}" == "yes" ]]; then
|
||||
echo "Uploading bundle to S3"
|
||||
echo "Tarball saved at ${bundle_file}"
|
||||
|
||||
assertNotEmpty AWS_DEV_ACCESS_KEY
|
||||
assertNotEmpty AWS_DEV_SECRET_KEY
|
||||
|
||||
# That special header is needed to allow access with singed urls created with different aws credentials than the ones the file got uploaded
|
||||
s3cmd --multipart-chunk-size-mb=5 --ssl --acl-public --access_key="${AWS_DEV_ACCESS_KEY}" --secret_key="${AWS_DEV_SECRET_KEY}" --no-mime-magic put "${bundle_file}" "s3://dev-cloudron-releases/box-${version}.tar.gz"
|
||||
|
||||
versions_file_url="https://dev-cloudron-releases.s3.amazonaws.com/box-${version}.tar.gz"
|
||||
echo "The URL for the versions file is: ${versions_file_url}"
|
||||
fi
|
||||
|
||||
if [[ "${delete_bundle}" == "no" ]]; then
|
||||
echo "Tarball preserved at ${bundle_file}"
|
||||
else
|
||||
rm "${bundle_file}"
|
||||
fi
|
||||
|
||||
@@ -52,7 +52,7 @@ fi
|
||||
|
||||
if [[ "${is_update}" == "yes" ]]; then
|
||||
echo "Setting up update splash screen"
|
||||
"${box_src_tmp_dir}/setup/splashpage.sh" --data "${arg_data}" # show splash from new code
|
||||
"${box_src_tmp_dir}/setup/splashpage.sh" --data "${arg_data}" || true # show splash from new code
|
||||
${BOX_SRC_DIR}/setup/stop.sh # stop the old code
|
||||
fi
|
||||
|
||||
|
||||
+17
-15
@@ -71,7 +71,7 @@ systemctl restart apparmor
|
||||
usermod ${USER} -a -G docker
|
||||
temp_file=$(mktemp)
|
||||
# create systemd drop-in. some apps do not work with aufs
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/docker daemon -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=devicemapper --dns=172.18.0.1 --dns-search=." > "${temp_file}"
|
||||
|
||||
systemctl enable docker
|
||||
# restart docker if options changed
|
||||
@@ -97,12 +97,6 @@ if [[ "${arg_provider}" == "caas" ]]; then
|
||||
fi
|
||||
|
||||
echo "==> Setup btrfs data"
|
||||
if ! grep -q loop.ko /lib/modules/`uname -r`/modules.builtin; then
|
||||
# on scaleway loop is not built-in
|
||||
echo "loop" >> /etc/modules
|
||||
modprobe loop
|
||||
fi
|
||||
|
||||
if [[ ! -d "${DATA_DIR}" ]]; then
|
||||
echo "==> Mounting loopback btrfs"
|
||||
truncate -s "8192m" "${DATA_FILE}" # 8gb start (this will get resized dynamically by cloudron-resize-fs.service)
|
||||
@@ -205,6 +199,10 @@ if ! grep "^Restart=" /etc/systemd/system/multi-user.target.wants/nginx.service;
|
||||
echo -e "\n[Service]\nRestart=always\n" >> /etc/systemd/system/multi-user.target.wants/nginx.service
|
||||
systemctl daemon-reload
|
||||
fi
|
||||
# This is here, since the splash screen needs this file to be present :-(
|
||||
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
|
||||
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
|
||||
fi
|
||||
systemctl start nginx
|
||||
|
||||
# bookkeep the version as part of data
|
||||
@@ -216,14 +214,18 @@ echo "==> Cleaning up snapshots"
|
||||
find "${DATA_DIR}/snapshots" -mindepth 1 -maxdepth 1 | xargs --no-run-if-empty btrfs subvolume delete
|
||||
|
||||
# restart mysql to make sure it has latest config
|
||||
# wait for all running mysql jobs
|
||||
cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
|
||||
while true; do
|
||||
if ! systemctl list-jobs | grep mysql; then break; fi
|
||||
echo "Waiting for mysql jobs..."
|
||||
sleep 1
|
||||
done
|
||||
systemctl restart mysql
|
||||
if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf >/dev/null; then
|
||||
# wait for all running mysql jobs
|
||||
cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
|
||||
while true; do
|
||||
if ! systemctl list-jobs | grep mysql; then break; fi
|
||||
echo "Waiting for mysql jobs..."
|
||||
sleep 1
|
||||
done
|
||||
systemctl restart mysql
|
||||
else
|
||||
systemctl start mysql
|
||||
fi
|
||||
|
||||
readonly mysql_root_password="password"
|
||||
mysqladmin -u root -ppassword password password # reset default root password
|
||||
|
||||
@@ -25,8 +25,10 @@ server {
|
||||
# https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # don't use SSLv3 ref: POODLE
|
||||
ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH';
|
||||
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains";
|
||||
# ciphers according to https://weakdh.org/sysadmin.html
|
||||
ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
|
||||
ssl_dhparam /home/yellowtent/boxdata/dhparams.pem;
|
||||
add_header Strict-Transport-Security "max-age=15768000";
|
||||
|
||||
# https://developer.mozilla.org/en-US/docs/Web/HTTP/X-Frame-Options
|
||||
add_header X-Frame-Options "<%= xFrameOptions %>";
|
||||
|
||||
+2
-9
@@ -112,12 +112,6 @@ var KNOWN_ADDONS = {
|
||||
teardown: teardownSimpleAuth,
|
||||
backup: NOOP,
|
||||
restore: setupSimpleAuth
|
||||
},
|
||||
_docker: {
|
||||
setup: NOOP,
|
||||
teardown: NOOP,
|
||||
backup: NOOP,
|
||||
restore: NOOP
|
||||
}
|
||||
};
|
||||
|
||||
@@ -219,7 +213,6 @@ function getBindsSync(app, addons) {
|
||||
|
||||
for (var addon in addons) {
|
||||
switch (addon) {
|
||||
case '_docker': binds.push('/var/run/docker.sock:/var/run/docker.sock:rw'); break;
|
||||
case 'localstorage': binds.push(path.join(paths.DATA_DIR, app.id, 'data') + ':/app/data:rw'); break;
|
||||
default: break;
|
||||
}
|
||||
@@ -287,7 +280,7 @@ function teardownOauth(app, options, callback) {
|
||||
debugApp(app, 'teardownOauth');
|
||||
|
||||
clients.delByAppIdAndType(app.id, clients.TYPE_OAUTH, function (error) {
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) console.error(error);
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) debug(error);
|
||||
|
||||
appdb.unsetAddonConfig(app.id, 'oauth', callback);
|
||||
});
|
||||
@@ -332,7 +325,7 @@ function teardownSimpleAuth(app, options, callback) {
|
||||
debugApp(app, 'teardownSimpleAuth');
|
||||
|
||||
clients.delByAppIdAndType(app.id, clients.TYPE_SIMPLE_AUTH, function (error) {
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) console.error(error);
|
||||
if (error && error.reason !== ClientsError.NOT_FOUND) debug(error);
|
||||
|
||||
appdb.unsetAddonConfig(app.id, 'simpleauth', callback);
|
||||
});
|
||||
|
||||
+12
-2
@@ -52,6 +52,7 @@ var assert = require('assert'),
|
||||
async = require('async'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror'),
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
safe = require('safetydance'),
|
||||
util = require('util');
|
||||
|
||||
@@ -189,7 +190,7 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
var sso = 'sso' in data ? data.sso : null;
|
||||
var debugModeJson = data.debugMode ? JSON.stringify(data.debugMode) : null;
|
||||
|
||||
var queries = [ ];
|
||||
var queries = [];
|
||||
queries.push({
|
||||
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
|
||||
args: [ id, appStoreId, manifestJson, installationState, location, accessRestrictionJson, memoryLimit, altDomain, xFrameOptions, lastBackupId, sso, debugModeJson ]
|
||||
@@ -202,6 +203,14 @@ function add(id, appStoreId, manifest, location, portBindings, data, callback) {
|
||||
});
|
||||
});
|
||||
|
||||
// only allocate a mailbox if mailboxName is set
|
||||
if (data.mailboxName) {
|
||||
queries.push({
|
||||
query: 'INSERT INTO mailboxes (name, ownerId, ownerType) VALUES (?, ?, ?)',
|
||||
args: [ data.mailboxName, id, mailboxdb.TYPE_APP ]
|
||||
});
|
||||
}
|
||||
|
||||
database.transaction(queries, function (error) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS, error.message));
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
@@ -242,13 +251,14 @@ function del(id, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var queries = [
|
||||
{ query: 'DELETE FROM mailboxes WHERE ownerId=?', args: [ id ] },
|
||||
{ query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] },
|
||||
{ query: 'DELETE FROM apps WHERE id = ?', args: [ id ] }
|
||||
];
|
||||
|
||||
database.transaction(queries, function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results[1].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
if (results[2].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
|
||||
@@ -138,7 +138,7 @@ function run() {
|
||||
|
||||
/*
|
||||
OOM can be tested using stress tool like so:
|
||||
docker run -ti -m 100M cloudron/base:0.9.0 /bin/bash
|
||||
docker run -ti -m 100M cloudron/base:0.10.0 /bin/bash
|
||||
apt-get update && apt-get install stress
|
||||
stress --vm 1 --vm-bytes 200M --vm-hang 0
|
||||
*/
|
||||
|
||||
+35
-50
@@ -250,7 +250,7 @@ function getDuplicateErrorDetails(location, portBindings, error) {
|
||||
|
||||
var match = error.message.match(/ER_DUP_ENTRY: Duplicate entry '(.*)' for key/);
|
||||
if (!match) {
|
||||
console.error('Unexpected SQL error message.', error);
|
||||
debug('Unexpected SQL error message.', error);
|
||||
return new AppsError(AppsError.INTERNAL_ERROR);
|
||||
}
|
||||
|
||||
@@ -296,11 +296,9 @@ function hasAccessTo(app, user, callback) {
|
||||
if (!app.accessRestriction.groups) return callback(null, false);
|
||||
|
||||
async.some(app.accessRestriction.groups, function (groupId, iteratorDone) {
|
||||
groups.isMember(groupId, user.id, function (error, member) {
|
||||
iteratorDone(!error && member); // async.some does not take error argument in callback
|
||||
});
|
||||
}, function (result) {
|
||||
callback(null, result);
|
||||
groups.isMember(groupId, user.id, iteratorDone);
|
||||
}, function (error, result) {
|
||||
callback(null, !error && result);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -360,11 +358,9 @@ function getAllByUser(user, callback) {
|
||||
getAll(function (error, result) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.filter(result, function (app, callback) {
|
||||
hasAccessTo(app, user, function (error, hasAccess) {
|
||||
callback(hasAccess);
|
||||
});
|
||||
}, callback.bind(null, null)); // never error
|
||||
async.filter(result, function (app, iteratorDone) {
|
||||
hasAccessTo(app, user, iteratorDone);
|
||||
}, callback);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -428,10 +424,13 @@ function unpurchase(appId, appstoreId, callback) {
|
||||
|
||||
superagent.get(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode === 404) return callback(null); // was never purchased
|
||||
if (result.statusCode !== 201 && result.statusCode !== 200) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App purchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
superagent.del(url).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
if (error && !error.response) return callback(new AppsError(AppsError.EXTERNAL_ERROR, error));
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new AppsError(AppsError.BILLING_REQUIRED));
|
||||
if (result.statusCode !== 204) return callback(new AppsError(AppsError.EXTERNAL_ERROR, util.format('App unpurchase failed. %s %j', result.status, result.body)));
|
||||
|
||||
callback(null);
|
||||
@@ -554,30 +553,25 @@ function install(data, auditSource, callback) {
|
||||
altDomain: altDomain,
|
||||
xFrameOptions: xFrameOptions,
|
||||
sso: sso,
|
||||
debugMode: debugMode
|
||||
debugMode: debugMode,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
var from = (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.add(from, appId, mailboxdb.TYPE_APP, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'Mailbox already exists'));
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
appdb.add(appId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
// save cert to boxdata/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
|
||||
// save cert to boxdata/certs
|
||||
if (cert && key) {
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.cert'), cert)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving cert: ' + safe.error.message));
|
||||
if (!safe.fs.writeFileSync(path.join(paths.APP_CERTS_DIR, config.appFqdn(location) + '.user.key'), key)) return callback(new AppsError(AppsError.INTERNAL_ERROR, 'Error saving key: ' + safe.error.message));
|
||||
}
|
||||
taskmanager.restartAppTask(appId);
|
||||
|
||||
taskmanager.restartAppTask(appId);
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_INSTALL, auditSource, { appId: appId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
callback(null, { id : appId });
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -898,24 +892,19 @@ function clone(appId, data, auditSource, callback) {
|
||||
accessRestriction: app.accessRestriction,
|
||||
xFrameOptions: app.xFrameOptions,
|
||||
lastBackupId: backupId,
|
||||
sso: !!app.sso
|
||||
sso: !!app.sso,
|
||||
mailboxName: (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app'
|
||||
};
|
||||
|
||||
var from = (location ? location : manifest.title.toLowerCase().replace(/[^a-zA-Z0-9]/g, '')) + '.app';
|
||||
mailboxdb.add(from, newAppId, mailboxdb.TYPE_APP, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new AppsError(AppsError.ALREADY_EXISTS, 'Mailbox already exists'));
|
||||
appdb.add(newAppId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
appdb.add(newAppId, appStoreId, manifest, location, portBindings, data, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(getDuplicateErrorDetails(location, portBindings, error));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
|
||||
taskmanager.restartAppTask(newAppId);
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_CLONE, auditSource, { appId: newAppId, oldAppId: appId, backupId: backupId, location: location, manifest: manifest });
|
||||
|
||||
callback(null, { id : newAppId });
|
||||
});
|
||||
callback(null, { id : newAppId });
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -935,18 +924,14 @@ function uninstall(appId, auditSource, callback) {
|
||||
unpurchase(appId, result.appStoreId, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
mailboxdb.delByOwnerId(appId, function (error) {
|
||||
if (error && error.reason !== DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
|
||||
taskmanager.stopAppTask(appId, function () {
|
||||
appdb.setInstallationCommand(appId, appdb.ISTATE_PENDING_UNINSTALL, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new AppsError(AppsError.NOT_FOUND, 'No such app'));
|
||||
if (error) return callback(new AppsError(AppsError.INTERNAL_ERROR, error));
|
||||
eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId: appId });
|
||||
|
||||
eventlog.add(eventlog.ACTION_APP_UNINSTALL, auditSource, { appId: appId });
|
||||
|
||||
taskmanager.startAppTask(appId, callback);
|
||||
});
|
||||
taskmanager.startAppTask(appId, callback);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
+12
-7
@@ -213,15 +213,14 @@ function downloadIcon(app, callback) {
|
||||
}, callback);
|
||||
}
|
||||
|
||||
function registerSubdomain(app, callback) {
|
||||
function registerSubdomain(app, overwrite, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof overwrite, 'boolean');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// even though the bare domain is already registered in the appstore, we still
|
||||
// need to register it so that we have a dnsRecordId to wait for it to complete
|
||||
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
|
||||
debugApp(app, 'Registering subdomain location [%s]', app.location);
|
||||
|
||||
@@ -231,7 +230,7 @@ function registerSubdomain(app, callback) {
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
// note that the appstore sets up the naked domain for non-custom domains
|
||||
if (config.isCustomDomain() && values.length !== 0 && !app.dnsRecordId) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
if (config.isCustomDomain() && values.length !== 0 && !overwrite) return retryCallback(null, new Error('DNS Record already exists'));
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], function (error, changeId) {
|
||||
if (error && (error.reason === SubdomainError.STILL_BUSY || error.reason === SubdomainError.EXTERNAL_ERROR)) return retryCallback(error); // try again
|
||||
@@ -363,7 +362,7 @@ function install(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '30, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, false /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -462,7 +461,7 @@ function restore(app, callback) {
|
||||
downloadIcon.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '55, Registering subdomain' }), // ip might change during upgrades
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '60, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
@@ -525,7 +524,13 @@ function configure(app, callback) {
|
||||
reserveHttpPort.bind(null, app),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '35, Registering subdomain' }),
|
||||
registerSubdomain.bind(null, app),
|
||||
registerSubdomain.bind(null, app, true /* overwrite */),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '40, Downloading image' }),
|
||||
docker.downloadImage.bind(null, app.manifest),
|
||||
|
||||
updateApp.bind(null, app, { installationProgress: '45, Ensuring volume' }),
|
||||
createVolume.bind(null, app),
|
||||
|
||||
// re-setup addons since they rely on the app's fqdn (e.g oauth)
|
||||
updateApp.bind(null, app, { installationProgress: '50, Setting up addons' }),
|
||||
|
||||
+6
-2
@@ -352,12 +352,16 @@ Acme.prototype.createKeyAndCsr = function (domain, callback) {
|
||||
Acme.prototype.downloadChain = function (linkHeader, callback) {
|
||||
if (!linkHeader) return new AcmeError(AcmeError.EXTERNAL_ERROR, 'Empty link header when downloading certificate chain');
|
||||
|
||||
debug('downloadChain: linkHeader %s', linkHeader);
|
||||
|
||||
var linkInfo = parseLinks(linkHeader);
|
||||
if (!linkInfo || !linkInfo.up) return new AcmeError(AcmeError.EXTERNAL_ERROR, 'Failed to parse link header when downloading certificate chain');
|
||||
|
||||
debug('downloadChain: downloading from %s', this.caOrigin + linkInfo.up);
|
||||
var intermediateCertUrl = linkInfo.up.startsWith('https://') ? linkInfo.up : (this.caOrigin + linkInfo.up);
|
||||
|
||||
superagent.get(this.caOrigin + linkInfo.up).buffer().parse(function (res, done) {
|
||||
debug('downloadChain: downloading from %s', intermediateCertUrl);
|
||||
|
||||
superagent.get(intermediateCertUrl).buffer().parse(function (res, done) {
|
||||
var data = [ ];
|
||||
res.on('data', function(chunk) { data.push(chunk); });
|
||||
res.on('end', function () { res.text = Buffer.concat(data); done(); });
|
||||
|
||||
+20
-1
@@ -14,7 +14,11 @@ exports = module.exports = {
|
||||
|
||||
renewAll: renewAll,
|
||||
|
||||
events: new (require('events').EventEmitter)(),
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_CERT_CHANGED: 'cert_changed',
|
||||
|
||||
// exported for testing
|
||||
@@ -65,6 +69,20 @@ CertificatesError.INTERNAL_ERROR = 'Internal Error';
|
||||
CertificatesError.INVALID_CERT = 'Invalid certificate';
|
||||
CertificatesError.NOT_FOUND = 'Not Found';
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
callback();
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
callback();
|
||||
}
|
||||
|
||||
function getApi(app, callback) {
|
||||
assert.strictEqual(typeof app, 'object');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -261,6 +279,7 @@ function validateCertificate(cert, key, fqdn) {
|
||||
if (content.notAfter < new Date()) return new Error('cert expired');
|
||||
|
||||
function matchesDomain(domain) {
|
||||
if (typeof domain !== 'string') return false;
|
||||
if (domain === fqdn) return true;
|
||||
if (domain.indexOf('*') === 0 && domain.slice(2) === fqdn.slice(fqdn.indexOf('.') + 1)) return true;
|
||||
|
||||
|
||||
+59
-34
@@ -25,7 +25,8 @@ exports = module.exports = {
|
||||
readDkimPublicKeySync: readDkimPublicKeySync,
|
||||
refreshDNS: refreshDNS,
|
||||
|
||||
events: new (require('events').EventEmitter)(),
|
||||
events: null,
|
||||
|
||||
EVENT_ACTIVATED: 'activated'
|
||||
};
|
||||
|
||||
@@ -122,7 +123,16 @@ CloudronError.SELF_UPGRADE_NOT_SUPPORTED = 'Self upgrade not supported';
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
|
||||
gConfigState = { dns: false, tls: false, configured: false };
|
||||
gUpdatingDns = false;
|
||||
gBoxAndUserDetails = null;
|
||||
|
||||
async.series([
|
||||
certificates.initialize,
|
||||
settings.initialize,
|
||||
platform.initialize,
|
||||
installAppBundle,
|
||||
checkConfigState,
|
||||
configureDefaultServer
|
||||
@@ -132,13 +142,17 @@ function initialize(callback) {
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
|
||||
platform.events.removeListener(platform.EVENT_READY, onPlatformReady);
|
||||
|
||||
async.series([
|
||||
cron.uninitialize,
|
||||
taskmanager.pauseTasks,
|
||||
mailer.stop,
|
||||
platform.uninitialize
|
||||
platform.uninitialize,
|
||||
certificates.uninitialize,
|
||||
settings.uninitialize
|
||||
], callback);
|
||||
}
|
||||
|
||||
@@ -155,15 +169,17 @@ function onConfigured(callback) {
|
||||
gConfigState.configured = true;
|
||||
|
||||
platform.events.on(platform.EVENT_READY, onPlatformReady);
|
||||
settings.events.on(settings.DNS_CONFIG_KEY, function () { refreshDNS(); });
|
||||
|
||||
async.series([
|
||||
clients.addDefaultClients,
|
||||
cron.initialize,
|
||||
certificates.ensureFallbackCertificate,
|
||||
platform.initialize, // requires fallback certs for mail container
|
||||
platform.start, // requires fallback certs for mail container
|
||||
ensureDkimKey,
|
||||
addDnsRecords,
|
||||
configureAdmin,
|
||||
mailer.start
|
||||
mailer.start,
|
||||
cron.initialize // do not send heartbeats until we are "ready"
|
||||
], callback);
|
||||
}
|
||||
|
||||
@@ -226,7 +242,7 @@ function configureDefaultServer(callback) {
|
||||
if (!fs.existsSync(certFilePath) || !fs.existsSync(keyFilePath)) {
|
||||
debug('configureDefaultServer: create new cert');
|
||||
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=%s -nodes', keyFilePath, certFilePath, 'localhost');
|
||||
var certCommand = util.format('openssl req -x509 -newkey rsa:2048 -keyout %s -out %s -days 3650 -subj /CN=%s -nodes', keyFilePath, certFilePath, 'cloudron');
|
||||
safe.child_process.execSync(certCommand);
|
||||
}
|
||||
|
||||
@@ -435,7 +451,7 @@ function sendHeartbeat() {
|
||||
function sendAliveStatus(callback) {
|
||||
if (typeof callback !== 'function') {
|
||||
callback = function (error) {
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) console.error(error);
|
||||
if (error && error.reason !== CloudronError.INTERNAL_ERROR) debug(error);
|
||||
else if (error) debug(error);
|
||||
};
|
||||
}
|
||||
@@ -451,7 +467,11 @@ function sendAliveStatus(callback) {
|
||||
domain: config.fqdn(),
|
||||
version: config.version(),
|
||||
provider: config.provider(),
|
||||
backendSettings: backendSettings
|
||||
backendSettings: backendSettings,
|
||||
machine: {
|
||||
cpus: os.cpus(),
|
||||
totalmem: os.totalmem()
|
||||
}
|
||||
};
|
||||
|
||||
superagent.post(url).send(data).query({ accessToken: appstoreConfig.token }).timeout(30 * 1000).end(function (error, result) {
|
||||
@@ -480,7 +500,8 @@ function sendAliveStatus(callback) {
|
||||
mailConfig: {
|
||||
enabled: result[settings.MAIL_CONFIG_KEY].enabled
|
||||
},
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY]
|
||||
autoupdatePattern: result[settings.AUTOUPDATE_PATTERN_KEY],
|
||||
timeZone: result[settings.TIME_ZONE_KEY]
|
||||
};
|
||||
|
||||
// Caas Cloudrons do not store appstore credentials in their local database
|
||||
@@ -507,12 +528,7 @@ function sendAliveStatus(callback) {
|
||||
});
|
||||
}
|
||||
|
||||
function readDkimPublicKeySync() {
|
||||
if (!config.fqdn()) {
|
||||
debug('Cannot read dkim public key without a domain.', safe.error);
|
||||
return null;
|
||||
}
|
||||
|
||||
function ensureDkimKey(callback) {
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPrivateKeyFile = path.join(dkimPath, 'private');
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
@@ -531,6 +547,18 @@ function readDkimPublicKeySync() {
|
||||
debug('DKIM keys already present');
|
||||
}
|
||||
|
||||
callback();
|
||||
}
|
||||
|
||||
function readDkimPublicKeySync() {
|
||||
if (!config.fqdn()) {
|
||||
debug('Cannot read dkim public key without a domain.', safe.error);
|
||||
return null;
|
||||
}
|
||||
|
||||
var dkimPath = path.join(paths.MAIL_DATA_DIR, 'dkim/' + config.fqdn());
|
||||
var dkimPublicKeyFile = path.join(dkimPath, 'public');
|
||||
|
||||
var publicKey = safe.fs.readFileSync(dkimPublicKeyFile, 'utf8');
|
||||
|
||||
if (publicKey === null) {
|
||||
@@ -545,6 +573,7 @@ function readDkimPublicKeySync() {
|
||||
}
|
||||
|
||||
// NOTE: if you change the SPF record here, be sure the wait check in mailer.js
|
||||
// https://agari.zendesk.com/hc/en-us/articles/202952749-How-long-can-my-SPF-record-be-
|
||||
function txtRecordsWithSpf(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -553,21 +582,25 @@ function txtRecordsWithSpf(callback) {
|
||||
|
||||
debug('txtRecordsWithSpf: current txt records - %j', txtRecords);
|
||||
|
||||
var i, validSpf;
|
||||
var i, matches, validSpf;
|
||||
|
||||
for (i = 0; i < txtRecords.length; i++) {
|
||||
if (txtRecords[i].indexOf('"v=spf1 ') !== 0) continue; // not SPF
|
||||
matches = txtRecords[i].match(/^("?v=spf1) /); // DO backend may return without quotes
|
||||
if (matches === null) continue;
|
||||
|
||||
validSpf = txtRecords[i].indexOf(' a:' + config.adminFqdn() + ' ') !== -1;
|
||||
break;
|
||||
// this won't work if the entry is arbitrarily "split" across quoted strings
|
||||
validSpf = txtRecords[i].indexOf('a:' + config.adminFqdn()) !== -1;
|
||||
break; // there can only be one SPF record
|
||||
}
|
||||
|
||||
if (validSpf) return callback(null, null);
|
||||
|
||||
if (i == txtRecords.length) {
|
||||
txtRecords[i] = '"v=spf1 a:' + config.adminFqdn() + ' ~all"';
|
||||
} else {
|
||||
txtRecords[i] = '"v=spf1 a:' + config.adminFqdn() + ' ' + txtRecords[i].slice('"v=spf1 '.length);
|
||||
if (!matches) { // no spf record was found, create one
|
||||
txtRecords.push('"v=spf1 a:' + config.adminFqdn() + ' ~all"');
|
||||
debug('txtRecordsWithSpf: adding txt record');
|
||||
} else { // just add ourself
|
||||
txtRecords[i] = matches[1] + ' a:' + config.adminFqdn() + txtRecords[i].slice(matches[1].length);
|
||||
debug('txtRecordsWithSpf: inserting txt record');
|
||||
}
|
||||
|
||||
return callback(null, txtRecords);
|
||||
@@ -661,7 +694,7 @@ function update(boxUpdateInfo, auditSource, callback) {
|
||||
debug('Starting upgrade');
|
||||
doUpgrade(boxUpdateInfo, function (error) {
|
||||
if (error) {
|
||||
console.error('Upgrade failed with error:', error);
|
||||
debug('Upgrade failed with error:', error);
|
||||
locker.unlock(locker.OP_BOX_UPDATE);
|
||||
}
|
||||
});
|
||||
@@ -669,7 +702,7 @@ function update(boxUpdateInfo, auditSource, callback) {
|
||||
debug('Starting update');
|
||||
doUpdate(boxUpdateInfo, function (error) {
|
||||
if (error) {
|
||||
console.error('Update failed with error:', error);
|
||||
debug('Update failed with error:', error);
|
||||
locker.unlock(locker.OP_BOX_UPDATE);
|
||||
}
|
||||
});
|
||||
@@ -938,15 +971,7 @@ function refreshDNS(callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
async.each(result, function (app, callback) {
|
||||
// get the current record before updating it
|
||||
subdomains.get(app.location, 'A', function (error, values) {
|
||||
if (error) return callback(error);
|
||||
|
||||
// refuse to update any existing DNS record for custom domains that we did not create
|
||||
if (values.length !== 0 && !app.dnsRecordId) return callback(null, new Error('DNS Record already exists'));
|
||||
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
});
|
||||
subdomains.upsert(app.location, 'A', [ ip ], callback);
|
||||
}, function (error) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
+2
-5
@@ -64,7 +64,7 @@ function saveSync() {
|
||||
fs.writeFileSync(cloudronConfigFileName, JSON.stringify(data, null, 4)); // functions are ignored by JSON.stringify
|
||||
}
|
||||
|
||||
function _reset (callback) {
|
||||
function _reset(callback) {
|
||||
safe.fs.unlinkSync(cloudronConfigFileName);
|
||||
|
||||
initConfig();
|
||||
@@ -79,7 +79,7 @@ function initConfig() {
|
||||
data.token = null;
|
||||
data.boxVersionsUrl = null;
|
||||
data.version = null;
|
||||
data.isCustomDomain = false;
|
||||
data.isCustomDomain = true;
|
||||
data.webServerOrigin = null;
|
||||
data.smtpPort = 2525; // // this value comes from mail container
|
||||
data.sysadminPort = 3001;
|
||||
@@ -116,9 +116,6 @@ function initConfig() {
|
||||
saveSync();
|
||||
}
|
||||
|
||||
// cleanup any old config file we have for tests
|
||||
if (exports.TEST) safe.fs.unlinkSync(cloudronConfigFileName);
|
||||
|
||||
initConfig();
|
||||
|
||||
// set(obj) or set(key, value)
|
||||
|
||||
+8
-2
@@ -52,9 +52,15 @@ function initialize(callback) {
|
||||
gHeartbeatJob = new CronJob({
|
||||
cronTime: '00 */1 * * * *', // every minute
|
||||
onTick: cloudron.sendHeartbeat,
|
||||
start: true
|
||||
start: false
|
||||
});
|
||||
cloudron.sendHeartbeat(); // latest unpublished version of CronJob has runOnInit
|
||||
// hack: send the first heartbeat only after we are running for 60 seconds
|
||||
// required as we end up sending a heartbeat and then cloudron-setup reboots the server
|
||||
setTimeout(function () {
|
||||
if (!gHeartbeatJob) return; // already uninitalized
|
||||
gHeartbeatJob.start();
|
||||
cloudron.sendHeartbeat();
|
||||
}, 1000 * 60);
|
||||
|
||||
var randomHourMinute = Math.floor(60*Math.random());
|
||||
gAliveJob = new CronJob({
|
||||
|
||||
@@ -85,6 +85,7 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
.end(function (error, result) {
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 201) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
@@ -100,6 +101,7 @@ function upsert(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
|
||||
if (error && !error.response) return callback(error);
|
||||
if (result.statusCode === 403 || result.statusCode === 401) return callback(new SubdomainError(SubdomainError.ACCESS_DENIED, util.format('%s %j', result.statusCode, result.body)));
|
||||
if (result.statusCode === 422) return callback(new SubdomainError(SubdomainError.BAD_FIELD, result.body.message));
|
||||
if (result.statusCode !== 200) return callback(new SubdomainError(SubdomainError.EXTERNAL_ERROR, util.format('%s %j', result.statusCode, result.body)));
|
||||
|
||||
return callback(null);
|
||||
@@ -190,6 +192,11 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, error ? error.message : 'Unable to get nameservers'));
|
||||
|
||||
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.digitalocean.com') === -1) {
|
||||
debug('verifyDnsConfig: %j does not contains DO NS', nameservers);
|
||||
return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Domain nameservers are not set to Digital Ocean'));
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(error);
|
||||
|
||||
|
||||
+12
-16
@@ -10,7 +10,7 @@ exports = module.exports = {
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
debug = require('debug')('box:dns/noop'),
|
||||
debug = require('debug')('box:dns/manual'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
util = require('util');
|
||||
@@ -60,18 +60,14 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
dns.resolveNs(domain, function (error, nameservers) {
|
||||
if (error || !nameservers) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to get nameservers'));
|
||||
|
||||
// async.every only reports bools
|
||||
var stashedError = null;
|
||||
|
||||
async.every(nameservers, function (nameserver, callback) {
|
||||
async.every(nameservers, function (nameserver, everyNsCallback) {
|
||||
// ns records cannot have cname
|
||||
dns.resolve4(nameserver, function (error, nsIps) {
|
||||
if (error || !nsIps || nsIps.length === 0) {
|
||||
stashedError = new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain');
|
||||
return callback(false);
|
||||
return everyNsCallback(new SubdomainError(SubdomainError.BAD_FIELD, 'Unable to resolve nameservers for this domain'));
|
||||
}
|
||||
|
||||
async.every(nsIps, function (nsIp, callback) {
|
||||
async.every(nsIps, function (nsIp, everyIpCallback) {
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: adminDomain, type: 'A' }),
|
||||
server: { address: nsIp },
|
||||
@@ -80,20 +76,20 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, adminDomain);
|
||||
return callback(true); // should be ok if dns server is down
|
||||
return everyIpCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, adminDomain, error);
|
||||
return callback(false);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, adminDomain, 'A', message);
|
||||
return callback(false);
|
||||
return everyIpCallback(null, false);
|
||||
}
|
||||
|
||||
debug('verifyDnsConfig: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, adminDomain, answer, ip);
|
||||
@@ -102,16 +98,16 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
return a.address === ip;
|
||||
});
|
||||
|
||||
if (match) return callback(true); // done!
|
||||
if (match) return everyIpCallback(null, true); // done!
|
||||
|
||||
callback(false);
|
||||
everyIpCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
}, callback);
|
||||
}, everyNsCallback);
|
||||
});
|
||||
}, function (success) {
|
||||
if (stashedError) return callback(stashedError);
|
||||
}, function (error, success) {
|
||||
if (error) return callback(error);
|
||||
if (!success) return callback(new SubdomainError(SubdomainError.BAD_FIELD, 'The domain ' + adminDomain + ' does not resolve to the server\'s IP ' + ip));
|
||||
|
||||
callback(null, { provider: dnsConfig.provider, wildcard: !!dnsConfig.wildcard });
|
||||
|
||||
+2
-2
@@ -48,8 +48,8 @@ function del(dnsConfig, zoneName, subdomain, type, values, callback) {
|
||||
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(type === 'A' || type === 'CNAME');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
+1
-1
@@ -241,7 +241,7 @@ function verifyDnsConfig(dnsConfig, domain, ip, callback) {
|
||||
}
|
||||
|
||||
upsert(credentials, domain, 'my', 'A', [ ip ], function (error, changeId) {
|
||||
if (error) return callback(new SubdomainError(SubdomainError.INTERNAL_ERROR, error));
|
||||
if (error) return callback(error);
|
||||
|
||||
debug('verifyDnsConfig: A record added with change id %s', changeId);
|
||||
|
||||
|
||||
+18
-12
@@ -7,12 +7,12 @@ var assert = require('assert'),
|
||||
debug = require('debug')('box:dns/waitfordns'),
|
||||
dns = require('native-dns'),
|
||||
SubdomainError = require('../subdomains.js').SubdomainError,
|
||||
tld = require('tldjs');
|
||||
tld = require('tldjs'),
|
||||
util = require('util');
|
||||
|
||||
// the first arg to callback is not an error argument; this is required for async.every
|
||||
function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(util.isRegExp(value));
|
||||
assert.strictEqual(typeof type, 'string');
|
||||
assert.strictEqual(typeof nameserver, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
@@ -33,31 +33,33 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
|
||||
req.on('timeout', function () {
|
||||
debug('nameserver %s (%s) timed out when trying to resolve %s', nameserver, nsIp, domain);
|
||||
return iteratorCallback(true); // should be ok if dns server is down
|
||||
return iteratorCallback(null, true); // should be ok if dns server is down
|
||||
});
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error) {
|
||||
debug('nameserver %s (%s) returned error trying to resolve %s: %s', nameserver, nsIp, domain, error);
|
||||
return iteratorCallback(false);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
var answer = message.answer;
|
||||
|
||||
if (!answer || answer.length === 0) {
|
||||
debug('bad answer from nameserver %s (%s) resolving %s (%s): %j', nameserver, nsIp, domain, type, message);
|
||||
return iteratorCallback(false);
|
||||
return iteratorCallback(null, false);
|
||||
}
|
||||
|
||||
debug('isChangeSynced: ns: %s (%s), name:%s Actual:%j Expecting:%s', nameserver, nsIp, domain, answer, value);
|
||||
|
||||
var match = answer.some(function (a) {
|
||||
return ((type === 'A' && a.address === value) || (type === 'CNAME' && a.data === value));
|
||||
return ((type === 'A' && value.test(a.address)) ||
|
||||
(type === 'CNAME' && value.test(a.data)) ||
|
||||
(type === 'TXT' && value.test(a.data.join(''))));
|
||||
});
|
||||
|
||||
if (match) return iteratorCallback(true); // done!
|
||||
if (match) return iteratorCallback(null, true); // done!
|
||||
|
||||
iteratorCallback(false);
|
||||
iteratorCallback(null, false);
|
||||
});
|
||||
|
||||
req.send();
|
||||
@@ -68,12 +70,16 @@ function isChangeSynced(domain, value, type, nameserver, callback) {
|
||||
// check if IP change has propagated to every nameserver
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(type === 'A' || type === 'CNAME');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var zoneName = tld.getDomain(domain);
|
||||
if (typeof value === 'string') {
|
||||
// http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
|
||||
value = new RegExp('^' + value.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '$');
|
||||
}
|
||||
debug('waitForIp: domain %s to be %s in zone %s.', domain, value, zoneName);
|
||||
|
||||
var attempt = 1;
|
||||
@@ -83,7 +89,7 @@ function waitForDns(domain, value, type, options, callback) {
|
||||
dns.resolveNs(zoneName, function (error, nameservers) {
|
||||
if (error || !nameservers) return retryCallback(error || new SubdomainError(SubdomainError.EXTERNAL_ERROR, 'Unable to get nameservers'));
|
||||
|
||||
async.every(nameservers, isChangeSynced.bind(null, domain, value, type), function (synced) {
|
||||
async.every(nameservers, isChangeSynced.bind(null, domain, value, type), function (error, synced) {
|
||||
debug('waitForIp: %s %s ns: %j', domain, synced ? 'done' : 'not done', nameservers);
|
||||
|
||||
retryCallback(synced ? null : new SubdomainError(SubdomainError.EXTERNAL_ERROR, 'ETRYAGAIN'));
|
||||
|
||||
+10
-4
@@ -25,7 +25,8 @@ exports = module.exports = {
|
||||
var assert = require('assert'),
|
||||
constants = require('./constants.js'),
|
||||
database = require('./database.js'),
|
||||
DatabaseError = require('./databaseerror');
|
||||
DatabaseError = require('./databaseerror'),
|
||||
mailboxdb = require('./mailboxdb.js');
|
||||
|
||||
var GROUPS_FIELDS = [ 'id', 'name' ].join(',');
|
||||
|
||||
@@ -88,10 +89,14 @@ function add(id, name, callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
var data = [ id, name ];
|
||||
database.query('INSERT INTO groups (id, name) VALUES (?, ?)',
|
||||
data, function (error, result) {
|
||||
|
||||
var queries = [];
|
||||
queries.push({ query: 'INSERT INTO mailboxes (name, ownerId, ownerType) VALUES (?, ?, ?)', args: [ name, id, mailboxdb.TYPE_GROUP ] });
|
||||
queries.push({ query: 'INSERT INTO groups (id, name) VALUES (?, ?)', args: [ id, name ] });
|
||||
|
||||
database.transaction(queries, function (error, result) {
|
||||
if (error && error.code === 'ER_DUP_ENTRY') return callback(new DatabaseError(DatabaseError.ALREADY_EXISTS, error));
|
||||
if (error || result.affectedRows !== 1) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (error || result[1].affectedRows !== 1) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
@@ -105,6 +110,7 @@ function del(id, callback) {
|
||||
var queries = [];
|
||||
queries.push({ query: 'DELETE FROM groupMembers WHERE groupId = ?', args: [ id ] });
|
||||
queries.push({ query: 'DELETE FROM groups WHERE id = ?', args: [ id ] });
|
||||
queries.push({ query: 'DELETE FROM mailboxes WHERE ownerId=?', args: [ id ] });
|
||||
|
||||
database.transaction(queries, function (error, result) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
|
||||
+5
-16
@@ -24,7 +24,6 @@ var assert = require('assert'),
|
||||
constants = require('./constants.js'),
|
||||
DatabaseError = require('./databaseerror.js'),
|
||||
groupdb = require('./groupdb.js'),
|
||||
mailboxdb = require('./mailboxdb.js'),
|
||||
util = require('util'),
|
||||
uuid = require('node-uuid');
|
||||
|
||||
@@ -60,7 +59,7 @@ GroupError.NOT_ALLOWED = 'Not Allowed';
|
||||
function validateGroupname(name) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
|
||||
if (name.length < 2) return new GroupError(GroupError.BAD_FIELD, 'name must be atleast 2 chars');
|
||||
if (name.length < 1) return new GroupError(GroupError.BAD_FIELD, 'name must be atleast 1 char');
|
||||
if (name.length >= 200) return new GroupError(GroupError.BAD_FIELD, 'name too long');
|
||||
|
||||
if (constants.RESERVED_NAMES.indexOf(name) !== -1) return new GroupError(GroupError.BAD_FIELD, 'name is reserved');
|
||||
@@ -85,16 +84,11 @@ function create(name, callback) {
|
||||
if (error) return callback(error);
|
||||
|
||||
var id = 'gid-' + uuid.v4();
|
||||
mailboxdb.add(name, id /* owner */, mailboxdb.TYPE_GROUP, function (error) {
|
||||
groupdb.add(id, name, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new GroupError(GroupError.ALREADY_EXISTS));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
groupdb.add(id, name, function (error) {
|
||||
if (error && error.reason === DatabaseError.ALREADY_EXISTS) return callback(new GroupError(GroupError.ALREADY_EXISTS));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null, { id: id, name: name });
|
||||
});
|
||||
callback(null, { id: id, name: name });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -105,16 +99,11 @@ function remove(id, callback) {
|
||||
// never allow admin group to be deleted
|
||||
if (id === constants.ADMIN_GROUP_ID) return callback(new GroupError(GroupError.NOT_ALLOWED));
|
||||
|
||||
mailboxdb.delByOwnerId(id, function (error) {
|
||||
groupdb.del(id, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new GroupError(GroupError.NOT_FOUND));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
groupdb.del(id, function (error) {
|
||||
if (error && error.reason === DatabaseError.NOT_FOUND) return callback(new GroupError(GroupError.NOT_FOUND));
|
||||
if (error) return callback(new GroupError(GroupError.INTERNAL_ERROR, error));
|
||||
|
||||
callback(null);
|
||||
});
|
||||
callback(null);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -8,17 +8,17 @@ exports = module.exports = {
|
||||
// a version bump means that all containers (apps and addons) are recreated
|
||||
'version': 45,
|
||||
|
||||
'baseImages': [ 'cloudron/base:0.9.0' ],
|
||||
'baseImages': [ 'cloudron/base:0.10.0' ],
|
||||
|
||||
// Note that if any of the databases include an upgrade, bump the infra version above
|
||||
// This is because we upgrade using dumps instead of mysql_upgrade, pg_upgrade etc
|
||||
'images': {
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.13.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.15.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.11.0' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.10.0' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.29.0' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.10.0' }
|
||||
'mysql': { repo: 'cloudron/mysql', tag: 'cloudron/mysql:0.14.0' },
|
||||
'postgresql': { repo: 'cloudron/postgresql', tag: 'cloudron/postgresql:0.16.0' },
|
||||
'mongodb': { repo: 'cloudron/mongodb', tag: 'cloudron/mongodb:0.12.0' },
|
||||
'redis': { repo: 'cloudron/redis', tag: 'cloudron/redis:0.11.0' },
|
||||
'mail': { repo: 'cloudron/mail', tag: 'cloudron/mail:0.30.0' },
|
||||
'graphite': { repo: 'cloudron/graphite', tag: 'cloudron/graphite:0.11.0' }
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
+1
-1
@@ -51,7 +51,7 @@ function userSearch(req, res, next) {
|
||||
var groups = [ GROUP_USERS_DN ];
|
||||
if (entry.admin) groups.push(GROUP_ADMINS_DN);
|
||||
|
||||
var displayName = entry.displayName || entry.username;
|
||||
var displayName = entry.displayName || entry.username || ''; // displayName can be empty and username can be null
|
||||
var nameParts = displayName.split(' ');
|
||||
var firstName = nameParts[0];
|
||||
var lastName = nameParts.length > 1 ? nameParts[nameParts.length - 1] : ''; // choose last part, if it exists
|
||||
|
||||
@@ -4,6 +4,12 @@ Dear Cloudron Admin,
|
||||
|
||||
The certificate for <%= domain %> could not be renewed.
|
||||
|
||||
The Cloudron will attempt to renew the certificate every 12 hours
|
||||
until the certificate expires (at which point it will switch to
|
||||
using the fallback certificate).
|
||||
|
||||
The error was:
|
||||
|
||||
-------------------------------------
|
||||
|
||||
<%- message %>
|
||||
|
||||
+1
-1
@@ -208,7 +208,7 @@ function getAlias(name, callback) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
database.query('SELECT ' + MAILBOX_FIELDS + ' FROM mailboxes WHERE name = ? AND aliasTarget IS NOT null', [ name ], function (error, results) {
|
||||
database.query('SELECT ' + MAILBOX_FIELDS + ' FROM mailboxes WHERE name = ? AND aliasTarget IS NOT NULL', [ name ], function (error, results) {
|
||||
if (error) return callback(new DatabaseError(DatabaseError.INTERNAL_ERROR, error));
|
||||
if (results.length === 0) return callback(new DatabaseError(DatabaseError.NOT_FOUND));
|
||||
|
||||
|
||||
+14
-63
@@ -46,17 +46,17 @@ var assert = require('assert'),
|
||||
settings = require('./settings.js'),
|
||||
showdown = require('showdown'),
|
||||
smtpTransport = require('nodemailer-smtp-transport'),
|
||||
subdomains = require('./subdomains.js'),
|
||||
users = require('./user.js'),
|
||||
util = require('util'),
|
||||
_ = require('underscore');
|
||||
|
||||
var NOOP_CALLBACK = function (error) { if (error) console.error(error); };
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
var MAIL_TEMPLATES_DIR = path.join(__dirname, 'mail_templates');
|
||||
|
||||
var gMailQueue = [ ],
|
||||
gDnsReady = false,
|
||||
gCheckDnsTimerId = null;
|
||||
gDnsReady = false;
|
||||
|
||||
function splatchError(error) {
|
||||
var result = { };
|
||||
@@ -81,8 +81,6 @@ function stop(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
// TODO: interrupt processQueue as well
|
||||
clearTimeout(gCheckDnsTimerId);
|
||||
gCheckDnsTimerId = null;
|
||||
|
||||
debug(gMailQueue.length + ' mail items dropped');
|
||||
gMailQueue = [ ];
|
||||
@@ -96,60 +94,13 @@ function mailConfig() {
|
||||
};
|
||||
}
|
||||
|
||||
function getTxtRecords(callback) {
|
||||
dns.resolveNs(config.zoneName(), function (error, nameservers) {
|
||||
if (error || !nameservers) return callback(error || new Error('Unable to get nameservers'));
|
||||
|
||||
var nameserver = nameservers[0];
|
||||
|
||||
dns.resolve4(nameserver, function (error, nsIps) {
|
||||
if (error || !nsIps || nsIps.length === 0) return callback(error);
|
||||
|
||||
var req = dns.Request({
|
||||
question: dns.Question({ name: config.fqdn(), type: 'TXT' }),
|
||||
server: { address: nsIps[0] },
|
||||
timeout: 5000
|
||||
});
|
||||
|
||||
req.on('timeout', function () { return callback(new Error('ETIMEOUT')); });
|
||||
|
||||
req.on('message', function (error, message) {
|
||||
if (error || !message.answer || message.answer.length === 0) return callback(null, null);
|
||||
|
||||
var records = message.answer.map(function (a) { return a.data[0]; });
|
||||
callback(null, records);
|
||||
});
|
||||
|
||||
req.send();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// keep this in sync with the cloudron.js dns changes
|
||||
function checkDns() {
|
||||
getTxtRecords(function (error, records) {
|
||||
if (error || !records) {
|
||||
debug('checkDns: DNS error or no records looking up TXT records for %s %s', config.fqdn(), error, records);
|
||||
gCheckDnsTimerId = setTimeout(checkDns, 60000);
|
||||
return;
|
||||
}
|
||||
|
||||
var allowedToSendMail = false;
|
||||
|
||||
for (var i = 0; i < records.length; i++) {
|
||||
if (records[i].indexOf('v=spf1 ') !== 0) continue; // not SPF
|
||||
|
||||
allowedToSendMail = records[i].indexOf('a:' + config.adminFqdn()) !== -1;
|
||||
break; // only one SPF record can exist (https://support.google.com/a/answer/4568483?hl=en)
|
||||
}
|
||||
|
||||
if (!allowedToSendMail) {
|
||||
debug('checkDns: SPF records disallow sending email from cloudron. %j', records);
|
||||
gCheckDnsTimerId = setTimeout(checkDns, 60000);
|
||||
return;
|
||||
}
|
||||
subdomains.waitForDns(config.fqdn(), new RegExp('^v=spf1 .*a:' + config.adminFqdn().replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&') + '.*'), 'TXT', { interval: 60000, times: Infinity }, function (error) {
|
||||
if (error) return debug(error); // can never happen
|
||||
|
||||
debug('checkDns: SPF check passed. commencing mail processing');
|
||||
|
||||
gDnsReady = true;
|
||||
processQueue();
|
||||
});
|
||||
@@ -183,7 +134,7 @@ function sendMails(queue, callback) {
|
||||
|
||||
async.mapSeries(queue, function iterator(mailOptions, callback) {
|
||||
transport.sendMail(mailOptions, function (error) {
|
||||
if (error) return console.error(error); // TODO: requeue?
|
||||
if (error) return debug(error); // TODO: requeue?
|
||||
debug('Email sent to ' + mailOptions.to);
|
||||
});
|
||||
callback(null);
|
||||
@@ -198,8 +149,8 @@ function sendMails(queue, callback) {
|
||||
function enqueue(mailOptions) {
|
||||
assert.strictEqual(typeof mailOptions, 'object');
|
||||
|
||||
if (!mailOptions.from) console.error('sender address is missing');
|
||||
if (!mailOptions.to) console.error('recipient address is missing');
|
||||
if (!mailOptions.from) debug('sender address is missing');
|
||||
if (!mailOptions.to) debug('recipient address is missing');
|
||||
|
||||
debug('Queued mail for ' + mailOptions.from + ' to ' + mailOptions.to);
|
||||
gMailQueue.push(mailOptions);
|
||||
@@ -232,7 +183,7 @@ function mailUserEventToAdmins(user, event) {
|
||||
assert.strictEqual(typeof event, 'string');
|
||||
|
||||
getAdminEmails(function (error, adminEmails) {
|
||||
if (error) return console.log('Error getting admins', error);
|
||||
if (error) return debug('Error getting admins', error);
|
||||
|
||||
adminEmails = _.difference(adminEmails, [ user.email ]);
|
||||
|
||||
@@ -255,7 +206,7 @@ function sendInvite(user, invitor) {
|
||||
|
||||
settings.getCloudronName(function (error, cloudronName) {
|
||||
if (error) {
|
||||
console.error(error);
|
||||
debug(error);
|
||||
cloudronName = 'Cloudron';
|
||||
}
|
||||
|
||||
@@ -300,7 +251,7 @@ function userAdded(user, inviteSent) {
|
||||
|
||||
settings.getCloudronName(function (error, cloudronName) {
|
||||
if (error) {
|
||||
console.error(error);
|
||||
debug(error);
|
||||
cloudronName = 'Cloudron';
|
||||
}
|
||||
|
||||
@@ -355,7 +306,7 @@ function passwordReset(user) {
|
||||
|
||||
settings.getCloudronName(function (error, cloudronName) {
|
||||
if (error) {
|
||||
console.error(error);
|
||||
debug(error);
|
||||
cloudronName = 'Cloudron';
|
||||
}
|
||||
|
||||
@@ -413,7 +364,7 @@ function boxUpdateAvailable(newBoxVersion, changelog) {
|
||||
|
||||
settings.getCloudronName(function (error, cloudronName) {
|
||||
if (error) {
|
||||
console.error(error);
|
||||
debug(error);
|
||||
cloudronName = 'Cloudron';
|
||||
}
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ app.controller('Controller', ['$scope', function ($scope) {
|
||||
<small ng-show="setupForm.username.$error.maxlength">The username is too long</small>
|
||||
<small ng-show="setupForm.username.$dirty && setupForm.username.$invalid">Not a valid username</small>
|
||||
</div>
|
||||
<input type="text" class="form-control" ng-model="username" name="username" ng-maxlength="512" ng-minlength="3" required autofocus>
|
||||
<input type="text" class="form-control" ng-model="username" name="username" required autofocus>
|
||||
</div>
|
||||
<% } %>
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
<link href="<%= adminOrigin %>/theme.css" rel="stylesheet">
|
||||
|
||||
<!-- Custom Fonts -->
|
||||
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet" type="text/css">
|
||||
<link href="<%= adminOrigin %>/3rdparty/css/font-awesome.min.css" rel="stylesheet" rel="stylesheet" type="text/css">
|
||||
|
||||
<!-- jQuery-->
|
||||
<script src="<%= adminOrigin %>/3rdparty/js/jquery.min.js"></script>
|
||||
|
||||
+16
-2
@@ -4,7 +4,10 @@ exports = module.exports = {
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
|
||||
events: new (require('events').EventEmitter)(),
|
||||
start: start,
|
||||
|
||||
events: null,
|
||||
|
||||
EVENT_READY: 'ready'
|
||||
};
|
||||
|
||||
@@ -34,6 +37,15 @@ var gPlatformReadyTimer = null;
|
||||
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
return callback();
|
||||
}
|
||||
|
||||
function start(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
if (process.env.BOX_ENV === 'test' && !process.env.TEST_CREATE_INFRA) return callback();
|
||||
|
||||
debug('initializing addon infrastructure');
|
||||
@@ -87,7 +99,7 @@ function uninitialize(callback) {
|
||||
clearTimeout(gPlatformReadyTimer);
|
||||
gPlatformReadyTimer = null;
|
||||
|
||||
// TODO: unregister event listeners
|
||||
exports.events = null;
|
||||
|
||||
callback();
|
||||
}
|
||||
@@ -96,6 +108,7 @@ function removeOldImages(callback) {
|
||||
debug('removing old addon images');
|
||||
|
||||
for (var imageName in infra.images) {
|
||||
if (imageName === 'redis') continue; // see #223
|
||||
var image = infra.images[imageName];
|
||||
debug('cleaning up images of %j', image);
|
||||
var cmd = 'docker images "%s" | tail -n +2 | awk \'{ print $1 ":" $2 }\' | grep -v "%s" | xargs --no-run-if-empty docker rmi';
|
||||
@@ -115,6 +128,7 @@ function stopContainers(existingInfra, callback) {
|
||||
assert(typeof infra.images, 'object');
|
||||
var changedAddons = [ ];
|
||||
for (var imageName in infra.images) {
|
||||
if (imageName === 'redis') continue; // see #223
|
||||
if (infra.images[imageName].tag !== existingInfra.images[imageName].tag) changedAddons.push(imageName);
|
||||
}
|
||||
|
||||
|
||||
@@ -249,6 +249,7 @@ function uninstallApp(req, res, next) {
|
||||
debug('Uninstalling app id:%s', req.params.id);
|
||||
|
||||
apps.uninstall(req.params.id, auditSource(req), function (error) {
|
||||
if (error && error.reason === AppsError.BILLING_REQUIRED) return next(new HttpError(402, 'Billing required'));
|
||||
if (error && error.reason === AppsError.NOT_FOUND) return next(new HttpError(404, 'No such app'));
|
||||
if (error) return next(new HttpError(500, error));
|
||||
|
||||
|
||||
+978
-1254
File diff suppressed because it is too large
Load Diff
@@ -224,7 +224,7 @@ describe('Cloudron', function () {
|
||||
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
|
||||
expect(result.body.webServerOrigin).to.eql(null);
|
||||
expect(result.body.fqdn).to.eql(config.fqdn());
|
||||
expect(result.body.isCustomDomain).to.eql(false);
|
||||
expect(result.body.isCustomDomain).to.eql(true);
|
||||
expect(result.body.progress).to.be.an('object');
|
||||
expect(result.body.update).to.be.an('object');
|
||||
expect(result.body.version).to.eql(config.version());
|
||||
@@ -250,7 +250,7 @@ describe('Cloudron', function () {
|
||||
expect(result.body.apiServerOrigin).to.eql('http://localhost:6060');
|
||||
expect(result.body.webServerOrigin).to.eql(null);
|
||||
expect(result.body.fqdn).to.eql(config.fqdn());
|
||||
expect(result.body.isCustomDomain).to.eql(false);
|
||||
expect(result.body.isCustomDomain).to.eql(true);
|
||||
expect(result.body.progress).to.be.an('object');
|
||||
expect(result.body.update).to.be.an('object');
|
||||
expect(result.body.version).to.eql(config.version());
|
||||
|
||||
+156
-114
@@ -529,201 +529,243 @@ describe('Settings API', function () {
|
||||
});
|
||||
|
||||
describe('email DNS records', function () {
|
||||
var resolveTxt = null;
|
||||
var resolve = null;
|
||||
var dnsAnswerQueue = [];
|
||||
var dkimDomain, spfDomain, mxDomain, dmarcDomain;
|
||||
|
||||
before(function (done) {
|
||||
var dns = require('native-dns');
|
||||
|
||||
// replace dns resolveTxt()
|
||||
resolveTxt = dns.resolveTxt;
|
||||
dns.resolveTxt = function (hostname, callback) {
|
||||
resolve = dns.resolve;
|
||||
dns.resolve = function (hostname, type, callback) {
|
||||
expect(hostname).to.be.a('string');
|
||||
expect(callback).to.be.a('function');
|
||||
|
||||
if (dnsAnswerQueue.length === 0) return callback(new Error('no mock answer'));
|
||||
if (!dnsAnswerQueue[hostname] || !(type in dnsAnswerQueue[hostname])) return callback(new Error('no mock answer'));
|
||||
|
||||
callback(null, dnsAnswerQueue.shift());
|
||||
callback(null, dnsAnswerQueue[hostname][type]);
|
||||
};
|
||||
|
||||
dkimDomain = 'cloudron._domainkey.' + config.fqdn();
|
||||
spfDomain = config.fqdn();
|
||||
mxDomain = config.fqdn();
|
||||
dmarcDomain = '_dmarc.' + config.fqdn();
|
||||
|
||||
done();
|
||||
});
|
||||
|
||||
after(function (done) {
|
||||
var dns = require('native-dns');
|
||||
|
||||
dns.resolveTxt = resolveTxt;
|
||||
dns.resolve = resolve;
|
||||
|
||||
done();
|
||||
});
|
||||
|
||||
it('fails without dns error', function (done) {
|
||||
it('does not fail when dns errors', function (done) {
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(500);
|
||||
expect(res.statusCode).to.equal(200);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds without existing answers', function (done) {
|
||||
dnsAnswerQueue.push([]);
|
||||
dnsAnswerQueue.push([]);
|
||||
function clearDnsAnswerQueue() {
|
||||
dnsAnswerQueue = { };
|
||||
dnsAnswerQueue[dkimDomain] = { };
|
||||
dnsAnswerQueue[spfDomain] = { };
|
||||
dnsAnswerQueue[mxDomain] = { };
|
||||
dnsAnswerQueue[dmarcDomain] = { };
|
||||
}
|
||||
|
||||
it('succeeds with dns errors', function (done) {
|
||||
clearDnsAnswerQueue();
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.dkim.domain).to.eql(dkimDomain);
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql(null);
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(false);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
expect(res.body.spf.domain).to.eql(spfDomain);
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql(null);
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
|
||||
expect(res.body.spf.status).to.eql(false);
|
||||
|
||||
expect(res.body.dmarc).to.be.an('object');
|
||||
expect(res.body.dmarc.type).to.eql('TXT');
|
||||
expect(res.body.dmarc.value).to.eql(null);
|
||||
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
|
||||
expect(res.body.dmarc.status).to.eql(false);
|
||||
|
||||
expect(res.body.mx).to.be.an('object');
|
||||
expect(res.body.mx.type).to.eql('MX');
|
||||
expect(res.body.mx.value).to.eql(null);
|
||||
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
|
||||
expect(res.body.mx.status).to.eql(false);
|
||||
|
||||
expect(res.body.ptr).to.be.an('object');
|
||||
expect(res.body.ptr.type).to.eql('PTR');
|
||||
// expect(res.body.ptr.value).to.eql(null); this will be anything random
|
||||
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
|
||||
expect(res.body.ptr.status).to.eql(false);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with existing dkim', function (done) {
|
||||
dnsAnswerQueue.push([['v=DKIM1;', 't=s;', 'p=' + cloudron.readDkimPublicKeySync()]]);
|
||||
dnsAnswerQueue.push([]);
|
||||
it('succeeds with "undefined" spf, dkim, dmarc, mx, ptr records', function (done) {
|
||||
clearDnsAnswerQueue();
|
||||
|
||||
dnsAnswerQueue[dkimDomain].TXT = null;
|
||||
dnsAnswerQueue[spfDomain].TXT = null;
|
||||
dnsAnswerQueue[mxDomain].MX = null;
|
||||
dnsAnswerQueue[dmarcDomain].TXT = null;
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(true);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql(null);
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
|
||||
expect(res.body.spf.status).to.eql(false);
|
||||
expect(res.body.spf.value).to.eql(null);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with existing spf', function (done) {
|
||||
dnsAnswerQueue.push([]);
|
||||
dnsAnswerQueue.push([['v=spf1', 'a:my-foobar.com', '~all']]);
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql(null);
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(false);
|
||||
expect(res.body.dkim.value).to.eql(null);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.dmarc).to.be.an('object');
|
||||
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
|
||||
expect(res.body.dmarc.status).to.eql(false);
|
||||
expect(res.body.dmarc.value).to.eql(null);
|
||||
|
||||
expect(res.body.mx).to.be.an('object');
|
||||
expect(res.body.mx.status).to.eql(false);
|
||||
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
|
||||
expect(res.body.mx.value).to.eql(null);
|
||||
|
||||
expect(res.body.ptr).to.be.an('object');
|
||||
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
|
||||
expect(res.body.ptr.status).to.eql(false);
|
||||
// expect(res.body.ptr.value).to.eql(null); this will be anything random
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with all different spf, dkim, dmarc, mx, ptr records', function (done) {
|
||||
clearDnsAnswerQueue();
|
||||
|
||||
dnsAnswerQueue[mxDomain].MX = [ { priority: '20', exchange: config.mailFqdn() }, { priority: '30', exchange: config.mailFqdn() } ];
|
||||
dnsAnswerQueue[dmarcDomain].TXT = [['v=DMARC2; p=reject; pct=100']];
|
||||
dnsAnswerQueue[dkimDomain].TXT = [['v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync()]];
|
||||
dnsAnswerQueue[spfDomain].TXT = [['v=spf1 a:random.com ~all']];
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' a:random.com ~all');
|
||||
expect(res.body.spf.status).to.eql(false);
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:random.com ~all');
|
||||
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(false);
|
||||
expect(res.body.dkim.value).to.eql('v=DKIM2; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
|
||||
expect(res.body.dmarc).to.be.an('object');
|
||||
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
|
||||
expect(res.body.dmarc.status).to.eql(false);
|
||||
expect(res.body.dmarc.value).to.eql('v=DMARC2; p=reject; pct=100');
|
||||
|
||||
expect(res.body.mx).to.be.an('object');
|
||||
expect(res.body.mx.status).to.eql(false);
|
||||
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
|
||||
expect(res.body.mx.value).to.eql('20 ' + config.mailFqdn() + ' 30 ' + config.mailFqdn());
|
||||
|
||||
expect(res.body.ptr).to.be.an('object');
|
||||
expect(res.body.ptr.expected).to.eql(config.mailFqdn());
|
||||
expect(res.body.ptr.status).to.eql(false);
|
||||
// expect(res.body.ptr.value).to.eql(null); this will be anything random
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with existing embedded spf', function (done) {
|
||||
clearDnsAnswerQueue();
|
||||
|
||||
dnsAnswerQueue[spfDomain].TXT = [['v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all']];
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
expect(res.body.spf.domain).to.eql(spfDomain);
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:example.com a:' + config.mailFqdn() + ' ~all');
|
||||
expect(res.body.spf.status).to.eql(true);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with existing extra spf', function (done) {
|
||||
dnsAnswerQueue.push([]);
|
||||
dnsAnswerQueue.push([['v=spf1', 'a:my-example.com', '~all']]);
|
||||
it('succeeds with all correct records', function (done) {
|
||||
clearDnsAnswerQueue();
|
||||
|
||||
dnsAnswerQueue[mxDomain].MX = [ { priority: '10', exchange: config.mailFqdn() } ];
|
||||
dnsAnswerQueue[dmarcDomain].TXT = [['v=DMARC1; p=reject; pct=100']];
|
||||
dnsAnswerQueue[dkimDomain].TXT = [['v=DKIM1;', 't=s;', 'p=' + cloudron.readDkimPublicKeySync()]];
|
||||
dnsAnswerQueue[spfDomain].TXT = [['v=spf1', ' a:' + config.adminFqdn(), ' ~all']];
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql(null);
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(false);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:my-example.com ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com a:my-example.com ~all');
|
||||
expect(res.body.spf.status).to.eql(false);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with wrong dkim', function (done) {
|
||||
dnsAnswerQueue.push([['v=DKIM1;', 't=s;', 'p=foobar']]);
|
||||
dnsAnswerQueue.push([]);
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql('v=DKIM1; t=s; p=foobar');
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(false);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql(null);
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.status).to.eql(false);
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('succeeds with existing spf and dkim', function (done) {
|
||||
dnsAnswerQueue.push([['v=DKIM1;', 't=s;', 'p=' + cloudron.readDkimPublicKeySync()]]);
|
||||
dnsAnswerQueue.push([['v=spf1', 'a:my-foobar.com', '~all']]);
|
||||
|
||||
superagent.get(SERVER_URL + '/api/v1/settings/email_dns_records')
|
||||
.query({ access_token: token })
|
||||
.end(function (err, res) {
|
||||
expect(res.statusCode).to.equal(200);
|
||||
expect(res.body.dkim).to.be.an('object');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
|
||||
expect(res.body.dkim.subdomain).to.eql('cloudron._domainkey');
|
||||
expect(res.body.dkim.domain).to.eql(dkimDomain);
|
||||
expect(res.body.dkim.type).to.eql('TXT');
|
||||
expect(res.body.dkim.value).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.expected).to.eql('v=DKIM1; t=s; p=' + cloudron.readDkimPublicKeySync());
|
||||
expect(res.body.dkim.status).to.eql(true);
|
||||
|
||||
expect(res.body.spf.subdomain).to.eql('');
|
||||
expect(res.body.spf).to.be.an('object');
|
||||
expect(res.body.spf.domain).to.eql(spfDomain);
|
||||
expect(res.body.spf.type).to.eql('TXT');
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:my-foobar.com ~all');
|
||||
expect(res.body.spf.value).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
|
||||
expect(res.body.spf.expected).to.eql('v=spf1 a:' + config.adminFqdn() + ' ~all');
|
||||
expect(res.body.spf.status).to.eql(true);
|
||||
|
||||
expect(res.body.dmarc).to.be.an('object');
|
||||
expect(res.body.dmarc.expected).to.eql('v=DMARC1; p=reject; pct=100');
|
||||
expect(res.body.dmarc.status).to.eql(true);
|
||||
expect(res.body.dmarc.value).to.eql('v=DMARC1; p=reject; pct=100');
|
||||
|
||||
expect(res.body.mx).to.be.an('object');
|
||||
expect(res.body.mx.status).to.eql(true);
|
||||
expect(res.body.mx.expected).to.eql('10 ' + config.mailFqdn());
|
||||
expect(res.body.mx.value).to.eql('10 ' + config.mailFqdn());
|
||||
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
+1
-1
@@ -42,7 +42,7 @@ function create(req, res, next) {
|
||||
var password = generatePassword();
|
||||
var email = req.body.email;
|
||||
var sendInvite = req.body.invite;
|
||||
var username = req.body.username || '';
|
||||
var username = 'username' in req.body ? req.body.username : null;
|
||||
var displayName = req.body.displayName || '';
|
||||
|
||||
user.create(username, password, email, displayName, auditSource(req), { invitor: req.user, sendInvite: sendInvite }, function (error, user) {
|
||||
|
||||
@@ -19,12 +19,19 @@ fi
|
||||
|
||||
if [[ "${BOX_ENV}" == "cloudron" ]]; then
|
||||
readonly app_data_dir="${HOME}/data/$1"
|
||||
btrfs subvolume create "${app_data_dir}"
|
||||
|
||||
# Only create subvolume if it does not exist
|
||||
if [[ ! -d "${app_data_dir}" ]]; then
|
||||
btrfs subvolume create "${app_data_dir}"
|
||||
fi
|
||||
|
||||
mkdir -p "${app_data_dir}/data"
|
||||
chown -R yellowtent:yellowtent "${app_data_dir}"
|
||||
# only the top level ownership is changed because containers own the subdirectores
|
||||
# and will chown them as necessary
|
||||
chown yellowtent:yellowtent "${app_data_dir}"
|
||||
else
|
||||
readonly app_data_dir="${HOME}/.cloudron_test/data/$1"
|
||||
mkdir -p "${app_data_dir}/data"
|
||||
chown -R ${SUDO_USER}:${SUDO_USER} "${app_data_dir}"
|
||||
chown ${SUDO_USER}:${SUDO_USER} "${app_data_dir}"
|
||||
fi
|
||||
|
||||
|
||||
+154
-49
@@ -3,6 +3,9 @@
|
||||
exports = module.exports = {
|
||||
SettingsError: SettingsError,
|
||||
|
||||
initialize: initialize,
|
||||
uninitialize: uninitialize,
|
||||
|
||||
getEmailDnsRecords: getEmailDnsRecords,
|
||||
|
||||
getAutoupdatePattern: getAutoupdatePattern,
|
||||
@@ -56,10 +59,11 @@ exports = module.exports = {
|
||||
APPSTORE_CONFIG_KEY: 'appstore_config',
|
||||
MAIL_CONFIG_KEY: 'mail_config',
|
||||
|
||||
events: new (require('events').EventEmitter)()
|
||||
events: null
|
||||
};
|
||||
|
||||
var assert = require('assert'),
|
||||
async = require('async'),
|
||||
backups = require('./backups.js'),
|
||||
config = require('./config.js'),
|
||||
constants = require('./constants.js'),
|
||||
@@ -101,11 +105,6 @@ var gDefaults = (function () {
|
||||
return result;
|
||||
})();
|
||||
|
||||
if (config.TEST) {
|
||||
// avoid noisy warnings during npm test
|
||||
exports.events.setMaxListeners(100);
|
||||
}
|
||||
|
||||
function SettingsError(reason, errorOrMessage) {
|
||||
assert.strictEqual(typeof reason, 'string');
|
||||
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
|
||||
@@ -130,6 +129,20 @@ SettingsError.EXTERNAL_ERROR = 'External Error';
|
||||
SettingsError.NOT_FOUND = 'Not Found';
|
||||
SettingsError.BAD_FIELD = 'Bad Field';
|
||||
|
||||
function initialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = new (require('events').EventEmitter)();
|
||||
callback();
|
||||
}
|
||||
|
||||
function uninitialize(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
exports.events = null;
|
||||
callback();
|
||||
}
|
||||
|
||||
function getEmailDnsRecords(callback) {
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
@@ -138,64 +151,156 @@ function getEmailDnsRecords(callback) {
|
||||
var dkimKey = cloudron.readDkimPublicKeySync();
|
||||
if (!dkimKey) return callback(new CloudronError(CloudronError.INTERNAL_ERROR, new Error('Failed to read dkim public key')));
|
||||
|
||||
records.dkim = {
|
||||
subdomain: constants.DKIM_SELECTOR + '._domainkey',
|
||||
type: 'TXT',
|
||||
expected: 'v=DKIM1; t=s; p=' + dkimKey,
|
||||
value: null,
|
||||
status: false
|
||||
};
|
||||
function checkDkim(callback) {
|
||||
records.dkim = {
|
||||
domain: constants.DKIM_SELECTOR + '._domainkey.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
expected: 'v=DKIM1; t=s; p=' + dkimKey,
|
||||
value: null,
|
||||
status: false
|
||||
};
|
||||
|
||||
records.spf = {
|
||||
subdomain: '',
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: null,
|
||||
status: false
|
||||
};
|
||||
|
||||
dns.platform.timeout = 8000;
|
||||
|
||||
// check if DKIM is already setup
|
||||
dns.resolveTxt(records.dkim.subdomain + '.' + config.fqdn(), function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, records); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
// ensure this is an array resolveTxt() returns undefined if no records are found
|
||||
txtRecords = txtRecords || [];
|
||||
|
||||
for (var i = 0; i < txtRecords.length; i++) {
|
||||
records.dkim.value = txtRecords[i].join(' ');
|
||||
records.dkim.status = (records.dkim.value === records.dkim.expected);
|
||||
break;
|
||||
}
|
||||
|
||||
// check if SPF is already setup
|
||||
dns.resolveTxt(config.fqdn(), function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null, records); // not setup
|
||||
dns.resolve(records.dkim.domain, records.dkim.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
// ensure this is an array resolveTxt() returns undefined if no records are found
|
||||
txtRecords = txtRecords || [];
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
records.dkim.value = txtRecords[0].join(' ');
|
||||
records.dkim.status = (records.dkim.value === records.dkim.expected);
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function checkSpf(callback) {
|
||||
records.spf = {
|
||||
domain: config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: 'v=spf1 a:' + config.adminFqdn() + ' ~all',
|
||||
status: false
|
||||
};
|
||||
|
||||
// https://agari.zendesk.com/hc/en-us/articles/202952749-How-long-can-my-SPF-record-be-
|
||||
dns.resolve(records.spf.domain, records.spf.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (!Array.isArray(txtRecords)) return callback();
|
||||
|
||||
var i;
|
||||
for (i = 0; i < txtRecords.length; i++) {
|
||||
if (txtRecords[i].join(' ').indexOf('v=spf1 ') !== 0) continue; // not SPF
|
||||
records.spf.value = txtRecords[i].join(' ');
|
||||
records.spf.status = records.spf.value.indexOf(' a:' + config.adminFqdn() + ' ') !== -1;
|
||||
if (txtRecords[i].join('').indexOf('v=spf1 ') !== 0) continue; // not SPF
|
||||
records.spf.value = txtRecords[i].join('');
|
||||
records.spf.status = records.spf.value.indexOf(' a:' + config.adminFqdn()) !== -1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (records.spf.status) {
|
||||
records.spf.expected = records.spf.value;
|
||||
} else if (i === txtRecords.length) {
|
||||
records.spf.expected = 'v=spf1 a:' + config.adminFqdn() + ' ~all';
|
||||
} else {
|
||||
} else if (i !== txtRecords.length) {
|
||||
records.spf.expected = 'v=spf1 a:' + config.adminFqdn() + ' ' + records.spf.value.slice('v=spf1 '.length);
|
||||
}
|
||||
|
||||
return callback(null, records);
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function checkMx(callback) {
|
||||
records.mx = {
|
||||
domain: config.fqdn(),
|
||||
type: 'MX',
|
||||
value: null,
|
||||
expected: '10 ' + config.mailFqdn(),
|
||||
status: false
|
||||
};
|
||||
|
||||
dns.resolve(records.mx.domain, records.mx.type, function (error, mxRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(mxRecords) && mxRecords.length !== 0) {
|
||||
records.mx.status = mxRecords.length == 1 && mxRecords[0].exchange === config.mailFqdn();
|
||||
records.mx.value = mxRecords.map(function (r) { return r.priority + ' ' + r.exchange; }).join(' ');
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function checkDmarc(callback) {
|
||||
records.dmarc = {
|
||||
domain: '_dmarc.' + config.fqdn(),
|
||||
type: 'TXT',
|
||||
value: null,
|
||||
expected: 'v=DMARC1; p=reject; pct=100',
|
||||
status: false
|
||||
};
|
||||
|
||||
dns.resolve(records.dmarc.domain, records.dmarc.type, function (error, txtRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(txtRecords) && txtRecords.length !== 0) {
|
||||
records.dmarc.value = txtRecords[0].join(' ');
|
||||
records.dmarc.status = (records.dmarc.value === records.dmarc.expected);
|
||||
}
|
||||
|
||||
callback();
|
||||
});
|
||||
}
|
||||
|
||||
function checkPtr(callback) {
|
||||
records.ptr = {
|
||||
domain: null,
|
||||
type: 'PTR',
|
||||
value: null,
|
||||
expected: config.mailFqdn(),
|
||||
status: false
|
||||
};
|
||||
|
||||
sysinfo.getIp(function (error, ip) {
|
||||
if (error) return callback(error);
|
||||
|
||||
records.ptr.domain = ip.split('.').reverse().join('.') + '.in-addr.arpa';
|
||||
|
||||
dns.reverse(ip, function (error, ptrRecords) {
|
||||
if (error && error.code === 'ENOTFOUND') return callback(null); // not setup
|
||||
if (error) return callback(error);
|
||||
|
||||
if (Array.isArray(ptrRecords) && ptrRecords.length !== 0) {
|
||||
records.ptr.value = ptrRecords.join(' ');
|
||||
records.ptr.status = ptrRecords.some(function (v) { return v === config.mailFqdn(); });
|
||||
}
|
||||
|
||||
return callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
function ignoreError(what, func) {
|
||||
return function (callback) {
|
||||
func(function (error) {
|
||||
if (error) debug('Ignored error - ' + what + ':', error);
|
||||
|
||||
callback();
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
dns.platform.timeout = 5000; // hack so that each query finish in 5 seconds. this applies to _each_ ns
|
||||
dns.platform.name_servers = [ { address: '127.0.0.1', port: 53 } ];
|
||||
dns.platform.attempts = 1;
|
||||
|
||||
async.parallel([
|
||||
ignoreError('mx', checkMx),
|
||||
ignoreError('spf', checkSpf),
|
||||
ignoreError('dmarc', checkDmarc),
|
||||
ignoreError('dkim', checkDkim),
|
||||
ignoreError('ptr', checkPtr)
|
||||
], function () {
|
||||
callback(null, records);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
+2
-2
@@ -110,8 +110,8 @@ function remove(subdomain, type, values, callback) {
|
||||
|
||||
function waitForDns(domain, value, type, options, callback) {
|
||||
assert.strictEqual(typeof domain, 'string');
|
||||
assert.strictEqual(typeof value, 'string');
|
||||
assert(type === 'A' || type === 'CNAME');
|
||||
assert(typeof value === 'string' || util.isRegExp(value));
|
||||
assert(type === 'A' || type === 'CNAME' || type === 'TXT');
|
||||
assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
|
||||
assert.strictEqual(typeof callback, 'function');
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user