Compare commits

..

9 Commits

Author SHA1 Message Date
Girish Ramakrishnan 837ce0a879 suppress reset-failed warning message
(cherry picked from commit f9f44b18ad)
2020-10-12 10:09:24 -07:00
Girish Ramakrishnan cdae1f0d06 restore: disable IP based api calls after all activation tasks
the restore code relies on the status call to get the domain to
redirect. if the IP/v1/cloudron/status does not respond, it will
fail the redirection.

(cherry picked from commit fa8a6bfc8c814b20c8bc04b629732a51e4edcf1f)
2020-10-07 10:57:28 -07:00
Johannes Zellner 96468dd931 Limit log files to last 1000 lines
(cherry picked from commit 645c1b9151)
2020-10-07 17:49:56 +02:00
Johannes Zellner a8949649a8 For app tickets, send the log files along
(cherry picked from commit 678fca6704)
2020-10-06 13:04:27 -07:00
Johannes Zellner a3fc7e9990 Support SSH remote enabling on ticket submission
(cherry picked from commit b74fae3762)
2020-10-06 13:04:21 -07:00
Johannes Zellner c749842eab Add enableSshSupport option to support tickets
(cherry picked from commit 2817ea833a)
2020-10-06 13:04:14 -07:00
Girish Ramakrishnan 503497dcc7 add changes
(cherry picked from commit b7ed6d8463)
2020-10-05 21:32:44 -07:00
Girish Ramakrishnan 516a822cd8 locations (primary, secondary) of an app must be updated together
do the delete first to clear out all the domains. this way, you can
move primary to redirect in a single shot.

(cherry picked from commit 005c33dbb5)
2020-10-05 16:17:09 -07:00
Girish Ramakrishnan 75eb8992a9 Disable focal for now 2020-10-05 12:46:26 -07:00
266 changed files with 11721 additions and 13794 deletions
-5
View File
@@ -1,5 +0,0 @@
{
"node": true,
"unused": true,
"esversion": 8
}
-206
View File
@@ -2114,211 +2114,5 @@
* Pre-select app domain by default in the redirection drop down
* robots: preserve leading and trailing whitespace/newlines
[5.6.3]
* Fix postgres locale issue
[6.0.0]
* Focal support
* Reduce duration of self-signed certs to 800 days
* Better backup config filename when downloading
* branding: footer can have template variables like %YEAR% and %VERSION%
* sftp: secure the API with a token
* filemanager: Add extract context menu item
* Do not download docker images if present locally
* sftp: disable access to non-admins by default
* postgresql: whitelist pgcrypto extension for loomio
* filemanager: Add new file creation action and collapse new and upload actions
* rsync: add warning to remove lifecycle rules
* Add volume management
* backups: adjust node's heap size based on memory limit
* s3: disable per-chunk timeout
* logs: more descriptive log file names on download
* collectd: remove collectd config when app stopped (and add it back when started)
* Apps can optionally request an authwall to be installed in front of them
* mailbox can now be owned by a group
* linode: enable dns provider in setup view
* dns: apps can now use the dns port
* httpPaths: allow apps to specify forwarding from custom paths to container ports (for OLS)
* add elasticemail smtp relay option
* mail: add option to fts using solr
* mail: change the namespace separator of new installations to /
* mail: enable acl
* Disable THP
* filemanager: allow download dirs as zip files
* aws: add china region
* security: fix issue where apps could send with any username (but valid password)
* i18n support
[6.0.1]
* app: add export route
* mail: on location change, fix lock up when one or more domains have invalid credentials
* mail: fix crash because of write after timeout closure
* scaleway: fix installation issue where THP is not enabled in kernel
[6.1.0]
* mail: update haraka to 2.8.27. this fixes zero-length queue file crash
* update: set/unset appStoreId from the update route
* proxyauth: Do not follow redirects
* proxyauth: add 2FA
* appstore: add category translations
* appstore: add media category
* prepend the version to assets when sourcing to avoid cache hits on update
* filemanager: list volumes of the app
* Display upload size and size progress
* nfs: chown the backups for hardlinks to work
* remove user add/remove/role change email notifications
* persist update indicator across restarts
* cloudron-setup: add --generate-setup-token
* dashboard: pass accessToken query param to automatically login
* wellknown: add a way to set well known docs
* oom: notification mails have links to dashboard
* collectd: do not install xorg* packages
* apptask: backup/restore tasks now use the backup memory limit configuration
* eventlog: add logout event
* mailbox: include alias in mailbox search
* proxyAuth: add path exclusion
* turn: fix for CVE-2020-26262
* app password: fix regression where apps are not listed anymore in the UI
* Support for multiDomain apps (domain aliases)
* netcup: add dns provider
* Container swap size is now dynamically determined based on system RAM/swap ratio
[6.1.1]
* Fix bug where platform does not start if memory limits could not be applied
[6.1.2]
* App disk usage was not shown in graphs
* Email autoconfig
* Fix SOGo login
[6.2.0]
* ovh: object storage URL has changed from s3 to storage subdomain
* ionos: add profit bricks object storage
* update node to 14.15.4
* update docker to 20.10.3
* new base image 3.0.0
* postgresql updated to 12.5
* redis updated to 5.0.7
* dovecot updated to 2.3.7
* proxyAuth: fix docker UA detection
* registry config: add UI to disable it
* update solr to 8.8.1
* firewall: fix issue where script errored when having more than 15 wl/bl ports
* If groups are used, do not allow app installation without choosing the access settings
* tls addon
* Do not overwrite existing DMARC record
* Sync dns records
* Dry run restore
* linode: show cloudron is installing when user SSHs
* mysql: disable bin logs
* Show cancel task button if task is still running after 2 minutes
* filemanager: fix various bugs involving file names with spaces
* Change Referrer-policy default to 'same-origin'
* rsync: preserve and restore symlinks
* Clean up backups function now removes missing backups
[6.2.1]
* Avoid updown notifications on full restore
* Add retries to downloader logic in installer
[6.2.2]
* Fix ENOBUFS issue with backups when collecting fs metadata
[6.2.3]
* Fix addon crashes with missing databases
* Update mail container for LMTP cert fix
* Fix services view showing yellow icon
[6.2.4]
* Another addon crash fix
[6.2.5]
* update: set memory limit properly
* Fix bug where renew certs button did not work
* sftp: fix rebuild condition
* Fix display of user management/dashboard visibility for email apps
* graphite: disable tagdb and reduce log noise
[6.2.6]
* Fix issue where collectd is restarted too quickly before graphite
[6.2.7]
* redis: backup before upgrade
[6.2.8]
* linode object storage: update aws sdk to make it work again
* Fix crash in blocklist setting when source and list have mixed ip versions
* mysql: bump connection limit to 200
* namecheap: fix issue where DNS updates and del were not working
* turn: turn off verbose logging
* Fix crash when parsing df output (set LC_ALL for box service)
[6.3.0]
* mail: allow TLS from internal hosts
* tokens: add lastUsedTime
* update: set memory limit properly
* addons: better error handling
* filemanager: various enhancements
* sftp: fix rebuild condition
* app mailbox is now optional
* Fix display of user management/dashboard visibility for email apps
* graphite: disable tagdb and reduce log noise
* hsts: change max-age to 2 years
* clone: copy over redis memory limit
* namecheap: fix bug where records were not removed
* add UI to disable 2FA of a user
* mail: add active flag to mailboxes and lists
* Implement OCSP stapling
* security: send new browser login location notification email
* backups: add fqdn to the backup filename
* import all boxdata settings into the database
* volumes: generate systemd mount configs based on type
* postgresql: set max conn limit per db
* ubuntu 16: add alert about EOL
* clone: save and restore app config
* app import: restore icon, tag, label, proxy configs etc
* sieve: fix redirects to not do SRS
* notifications are now system level instead of per-user
* vultr DNS
* vultr object storage
* mail: do not forward spam to mailing lists
[6.3.1]
* Fix cert migration issues
[6.3.2]
* Avatar was migrated as base64 instead of binary
* Fix issue where filemanager came up empty for CIFS mounts
[6.3.3]
* volumes: add filesystem volume type for shared folders
* mail: enable sieve extension editheader
* mail: update solr to 8.9.0
[6.3.4]
* Fix issue where old nginx configs were not removed before upgrade
[6.3.5]
* filemanager: reset selection if directory has changed
* branding: fix error highlight with empty cloudron name
* better text instead of "Cloudron in the wild"
* Make sso login hint translatable
* Give unread notifications a small left border
* Fix issue where clicking update indicator opened app in new tab
* Ensure notifications are only fetched and shown for at least admins
* setupaccount: Show input field errors below input field
* Set focus automatically for new alias or redirect
* eventlog: fix issue where old events are not periodically removed
* ssfs: fix chown
[6.3.6]
* Fix broken reboot button
* app updated notification shown despite failure
* Update translation for sso login information
* Hide groups/tags/state filter in app listing for normal users
* filemanager: Ensure breadcrumbs and hash are correctly updated on folder navigation
* cloudron-setup: check if nginx/docker is already installed
* Use the addresses of all available interfaces for port 53 binding
* refresh config on appstore login
* password reset: check 2fa when enabled
-7
View File
@@ -1,5 +1,3 @@
![Translation status](https://translate.cloudron.io/widgets/cloudron/-/svg-badge.svg)
# Cloudron
[Cloudron](https://cloudron.io) is the best way to run apps on your server.
@@ -72,13 +70,8 @@ Just to give some heads up, we are a bit restrictive in merging changes. We are
would like to keep our maintenance burden low. It might be best to discuss features first in the [forum](https://forum.cloudron.io),
to also figure out how many other people will use it to justify maintenance for a feature.
# Localization
![Translation status](https://translate.cloudron.io/widgets/cloudron/-/287x66-white.png)
## Support
* [Documentation](https://docs.cloudron.io/)
* [Forum](https://forum.cloudron.io/)
+193
View File
@@ -0,0 +1,193 @@
#!/bin/bash

# Builds a Cloudron AMI: boots a base EC2 instance, runs cloudron-setup on it,
# snapshots it into an AMI and (optionally) destroys the instance afterwards.

set -eu -o pipefail

# Abort with a message if the environment variable named by $1 is unset/empty.
# NOTE(review): defined but never called in this script — confirm it is still needed.
assertNotEmpty() {
: "${!1:? "$1 is not set."}"
}

readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"

# json(1) CLI from the repo's node_modules; used below to parse aws cli output.
export JSON="${SOURCE_DIR}/node_modules/.bin/json"

INSTANCE_TYPE="t2.micro"
BLOCK_DEVICE="DeviceName=/dev/sda1,Ebs={VolumeSize=20,DeleteOnTermination=true,VolumeType=gp2}"
SSH_KEY_NAME="id_rsa_yellowtent"

# Defaults; each can be overridden by a command line option parsed below.
revision=$(git rev-parse HEAD)
ami_name=""
server_id=""
server_ip=""
destroy_server="yes"
deploy_env="prod"
image_id=""

# Long-option parsing. --region also selects the base image, security group
# and subnet for that region.
args=$(getopt -o "" -l "revision:,name:,no-destroy,env:,region:" -n "$0" -- "$@")
eval set -- "${args}"

while true; do
case "$1" in
--env) deploy_env="$2"; shift 2;;
--revision) revision="$2"; shift 2;;
--name) ami_name="$2"; shift 2;;
--no-destroy) destroy_server="no"; shift 2;;
--region)
case "$2" in
"us-east-1")
image_id="ami-6edd3078"
security_group="sg-a5e17fd9"
subnet_id="subnet-b8fbc0f1"
;;
"eu-central-1")
image_id="ami-5aee2235"
security_group="sg-19f5a770" # everything open on eu-central-1
subnet_id=""
;;
*)
echo "Unknown aws region $2"
exit 1
;;
esac
export AWS_DEFAULT_REGION="$2" # used by the aws cli tool
shift 2
;;
--) break;;
*) echo "Unknown option $1"; exit 1;;
esac
done

# TODO fix this
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY}"
export AWS_SECRET_ACCESS_KEY="${AWS_ACCESS_SECRET}"

readonly ssh_keys="${HOME}/.ssh/id_rsa_yellowtent"
# Host key checks are disabled because the target instance is freshly created.
readonly SSH="ssh -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${ssh_keys}"

if [[ ! -f "${ssh_keys}" ]]; then
echo "caas ssh key is missing at ${ssh_keys} (pick it up from secrets repo)"
exit 1
fi

# image_id is only set via --region, so this doubles as the required-arg check.
if [[ -z "${image_id}" ]]; then
echo "--region is required (us-east-1 or eu-central-1)"
exit 1
fi
# Resolve a git revision (branch, tag, sha) to its abbreviated sha1.
# git errors are suppressed; an unresolvable revision yields an empty string.
function get_pretty_revision() {
    local rev="$1"
    local short_sha=$(git rev-parse --short "${rev}" 2>/dev/null)
    echo "${short_sha}"
}
# Block until the instance accepts ssh as 'ubuntu'. Reads the globals $SSH
# (ssh command line) and $server_ip; prints a dot every 5 seconds while polling.
function wait_for_ssh() {
echo "=> Waiting for ssh connection"
while true; do
echo -n "."
if $SSH ubuntu@${server_ip} echo "hello"; then
echo ""
break
fi
sleep 5
done
}
now=$(date "+%Y-%m-%d-%H%M%S")
pretty_revision=$(get_pretty_revision "${revision}")

# Default AMI name encodes the deploy env, revision and build timestamp.
if [[ -z "${ami_name}" ]]; then
    ami_name="box-${deploy_env}-${pretty_revision}-${now}"
fi

echo "=> Create EC2 instance"
id=$(aws ec2 run-instances --image-id "${image_id}" --instance-type "${INSTANCE_TYPE}" --security-group-ids "${security_group}" --block-device-mappings "${BLOCK_DEVICE}" --key-name "${SSH_KEY_NAME}" --subnet-id "${subnet_id}" --associate-public-ip-address \
    | $JSON Instances \
    | $JSON 0.InstanceId)
[[ -z "$id" ]] && exit 1
echo "Instance created ID $id"

echo "=> Waiting for instance to get a public IP"
while true; do
    server_ip=$(aws ec2 describe-instances --instance-ids ${id} \
        | $JSON Reservations.0.Instances \
        | $JSON 0.PublicIpAddress)
    if [[ -n "${server_ip}" ]]; then
        echo ""
        break
    fi
    echo -n "."
    sleep 1
done
echo "Got public IP ${server_ip}"

wait_for_ssh

# Retry the download: networking/DNS may not be ready right after boot.
echo "=> Fetching cloudron-setup"
while true; do
    if $SSH ubuntu@${server_ip} wget "https://cloudron.io/cloudron-setup" -O "cloudron-setup"; then
        echo ""
        break
    fi
    echo -n "."
    sleep 5
done

echo "=> Running cloudron-setup"
$SSH ubuntu@${server_ip} sudo /bin/bash "cloudron-setup" --env "${deploy_env}" --provider "ami" --skip-reboot

wait_for_ssh

# Remove the build key so the published AMI does not ship with our credentials.
echo "=> Removing ssh key"
$SSH ubuntu@${server_ip} sudo rm /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys

echo "=> Creating AMI"
image_id=$(aws ec2 create-image --instance-id "${id}" --name "${ami_name}" | $JSON ImageId)
# bugfix: validate the freshly-fetched image id. The original checked "$id"
# (the instance id), which was already verified non-empty above, so a failed
# create-image call went undetected.
[[ -z "${image_id}" ]] && exit 1
echo "Creating AMI with Id ${image_id}"

echo "=> Waiting for AMI to be created"
while true; do
    state=$(aws ec2 describe-images --image-ids ${image_id} \
        | $JSON Images \
        | $JSON 0.State)
    if [[ "${state}" == "available" ]]; then
        echo ""
        break
    fi
    echo -n "."
    sleep 5
done

if [[ "${destroy_server}" == "yes" ]]; then
    echo "=> Deleting EC2 instance"
    while true; do
        state=$(aws ec2 terminate-instances --instance-id "${id}" \
            | $JSON TerminatingInstances \
            | $JSON 0.CurrentState.Name)
        if [[ "${state}" == "shutting-down" ]]; then
            echo ""
            break
        fi
        echo -n "."
        sleep 5
    done
fi

echo ""
echo "Done."
echo ""
echo "New AMI is: ${image_id}"
echo ""
+261
View File
@@ -0,0 +1,261 @@
#!/bin/bash

# CLI wrapper around the DigitalOcean v2 API for managing the droplet used to
# build Cloudron images. Requires DIGITAL_OCEAN_TOKEN and JSON (path to the
# json(1) binary) in the environment; dispatch happens at the bottom of the file.

if [[ -z "${DIGITAL_OCEAN_TOKEN}" ]]; then
echo "Script requires DIGITAL_OCEAN_TOKEN env to be set"
exit 1
fi

if [[ -z "${JSON}" ]]; then
echo "Script requires JSON env to be set to path of JSON binary"
exit 1
fi

# Every API call authenticates with the token as the basic-auth username.
readonly CURL="curl --retry 5 -s -u ${DIGITAL_OCEAN_TOKEN}:"
# Log to stderr so output does not pollute function results captured via $(...).
# echo flags are forwarded: callers rely on 'debug -n' for progress dots.
function debug() {
echo "$@" >&2
}
# Print the numeric id of the account ssh key named $1; exits with 1 if the
# key is not found.
# NOTE(review): 'id' is not declared local and leaks into the caller's scope.
function get_ssh_key_id() {
id=$($CURL "https://api.digitalocean.com/v2/account/keys" \
| $JSON ssh_keys \
| $JSON -c "this.name === \"$1\"" \
| $JSON 0.id)
[[ -z "$id" ]] && exit 1
echo "$id"
}
# Create a 1gb Ubuntu 16.04 droplet in sfo2 named $2, authorized for ssh key
# id $1. Prints the new droplet id; exits with 1 if creation returned no id.
function create_droplet() {
local ssh_key_id="$1"
local box_name="$2"
local image_region="sfo2"
local ubuntu_image_slug="ubuntu-16-04-x64"
local box_size="1gb"
local data="{\"name\":\"${box_name}\",\"size\":\"${box_size}\",\"region\":\"${image_region}\",\"image\":\"${ubuntu_image_slug}\",\"ssh_keys\":[ \"${ssh_key_id}\" ],\"backups\":false}"
id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets" | $JSON droplet.id)
[[ -z "$id" ]] && exit 1
echo "$id"
}
# Print the first public IPv4 address of droplet $1; exits with 1 if none.
function get_droplet_ip() {
local droplet_id="$1"
ip=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}" | $JSON "droplet.networks.v4[0].ip_address")
[[ -z "$ip" ]] && exit 1
echo "$ip"
}
# Print the id of the droplet named $1 (first match among up to 200 droplets);
# exits with 1 if no droplet has that name.
function get_droplet_id() {
local droplet_name="$1"
id=$($CURL "https://api.digitalocean.com/v2/droplets?per_page=200" | $JSON "droplets" | $JSON -c "this.name === '${droplet_name}'" | $JSON "[0].id")
[[ -z "$id" ]] && exit 1
echo "$id"
}
# Power off droplet $1 and poll (every 10s) until the action completes.
# A missing event id is treated as "already powered off".
function power_off_droplet() {
local droplet_id="$1"
local data='{"type":"power_off"}'
local response=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions")
local event_id=`echo "${response}" | $JSON action.id`
if [[ -z "${event_id}" ]]; then
debug "Got no event id, assuming already powered off."
debug "Response: ${response}"
return
fi
debug "Powered off droplet. Event id: ${event_id}"
debug -n "Waiting for droplet to power off"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
}
# Power on droplet $1 and poll (every 10s) until the action completes.
# A missing event id is treated as "already powered on".
function power_on_droplet() {
    local droplet_id="$1"
    local data='{"type":"power_on"}'
    local event_id=$($CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id)
    # consistency fix: check for a missing event id *before* logging it, the
    # same way power_off_droplet does (the original logged an empty id first)
    if [[ -z "${event_id}" ]]; then
        debug "Got no event id, assuming already powered on"
        return
    fi
    debug "Powered on droplet. Event id: ${event_id}"
    debug -n "Waiting for droplet to power on"
    while true; do
        local event_status=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}" | $JSON action.status)
        if [[ "${event_status}" == "completed" ]]; then
            break
        fi
        debug -n "."
        sleep 10
    done
    debug ""
}
# Print the image id of the snapshot named $1. Returns 1 on any failure.
# NOTE(review): failure messages are echoed to stdout and therefore end up in
# the caller's captured output; callers must check the return status.
function get_image_id() {
    local snapshot_name="$1"
    local image_id=""
    if ! response=$($CURL "https://api.digitalocean.com/v2/images?per_page=200"); then
        echo "Failed to get image listing. ${response}"
        return 1
    fi
    if ! image_id=$(echo "$response" \
        | $JSON images \
        | $JSON -c "this.name === \"${snapshot_name}\"" 0.id); then
        echo "Failed to parse curl response: ${response}"
        return 1
    fi
    if [[ -z "${image_id}" ]]; then
        # typo fix in error message: "reponse" -> "response"
        echo "Failed to get image id of ${snapshot_name}. response: ${response}"
        return 1
    fi
    echo "${image_id}"
}
# Snapshot droplet $1 under the name $2, wait for the action to finish, then
# print the resulting image id (via get_image_id). Returns 1 if the image id
# cannot be resolved afterwards.
function snapshot_droplet() {
local droplet_id="$1"
local snapshot_name="$2"
local data="{\"type\":\"snapshot\",\"name\":\"${snapshot_name}\"}"
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions" | $JSON action.id`
debug "Droplet snapshotted as ${snapshot_name}. Event id: ${event_id}"
debug -n "Waiting for snapshot to complete"
while true; do
# NOTE(review): these error echos go to stdout (captured by the caller) and
# the loop retries immediately without a sleep on curl/parse failure
if ! response=$($CURL "https://api.digitalocean.com/v2/droplets/${droplet_id}/actions/${event_id}"); then
echo "Could not get action status. ${response}"
continue
fi
if ! event_status=$(echo "${response}" | $JSON action.status); then
echo "Could not parse action.status from response. ${response}"
continue
fi
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug "! done"
if ! image_id=$(get_image_id "${snapshot_name}"); then
return 1
fi
echo "${image_id}"
}
# Delete droplet $1. Fire-and-forget: the response status is not verified.
function destroy_droplet() {
local droplet_id="$1"
# TODO: check for 204 status
$CURL -X DELETE "https://api.digitalocean.com/v2/droplets/${droplet_id}"
debug "Droplet destroyed"
debug ""
}
# Start transferring image $1 to region $2 and print the action/event id.
# Does not wait; pair with wait_for_image_event.
function transfer_image() {
local image_id="$1"
local region_slug="$2"
local data="{\"type\":\"transfer\",\"region\":\"${region_slug}\"}"
local event_id=`$CURL -X POST -H 'Content-Type: application/json' -d "${data}" "https://api.digitalocean.com/v2/images/${image_id}/actions" | $JSON action.id`
echo "${event_id}"
}
# Poll (every 10s) until action $2 on image $1 reports status "completed".
function wait_for_image_event() {
local image_id="$1"
local event_id="$2"
debug -n "Waiting for ${event_id}"
while true; do
local event_status=`$CURL "https://api.digitalocean.com/v2/images/${image_id}/actions/${event_id}" | $JSON action.status`
if [[ "${event_status}" == "completed" ]]; then
break
fi
debug -n "."
sleep 10
done
debug ""
}
# Kick off transfers of image $1 to every region in image_regions, then wait
# for each transfer action to complete.
function transfer_image_to_all_regions() {
    local image_id="$1"
    xfer_events=()
    image_regions=(ams2) ## sfo1 is where the image is created
    for image_region in ${image_regions[@]}; do
        xfer_event=$(transfer_image ${image_id} ${image_region})
        echo "Image transfer to ${image_region} initiated. Event id: ${xfer_event}"
        xfer_events+=("${xfer_event}")
        sleep 1
    done
    echo "Image transfer initiated, but they will take some time to get transferred."
    for xfer_event in ${xfer_events[@]}; do
        # bugfix: dropped a stray '$vps' prefix before this call; the variable
        # is never defined and only worked because an unset, unquoted variable
        # expands to nothing
        wait_for_image_event "${image_id}" "${xfer_event}"
    done
}
# A subcommand is required.
if [[ $# -lt 1 ]]; then
debug "<command> <params...>"
exit 1
fi

# Dispatch: $1 selects the function, remaining arguments are passed through.
case $1 in
get_ssh_key_id)
get_ssh_key_id "${@:2}"
;;
create)
create_droplet "${@:2}"
;;
get_id)
get_droplet_id "${@:2}"
;;
get_ip)
get_droplet_ip "${@:2}"
;;
power_on)
power_on_droplet "${@:2}"
;;
power_off)
power_off_droplet "${@:2}"
;;
snapshot)
snapshot_droplet "${@:2}"
;;
destroy)
destroy_droplet "${@:2}"
;;
transfer_image_to_all_regions)
transfer_image_to_all_regions "${@:2}"
;;
*)
echo "Unknown command $1"
exit 1
esac
+16 -29
View File
@@ -29,12 +29,10 @@ debconf-set-selections <<< 'mysql-server mysql-server/root_password_again passwo
# this enables automatic security upgrades (https://help.ubuntu.com/community/AutomaticSecurityUpdates)
# resolvconf is needed for unbound to work properly after disabling systemd-resolved in 18.04
gpg_package=$([[ "${ubuntu_version}" == "16.04" ]] && echo "gnupg" || echo "gpg")
mysql_package=$([[ "${ubuntu_version}" == "20.04" ]] && echo "mysql-server-8.0" || echo "mysql-server-5.7")
apt-get -y install --no-install-recommends \
apt-get -y install \
acl \
apparmor \
build-essential \
cifs-utils \
cron \
@@ -48,38 +46,34 @@ apt-get -y install --no-install-recommends \
linux-generic \
logrotate \
$mysql_package \
nfs-common \
openssh-server \
pwgen \
resolvconf \
sshfs \
swaks \
tzdata \
unattended-upgrades \
unbound \
unzip \
xfsprogs
echo "==> installing nginx for xenial for TLSv3 support"
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-1~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y /tmp/nginx.deb
rm /tmp/nginx.deb
# on some providers like scaleway the sudo file is changed and we want to keep the old one
apt-get -o Dpkg::Options::="--force-confold" install -y --no-install-recommends sudo
apt-get -o Dpkg::Options::="--force-confold" install -y sudo
# this ensures that unattended upgrades are enabled, if it was disabled during ubuntu install time (see #346)
# debconf-set-selection of unattended-upgrades/enable_auto_updates + dpkg-reconfigure does not work
cp /usr/share/unattended-upgrades/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades
echo "==> Installing node.js"
readonly node_version=14.15.4
mkdir -p /usr/local/node-${node_version}
curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-${node_version}
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
apt-get install -y --no-install-recommends python # Install python which is required for npm rebuild
mkdir -p /usr/local/node-10.18.1
curl -sL https://nodejs.org/dist/v10.18.1/node-v10.18.1-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /usr/local/node-10.18.1
ln -sf /usr/local/node-10.18.1/bin/node /usr/bin/node
ln -sf /usr/local/node-10.18.1/bin/npm /usr/bin/npm
apt-get install -y python # Install python which is required for npm rebuild
[[ "$(python --version 2>&1)" == "Python 2.7."* ]] || die "Expecting python version to be 2.7.x"
# https://docs.docker.com/engine/installation/linux/ubuntulinux/
@@ -90,10 +84,9 @@ mkdir -p /etc/systemd/system/docker.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --log-driver=journald --exec-opt native.cgroupdriver=cgroupfs --storage-driver=overlay2" > /etc/systemd/system/docker.service.d/cloudron.conf
# there are 3 packages for docker - containerd, CLI and the daemon
readonly docker_version=20.10.3
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.3-1_amd64.deb" -o /tmp/containerd.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.13-2_amd64.deb" -o /tmp/containerd.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
rm /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb
@@ -106,7 +99,7 @@ fi
# do not upgrade grub because it might prompt user and break this script
echo "==> Enable memory accounting"
apt-get -y --no-upgrade --no-install-recommends install grub2-common
apt-get -y --no-upgrade install grub2-common
sed -e 's/^GRUB_CMDLINE_LINUX="\(.*\)"$/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 panic_on_oops=1 panic=5"/' -i /etc/default/grub
update-grub
@@ -125,9 +118,7 @@ for image in ${images}; do
done
echo "==> Install collectd"
# without this, libnotify4 will install gnome-shell
apt-get install -y libnotify4 --no-install-recommends
if ! apt-get install -y --no-install-recommends libcurl3-gnutls collectd collectd-utils; then
if ! apt-get install -y libcurl3-gnutls collectd collectd-utils; then
# FQDNLookup is true in default debian config. The box code has a custom collectd.conf that fixes this
echo "Failed to install collectd. Presumably because of http://mailman.verplant.org/pipermail/collectd/2015-March/006491.html"
sed -e 's/^FQDNLookup true/FQDNLookup false/' -i /etc/collectd/collectd.conf
@@ -135,13 +126,8 @@ fi
# https://bugs.launchpad.net/ubuntu/+source/collectd/+bug/1872281
[[ "${ubuntu_version}" == "20.04" ]] && echo -e "\nLD_PRELOAD=/usr/lib/python3.8/config-3.8-x86_64-linux-gnu/libpython3.8.so" >> /etc/default/collectd
# some hosts like atlantic install ntp which conflicts with timedatectl. https://serverfault.com/questions/1024770/ubuntu-20-04-time-sync-problems-and-possibly-incorrect-status-information
echo "==> Configuring host"
sed -e 's/^#NTP=/NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org/' -i /etc/systemd/timesyncd.conf
if systemctl is-active ntp; then
systemctl stop ntp
apt purge -y ntp
fi
timedatectl set-ntp 1
# mysql follows the system timezone
timedatectl set-timezone UTC
@@ -155,7 +141,7 @@ if [ -f "/etc/default/motd-news" ]; then
sed -i 's/^ENABLED=.*/ENABLED=0/' /etc/default/motd-news
fi
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed)
# Disable bind for good measure (on online.net, kimsufi servers these are pre-installed and conflicts with unbound)
systemctl stop bind9 || true
systemctl disable bind9 || true
@@ -167,7 +153,7 @@ systemctl disable dnsmasq || true
systemctl stop postfix || true
systemctl disable postfix || true
# on ubuntu 18.04 and 20.04, this is the default. this requires resolvconf for DNS to work further after the disable
# on ubuntu 18.04, this is the default. this requires resolvconf for DNS to work further after the disable
systemctl stop systemd-resolved || true
systemctl disable systemd-resolved || true
@@ -176,3 +162,4 @@ systemctl disable systemd-resolved || true
ip6=$([[ -s /proc/net/if_inet6 ]] && echo "yes" || echo "no")
echo -e "server:\n\tinterface: 127.0.0.1\n\tdo-ip6: ${ip6}" > /etc/unbound/unbound.conf.d/cloudron-network.conf
systemctl restart unbound
+2 -6
View File
@@ -7,7 +7,6 @@ let async = require('async'),
fs = require('fs'),
ldap = require('./src/ldap.js'),
paths = require('./src/paths.js'),
proxyAuth = require('./src/proxyauth.js'),
server = require('./src/server.js');
const NOOP_CALLBACK = function () { };
@@ -15,7 +14,7 @@ const NOOP_CALLBACK = function () { };
function setupLogging(callback) {
if (process.env.BOX_ENV === 'test') return callback();
const logfileStream = fs.createWriteStream(paths.BOX_LOG_FILE, { flags:'a' });
var logfileStream = fs.createWriteStream(paths.BOX_LOG_FILE, { flags:'a' });
process.stdout.write = process.stderr.write = logfileStream.write.bind(logfileStream);
callback();
@@ -23,8 +22,7 @@ function setupLogging(callback) {
async.series([
setupLogging,
server.start, // do this first since it also inits the database
proxyAuth.start,
server.start,
ldap.start,
dockerProxy.start
], function (error) {
@@ -40,7 +38,6 @@ async.series([
process.on('SIGINT', function () {
debug('Received SIGINT. Shutting down.');
proxyAuth.stop(NOOP_CALLBACK);
server.stop(NOOP_CALLBACK);
ldap.stop(NOOP_CALLBACK);
dockerProxy.stop(NOOP_CALLBACK);
@@ -50,7 +47,6 @@ async.series([
process.on('SIGTERM', function () {
debug('Received SIGTERM. Shutting down.');
proxyAuth.stop(NOOP_CALLBACK);
server.stop(NOOP_CALLBACK);
ldap.stop(NOOP_CALLBACK);
dockerProxy.stop(NOOP_CALLBACK);
@@ -1,40 +0,0 @@
'use strict';

// Migration: introduce volume support. Creates the 'volumes' and 'appMounts'
// tables and drops the now-unused apps.bindsJson column.
exports.up = function(db, callback) {
var cmd1 = 'CREATE TABLE volumes(' +
'id VARCHAR(128) NOT NULL UNIQUE,' +
'name VARCHAR(256) NOT NULL UNIQUE,' +
'hostPath VARCHAR(1024) NOT NULL UNIQUE,' +
'creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,' +
'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';
// appMounts is a many-to-many join of apps and volumes with a read-only flag.
var cmd2 = 'CREATE TABLE appMounts(' +
'appId VARCHAR(128) NOT NULL,' +
'volumeId VARCHAR(128) NOT NULL,' +
'readOnly BOOLEAN DEFAULT 1,' +
'UNIQUE KEY appMounts_appId_volumeId (appId, volumeId),' +
'FOREIGN KEY(appId) REFERENCES apps(id),' +
'FOREIGN KEY(volumeId) REFERENCES volumes(id)) CHARACTER SET utf8 COLLATE utf8_bin;';
// NOTE(review): errors from cmd1/cmd2 are only logged, not propagated; the
// chain continues and only the final ALTER's error reaches callback.
db.runSql(cmd1, function (error) {
if (error) console.error(error);
db.runSql(cmd2, function (error) {
if (error) console.error(error);
db.runSql('ALTER TABLE apps DROP COLUMN bindsJson', callback);
});
});
};

exports.down = function(db, callback) {
// Drop in reverse dependency order: appMounts references volumes.
db.runSql('DROP TABLE appMounts', function (error) {
if (error) console.error(error);
db.runSql('DROP TABLE volumes', function (error) {
if (error) console.error(error);
callback(error);
});
});
};
@@ -1,16 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN proxyAuth BOOLEAN DEFAULT 0', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN proxyAuth', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,18 +0,0 @@
'use strict';

var async = require('async');

// Migration: add mailboxes.ownerType. The column is added nullable, existing
// rows are backfilled as 'user', and only then is it made NOT NULL.
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE mailboxes ADD COLUMN ownerType VARCHAR(16)'),
db.runSql.bind(db, 'UPDATE mailboxes SET ownerType=?', [ 'user' ]),
db.runSql.bind(db, 'ALTER TABLE mailboxes MODIFY ownerType VARCHAR(16) NOT NULL'),
], callback);
};

exports.down = function(db, callback) {
db.runSql('ALTER TABLE mailboxes DROP COLUMN ownerType', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,13 +0,0 @@
'use strict';

var async = require('async');

// Migration: drop the unused apps.httpPort column.
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN httpPort')
], callback);
};

// Irreversible: the original column values are lost, so down is a no-op.
exports.down = function(db, callback) {
callback();
};
@@ -1,29 +0,0 @@
'use strict';

const async = require('async'),
iputils = require('../src/iputils.js');

// Migration: give every app a unique containerIp. Existing apps receive
// sequential addresses starting just above 172.18.16.0.
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN containerIp VARCHAR(16) UNIQUE', function (error) {
// logged only; proceed to the backfill even if the ALTER reported an error
if (error) console.error(error);
let baseIp = iputils.intFromIp('172.18.16.0');
db.all('SELECT * FROM apps', function (error, apps) {
if (error) return callback(error);
// Serial iteration so each ++baseIp yields a distinct address.
async.eachSeries(apps, function (app, iteratorDone) {
const nextIp = iputils.ipFromInt(++baseIp);
db.runSql('UPDATE apps SET containerIp=? WHERE id=?', [ nextIp, app.id ], iteratorDone);
}, callback);
});
});
};

exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN containerIp', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,21 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT * FROM settings WHERE name=?', ['platform_config'], function (error, results) {
let value;
if (error || results.length === 0) {
value = { sftp: { requireAdmin: true } };
} else {
value = JSON.parse(results[0].value);
if (!value.sftp) value.sftp = {};
value.sftp.requireAdmin = true;
}
// existing installations may not even have the key. so use REPLACE instead of UPDATE
db.runSql('REPLACE INTO settings (name, value) VALUES (?, ?)', [ 'platform_config', JSON.stringify(value) ], callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,18 +0,0 @@
'use strict';
var async = require('async');
// Deduplicates groupMembers and enforces uniqueness going forward: build a
// fresh table carrying the UNIQUE(groupId, userId) constraint, fill it with
// de-duplicated rows (GROUP BY), then swap it in for the original table.
exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'CREATE TABLE groupMembers_copy(groupId VARCHAR(128) NOT NULL, userId VARCHAR(128) NOT NULL, FOREIGN KEY(groupId) REFERENCES userGroups(id), FOREIGN KEY(userId) REFERENCES users(id), UNIQUE (groupId, userId)) CHARACTER SET utf8 COLLATE utf8_bin'), // In mysql CREATE TABLE.. LIKE does not copy indexes
        db.runSql.bind(db, 'INSERT INTO groupMembers_copy SELECT * FROM groupMembers GROUP BY groupId, userId'),
        db.runSql.bind(db, 'DROP TABLE groupMembers'),
        db.runSql.bind(db, 'ALTER TABLE groupMembers_copy RENAME TO groupMembers')
    ], callback);
};

exports.down = function(db, callback) {
    // NOTE(review): up() never creates an index under the name
    // 'groupMembers_member' (the UNIQUE above is unnamed) - confirm this
    // rollback path ever worked against a real schema.
    async.series([
        db.runSql.bind(db, 'ALTER TABLE groupMembers DROP INDEX groupMembers_member'),
    ], callback);
};
@@ -1,51 +0,0 @@
'use strict';
const async = require('async'),
safe = require('safetydance');
exports.up = function(db, callback) {
db.runSql('ALTER TABLE domains ADD COLUMN wellKnownJson TEXT', function (error) {
if (error) return callback(error);
// keep the paths around, so that we don't need to trigger a re-configure. the old nginx config will use the paths
// the new one will proxy calls to the box code
const WELLKNOWN_DIR = '/home/yellowtent/boxdata/well-known';
const output = safe.child_process.execSync('find . -type f -printf "%P\n"', { cwd: WELLKNOWN_DIR, encoding: 'utf8' });
if (!output) return callback();
const paths = output.trim().split('\n');
if (paths.length === 0) return callback(); // user didn't configure any well-known
let wellKnown = {};
for (let path of paths) {
const fqdn = path.split('/', 1)[0];
const loc = path.slice(fqdn.length+1);
const doc = safe.fs.readFileSync(`${WELLKNOWN_DIR}/${path}`, { encoding: 'utf8' });
if (!doc) continue;
wellKnown[fqdn] = {};
wellKnown[fqdn][loc] = doc;
}
console.log('Migrating well-known', JSON.stringify(wellKnown, null, 4));
async.eachSeries(Object.keys(wellKnown), function (fqdn, iteratorDone) {
db.runSql('UPDATE domains SET wellKnownJson=? WHERE domain=?', [ JSON.stringify(wellKnown[fqdn]), fqdn ], function (error, result) {
if (error) {
console.error(error); // maybe the domain does not exist anymore
} else if (result.affectedRows === 0) {
console.log(`Could not migrate wellknown as domain ${fqdn} is missing`);
}
iteratorDone();
});
}, function (error) {
callback(error);
});
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE domains DROP COLUMN wellKnownJson', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,23 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT * FROM settings WHERE name=?', ['platform_config'], function (error, results) {
if (error || results.length === 0) return callback(null);
let value = JSON.parse(results[0].value);
for (const serviceName of Object.keys(value)) {
const service = value[serviceName];
if (!service.memorySwap) continue;
service.memoryLimit = service.memorySwap;
delete service.memorySwap;
delete service.memory;
}
db.runSql('UPDATE settings SET value=? WHERE name=?', [ JSON.stringify(value), 'platform_config' ], callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,28 +0,0 @@
'use strict';
const async = require('async');
exports.up = function(db, callback) {
db.all('SELECT * FROM apps', function (error, apps) {
if (error) return callback(error);
async.eachSeries(apps, function (app, iteratorDone) {
if (!app.servicesConfigJson) return iteratorDone();
let servicesConfig = JSON.parse(app.servicesConfigJson);
for (const serviceName of Object.keys(servicesConfig)) {
const service = servicesConfig[serviceName];
if (!service.memorySwap) continue;
service.memoryLimit = service.memorySwap;
delete service.memorySwap;
delete service.memory;
}
db.runSql('UPDATE apps SET servicesConfigJson=? WHERE id=?', [ JSON.stringify(servicesConfig), app.id ], iteratorDone);
}, callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,9 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'services_config', 'platform_config' ], callback);
};
exports.down = function(db, callback) {
db.runSql('UPDATE settings SET name=? WHERE name=?', [ 'platform_config', 'services_config' ], callback);
};
@@ -1,10 +0,0 @@
'use strict';
exports.up = function(db, callback) {
    /* this contained an invalid migration of OVH URLs from s3 subdomain to storage subdomain. See https://forum.cloudron.io/topic/4584/issue-with-backups-listings-and-saving-backup-config-in-6-2 */
    // the file has to remain as a no-op so installations that already ran it stay consistent
    callback();
};

exports.down = function(db, callback) {
    callback();
};
@@ -1,16 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT value FROM settings WHERE name="registry_config"', function (error, results) {
if (error || results.length === 0) return callback(error);
var registryConfig = JSON.parse(results[0].value);
if (!registryConfig.provider) registryConfig.provider = 'other';
db.runSql('UPDATE settings SET value=? WHERE name="registry_config"', [ JSON.stringify(registryConfig) ], callback);
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE tokens ADD COLUMN lastUsedTime TIMESTAMP NULL', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE tokens DROP COLUMN lastUsedTime', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,16 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps ADD COLUMN enableMailbox BOOLEAN DEFAULT 1', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps DROP COLUMN enableMailbox', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,17 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE mailboxes ADD COLUMN active BOOLEAN DEFAULT 1', function (error) {
if (error) return callback(error);
callback();
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE mailboxes DROP COLUMN active', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,37 +0,0 @@
'use strict';
const async = require('async'),
fs = require('fs'),
path = require('path');
const AVATAR_DIR = '/home/yellowtent/boxdata/profileicons';
// Move user avatars from per-user files under AVATAR_DIR into the new
// users.avatar blob column, then remove the on-disk directory.
exports.up = function(db, callback) {
    db.runSql('ALTER TABLE users ADD COLUMN avatar MEDIUMBLOB', function (error) {
        if (error) return callback(error);
        fs.readdir(AVATAR_DIR, function (error, filenames) {
            if (error && error.code === 'ENOENT') return callback(); // no avatars ever saved
            if (error) return callback(error);
            async.eachSeries(filenames, function (filename, iteratorCallback) {
                // NOTE(review): readFileSync throws (failing the migration) if a file vanishes mid-walk
                const avatar = fs.readFileSync(path.join(AVATAR_DIR, filename));
                const userId = filename; // avatar files are named after the user id
                db.runSql('UPDATE users SET avatar=? WHERE id=?', [ avatar, userId ], iteratorCallback);
            }, function (error) {
                if (error) return callback(error);
                fs.rmdir(AVATAR_DIR, { recursive: true }, callback); // only removed once all rows are updated
            });
        });
    });
};

exports.down = function(db, callback) {
    db.runSql('ALTER TABLE users DROP COLUMN avatar', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};
@@ -1,20 +0,0 @@
'use strict';
const fs = require('fs');
exports.up = function(db, callback) {
db.runSql('ALTER TABLE settings ADD COLUMN valueBlob MEDIUMBLOB', function (error) {
if (error) return callback(error);
fs.readFile('/home/yellowtent/boxdata/avatar.png', function (error, avatar) {
if (error && error.code === 'ENOENT') return callback();
if (error) return callback(error);
db.runSql('INSERT INTO settings (name, valueBlob) VALUES (?, ?)', [ 'cloudron_avatar', avatar ], callback);
});
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE users ADD COLUMN loginLocationsJson TEXT', function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE users DROP COLUMN loginLocationsJson', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,42 +0,0 @@
'use strict';
const async = require('async'),
fs = require('fs'),
path = require('path');
const APPICONS_DIR = '/home/yellowtent/boxdata/appicons';
// Move app icons from files under APPICONS_DIR into two new blob columns:
// '<appId>.user.png' files become apps.icon (user-uploaded), everything else
// becomes apps.appStoreIcon. The directory is removed afterwards.
exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN icon MEDIUMBLOB'),
        db.runSql.bind(db, 'ALTER TABLE apps ADD COLUMN appStoreIcon MEDIUMBLOB'),
        function migrateIcons(next) {
            fs.readdir(APPICONS_DIR, function (error, filenames) {
                if (error && error.code === 'ENOENT') return next(); // no icons ever saved
                if (error) return next(error);
                async.eachSeries(filenames, function (filename, iteratorCallback) {
                    const icon = fs.readFileSync(path.join(APPICONS_DIR, filename));
                    const appId = filename.split('.')[0]; // filenames are '<appId>.png' or '<appId>.user.png'
                    if (filename.endsWith('.user.png')) {
                        db.runSql('UPDATE apps SET icon=? WHERE id=?', [ icon, appId ], iteratorCallback);
                    } else {
                        db.runSql('UPDATE apps SET appStoreIcon=? WHERE id=?', [ icon, appId ], iteratorCallback);
                    }
                }, function (error) {
                    if (error) return next(error);
                    fs.rmdir(APPICONS_DIR, { recursive: true }, next); // only removed once all rows are updated
                });
            });
        }
    ], callback);
};

exports.down = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN icon'),
        db.runSql.bind(db, 'ALTER TABLE apps DROP COLUMN appStoreIcon'),
    ], callback);
};
@@ -1,15 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE apps MODIFY ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE apps MODIFY ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP', [], function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,20 +0,0 @@
'use strict';
exports.up = function(db, callback) {
const cmd = 'CREATE TABLE blobs(' +
'id VARCHAR(128) NOT NULL UNIQUE,' +
'value MEDIUMBLOB,' +
'PRIMARY KEY (id)) CHARACTER SET utf8 COLLATE utf8_bin';
db.runSql(cmd, function (error) {
if (error) console.error(error);
callback(error);
});
};
exports.down = function(db, callback) {
db.runSql('DROP TABLE blobs', function (error) {
if (error) console.error(error);
callback(error);
});
};
@@ -1,49 +0,0 @@
'use strict';
const async = require('async'),
fs = require('fs'),
safe = require('safetydance');
const BOX_DATA_DIR = '/home/yellowtent/boxdata';
const PLATFORM_DATA_DIR = '/home/yellowtent/platformdata';
// Move assorted secrets from loose files in boxdata into the blobs table.
// Each section is conditional: a missing file simply skips that item. File
// reads happen synchronously up front; the DB inserts and file removals are
// queued and executed in order at the end.
exports.up = function (db, callback) {
    let funcs = [];

    const acmeKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/acme/acme.key`);
    if (acmeKey) {
        funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'acme_account_key', acmeKey ]));
        funcs.push(fs.rmdir.bind(fs, `${BOX_DATA_DIR}/acme`, { recursive: true }));
    }

    const dhparams = safe.fs.readFileSync(`${BOX_DATA_DIR}/dhparams.pem`);
    if (dhparams) {
        safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/dhparams.pem`, dhparams);
        funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'dhparams', dhparams ]));
        // leave the dhparms here for the moment because startup code regenerates box nginx config and reloads nginx. at that point,
        // nginx config of apps has not been re-generated yet and the reload fails. post 6.3, this file can be removed in start.sh
        // funcs.push(fs.unlink.bind(fs, `${BOX_DATA_DIR}/dhparams.pem`));
    }

    const turnSecret = safe.fs.readFileSync(`${BOX_DATA_DIR}/addon-turn-secret`);
    if (turnSecret) {
        funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'addon_turn_secret', turnSecret ]));
        funcs.push(fs.unlink.bind(fs, `${BOX_DATA_DIR}/addon-turn-secret`));
    }

    // sftp keys get moved to platformdata in start.sh
    const sftpPublicKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/sftp/ssh/ssh_host_rsa_key.pub`);
    const sftpPrivateKey = safe.fs.readFileSync(`${BOX_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`);
    if (sftpPublicKey) {
        // NOTE(review): only the public key's presence is checked; a missing
        // private key would be stored as null here - confirm that cannot happen
        safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key.pub`, sftpPublicKey);
        safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`, sftpPrivateKey);
        safe.fs.chmodSync(`${PLATFORM_DATA_DIR}/sftp/ssh/ssh_host_rsa_key`, 0o600); // private key must not be world readable
        funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'sftp_public_key', sftpPublicKey ]));
        funcs.push(db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?)', [ 'sftp_private_key', sftpPrivateKey ]));
        funcs.push(fs.rmdir.bind(fs, `${BOX_DATA_DIR}/sftp`, { recursive: true }));
    }

    async.series(funcs, callback);
};

exports.down = function(db, callback) {
    callback();
};
@@ -1,31 +0,0 @@
'use strict';
const async = require('async'),
fs = require('fs'),
safe = require('safetydance');
const BOX_DATA_DIR = '/home/yellowtent/boxdata';
const PLATFORM_DATA_DIR = '/home/yellowtent/platformdata';
// Move firewall state out of boxdata: ports.json is copied to platformdata,
// the blocklist goes into the settings table (and a copy on disk), and the
// old boxdata/firewall directory is removed.
exports.up = function (db, callback) {
    if (!fs.existsSync(`${BOX_DATA_DIR}/firewall`)) return callback(); // fresh install, nothing to migrate

    const ports = safe.fs.readFileSync(`${BOX_DATA_DIR}/firewall/ports.json`);
    if (ports) {
        safe.fs.writeFileSync(`${PLATFORM_DATA_DIR}/firewall/ports.json`, ports);
    }

    const blocklist = safe.fs.readFileSync(`${BOX_DATA_DIR}/firewall/blocklist.txt`);
    async.series([
        (next) => {
            if (!blocklist) return next(); // nothing blocked; skip the DB row
            db.runSql('INSERT INTO settings (name, valueBlob) VALUES (?, ?)', [ 'firewall_blocklist', blocklist ], next);
        },
        // an on-disk copy is always written, empty if there was no blocklist
        fs.writeFile.bind(fs, `${PLATFORM_DATA_DIR}/firewall/blocklist.txt`, blocklist || ''),
        fs.rmdir.bind(fs, `${BOX_DATA_DIR}/firewall`, { recursive: true })
    ], callback);
};

exports.down = function(db, callback) {
    callback();
};
@@ -1,38 +0,0 @@
'use strict';
const async = require('async'),
safe = require('safetydance');
const CERTS_DIR = '/home/yellowtent/boxdata/certs',
PLATFORM_CERTS_DIR = '/home/yellowtent/platformdata/nginx/cert';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE domains ADD COLUMN fallbackCertificateJson MEDIUMTEXT', function (error) {
if (error) return callback(error);
db.all('SELECT * FROM domains', [ ], function (error, domains) {
if (error) return callback(error);
async.eachSeries(domains, function (domain, iteratorDone) {
// b94dbf5fa33a6d68d784571721ff44348c2d88aa seems to have moved certs from platformdata to boxdata
let cert = safe.fs.readFileSync(`${CERTS_DIR}/${domain.domain}.host.cert`, 'utf8');
let key = safe.fs.readFileSync(`${CERTS_DIR}/${domain.domain}.host.key`, 'utf8');
if (!cert) {
cert = safe.fs.readFileSync(`${PLATFORM_CERTS_DIR}/${domain.domain}.host.cert`, 'utf8');
key = safe.fs.readFileSync(`${PLATFORM_CERTS_DIR}/${domain.domain}.host.key`, 'utf8');
}
const fallbackCertificate = { cert, key };
db.runSql('UPDATE domains SET fallbackCertificateJson=? WHERE domain=?', [ JSON.stringify(fallbackCertificate), domain.domain ], iteratorDone);
}, callback);
});
});
};
exports.down = function(db, callback) {
async.series([
db.runSql.run(db, 'ALTER TABLE domains DROP COLUMN fallbackCertificateJson')
], callback);
};
@@ -1,34 +0,0 @@
'use strict';
const async = require('async'),
fs = require('fs'),
safe = require('safetydance');
const CERTS_DIR = '/home/yellowtent/boxdata/certs';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE subdomains ADD COLUMN certificateJson MEDIUMTEXT', function (error) {
if (error) return callback(error);
db.all('SELECT * FROM subdomains', [ ], function (error, subdomains) {
if (error) return callback(error);
async.eachSeries(subdomains, function (subdomain, iteratorDone) {
const cert = safe.fs.readFileSync(`${CERTS_DIR}/${subdomain.subdomain}.${subdomain.domain}.user.cert`, 'utf8');
const key = safe.fs.readFileSync(`${CERTS_DIR}/${subdomain.subdomain}.${subdomain.domain}.user.key`, 'utf8');
if (!cert || !key) return iteratorDone();
const certificate = { cert, key };
db.runSql('UPDATE subdomains SET certificateJson=? WHERE domain=? AND subdomain=?', [ JSON.stringify(certificate), subdomain.domain, subdomain.subdomain ], iteratorDone);
}, callback);
});
});
};
exports.down = function(db, callback) {
async.series([
db.runSql.run(db, 'ALTER TABLE subdomains DROP COLUMN certificateJson')
], callback);
};
@@ -1,52 +0,0 @@
'use strict';
const async = require('async'),
child_process = require('child_process'),
fs = require('fs'),
path = require('path'),
safe = require('safetydance');
const OLD_CERTS_DIR = '/home/yellowtent/boxdata/certs';
const NEW_CERTS_DIR = '/home/yellowtent/platformdata/nginx/cert';
// Move issued certificates (key + cert + csr triples) from OLD_CERTS_DIR into
// the blobs table under 'cert-<filename>' ids, copy everything (including the
// non-migrated .host/.user files) to NEW_CERTS_DIR, then delete the old dir.
exports.up = function(db, callback) {
    fs.readdir(OLD_CERTS_DIR, function (error, filenames) {
        if (error && error.code === 'ENOENT') return callback(); // already migrated / fresh install
        if (error) return callback(error);
        filenames = filenames.filter(f => f.endsWith('.key') && !f.endsWith('.host.key') && !f.endsWith('.user.key')); // ignore fallback and user keys
        async.eachSeries(filenames, function (filename, iteratorCallback) {
            const privateKeyFile = filename;
            const privateKey = fs.readFileSync(path.join(OLD_CERTS_DIR, filename));
            const certificateFile = filename.replace(/\.key$/, '.cert');
            const certificate = safe.fs.readFileSync(path.join(OLD_CERTS_DIR, certificateFile));
            if (!certificate) { // a key without its cert is useless; skip the triple
                console.log(`${certificateFile} is missing. skipping migration`);
                return iteratorCallback();
            }
            const csrFile = filename.replace(/\.key$/, '.csr');
            const csr = safe.fs.readFileSync(path.join(OLD_CERTS_DIR, csrFile));
            if (!csr) {
                console.log(`${csrFile} is missing. skipping migration`);
                return iteratorCallback();
            }
            // NOTE(review): params are passed as two separate trailing args (not
            // an array); this relies on db-migrate runSql's variadic argument
            // handling - confirm against the driver version in use
            async.series([
                db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${privateKeyFile}`, privateKey),
                db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${certificateFile}`, certificate),
                db.runSql.bind(db, 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)', `cert-${csrFile}`, csr),
            ], iteratorCallback);
        }, function (error) {
            if (error) return callback(error);
            child_process.execSync(`cp ${OLD_CERTS_DIR}/* ${NEW_CERTS_DIR}`); // this way we copy the non-migrated ones like .host, .user etc as well
            fs.rmdir(OLD_CERTS_DIR, { recursive: true }, callback);
        });
    });
};

exports.down = function(db, callback) {
    callback();
};
@@ -1,17 +0,0 @@
'use strict';
const async = require('async');
exports.up = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE volumes ADD COLUMN mountType VARCHAR(16) DEFAULT "noop"'),
db.runSql.bind(db, 'ALTER TABLE volumes ADD COLUMN mountOptionsJson TEXT')
], callback);
};
exports.down = function(db, callback) {
async.series([
db.runSql.bind(db, 'ALTER TABLE volumes DROP COLUMN mountType'),
db.runSql.bind(db, 'ALTER TABLE volumes DROP COLUMN mountOptionsJson')
], callback);
};
@@ -1,21 +0,0 @@
'use strict';
var async = require('async');
// Add creationTime indexes to the tables that list/paginate by creation time.
exports.up = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE backups ADD INDEX creationTime_index (creationTime)'),
        db.runSql.bind(db, 'ALTER TABLE eventlog ADD INDEX creationTime_index (creationTime)'),
        db.runSql.bind(db, 'ALTER TABLE notifications ADD INDEX creationTime_index (creationTime)'),
        db.runSql.bind(db, 'ALTER TABLE tasks ADD INDEX creationTime_index (creationTime)'),
    ], callback);
};

exports.down = function(db, callback) {
    async.series([
        db.runSql.bind(db, 'ALTER TABLE backups DROP INDEX creationTime_index'),
        db.runSql.bind(db, 'ALTER TABLE eventlog DROP INDEX creationTime_index'),
        db.runSql.bind(db, 'ALTER TABLE notifications DROP INDEX creationTime_index'),
        db.runSql.bind(db, 'ALTER TABLE tasks DROP INDEX creationTime_index'),
    ], callback);
};
@@ -1,33 +0,0 @@
'use strict';
const async = require('async');
// Replace the string column users.createdAt with a proper indexed TIMESTAMP
// column users.creationTime, backfilling from the old values. Order matters:
// add column, add index, backfill each row, then drop the old column.
exports.up = function(db, callback) {
    db.runSql('ALTER TABLE users ADD COLUMN creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP', function (error) {
        if (error) return callback(error);
        db.runSql('ALTER TABLE users ADD INDEX creationTime_index (creationTime)', function (error) {
            if (error) return callback(error);
            db.all('SELECT id, createdAt FROM users', function (error, results) {
                if (error) return callback(error);
                async.eachSeries(results, function (r, iteratorDone) {
                    const creationTime = new Date(r.createdAt); // old column held a date string
                    db.runSql('UPDATE users SET creationTime=? WHERE id=?', [ creationTime, r.id ], iteratorDone);
                }, function (error) {
                    if (error) return callback(error);
                    db.runSql('ALTER TABLE users DROP COLUMN createdAt', callback); // only after every row migrated
                });
            });
        });
    });
};

exports.down = function(db, callback) {
    // createdAt is not restored; rollback only removes the new column
    db.runSql('ALTER TABLE users DROP COLUMN creationTime', function (error) {
        if (error) console.error(error);
        callback(error);
    });
};
@@ -1,27 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.all('SELECT value FROM settings WHERE name="backup_config"', function (error, results) {
if (error || results.length === 0) return callback(error);
const backupConfig = JSON.parse(results[0].value);
if (backupConfig.provider === 'sshfs' || backupConfig.provider === 'cifs' || backupConfig.provider === 'nfs' || backupConfig.externalDisk) {
backupConfig.chown = backupConfig.provider === 'nfs' || backupConfig.provider === 'sshfs' || backupConfig.externalDisk;
backupConfig.preserveAttributes = !!backupConfig.externalDisk;
backupConfig.provider = 'mountpoint';
if (backupConfig.externalDisk) {
backupConfig.mountPoint = backupConfig.backupFolder;
backupConfig.prefix = '';
delete backupConfig.backupFolder;
delete backupConfig.externalDisk;
}
db.runSql('UPDATE settings SET value=? WHERE name="backup_config"', [JSON.stringify(backupConfig)], callback);
} else {
callback();
}
});
};
exports.down = function(db, callback) {
callback();
};
@@ -1,13 +0,0 @@
'use strict';
exports.up = function(db, callback) {
db.runSql('ALTER TABLE notifications DROP COLUMN userId', function (error) {
if (error) return callback(error);
db.runSql('DELETE FROM notifications', callback); // just clear notifications table
});
};
exports.down = function(db, callback) {
db.runSql('ALTER TABLE notifications ADD COLUMN userId VARCHAR(128) NOT NULL', callback);
};
@@ -1,26 +0,0 @@
'use strict';
const async = require('async'),
safe = require('safetydance');
exports.up = function(db, callback) {
db.all('SELECT * FROM volumes', function (error, volumes) {
if (error || volumes.length === 0) return callback(error);
async.eachSeries(volumes, function (volume, iteratorDone) {
if (volume.mountType !== 'noop') return iteratorDone();
let mountType;
if (safe.child_process.execSync(`mountpoint -q -- ${volume.hostPath}`)) {
mountType = 'mountpoint';
} else {
mountType = 'filesystem';
}
db.runSql('UPDATE volumes SET mountType=? WHERE id=?', [ mountType, volume.id ], iteratorDone);
}, callback);
});
};
exports.down = function(db, callback) {
callback();
};
+7 -51
View File
@@ -6,7 +6,7 @@
#### Strict mode is enabled
#### VARCHAR - stored as part of table row (use for strings)
#### TEXT - stored offline from table row (use for strings)
#### BLOB (64KB), MEDIUMBLOB (16MB), LONGBLOB (4GB) - stored offline from table row (use for binary data)
#### BLOB - stored offline from table row (use for binary data)
#### https://dev.mysql.com/doc/refman/5.0/en/storage-requirements.html
#### Times are stored in the database in UTC. And precision is seconds
@@ -20,7 +20,7 @@ CREATE TABLE IF NOT EXISTS users(
email VARCHAR(254) NOT NULL UNIQUE,
password VARCHAR(1024) NOT NULL,
salt VARCHAR(512) NOT NULL,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
createdAt VARCHAR(512) NOT NULL,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
displayName VARCHAR(512) DEFAULT "",
fallbackEmail VARCHAR(512) DEFAULT "",
@@ -31,10 +31,7 @@ CREATE TABLE IF NOT EXISTS users(
resetToken VARCHAR(128) DEFAULT "",
resetTokenCreationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
active BOOLEAN DEFAULT 1,
avatar MEDIUMBLOB,
locationJson TEXT, // { locations: [{ ip, userAgent, city, country, ts }] }
INDEX creationTime_index (creationTime),
PRIMARY KEY(id));
CREATE TABLE IF NOT EXISTS userGroups(
@@ -47,8 +44,7 @@ CREATE TABLE IF NOT EXISTS groupMembers(
groupId VARCHAR(128) NOT NULL,
userId VARCHAR(128) NOT NULL,
FOREIGN KEY(groupId) REFERENCES userGroups(id),
FOREIGN KEY(userId) REFERENCES users(id),
UNIQUE (groupId, userId));
FOREIGN KEY(userId) REFERENCES users(id));
CREATE TABLE IF NOT EXISTS tokens(
id VARCHAR(128) NOT NULL UNIQUE,
@@ -58,7 +54,6 @@ CREATE TABLE IF NOT EXISTS tokens(
clientId VARCHAR(128),
scope VARCHAR(512) NOT NULL,
expires BIGINT NOT NULL, // FIXME: make this a timestamp
lastUsedTime TIMESTAMP NULL,
PRIMARY KEY(accessToken));
CREATE TABLE IF NOT EXISTS apps(
@@ -70,6 +65,7 @@ CREATE TABLE IF NOT EXISTS apps(
healthTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app last responded
containerId VARCHAR(128),
manifestJson TEXT,
httpPort INTEGER, // this is the nginx proxy port and not manifest.httpPort
accessRestrictionJson TEXT, // { users: [ ], groups: [ ] }
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the app was installed
updateTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, // when the last app update was done
@@ -82,7 +78,6 @@ CREATE TABLE IF NOT EXISTS apps(
reverseProxyConfigJson TEXT, // { robotsTxt, csp }
enableBackup BOOLEAN DEFAULT 1, // misnomer: controls automatic daily backups
enableAutomaticUpdate BOOLEAN DEFAULT 1,
enableMailbox BOOLEAN DEFAULT 1, // whether sendmail addon is enabled
mailboxName VARCHAR(128), // mailbox of this app
mailboxDomain VARCHAR(128), // mailbox domain of this apps
label VARCHAR(128), // display name
@@ -90,10 +85,8 @@ CREATE TABLE IF NOT EXISTS apps(
dataDir VARCHAR(256) UNIQUE,
taskId INTEGER, // current task
errorJson TEXT,
bindsJson TEXT, // bind mounts
servicesConfigJson TEXT, // app services configuration
containerIp VARCHAR(16) UNIQUE, // this is not-null because of ip allocation fails, user can 'repair'
appStoreIcon MEDIUMBLOB,
icon MEDIUMBLOB,
FOREIGN KEY(mailboxDomain) REFERENCES domains(domain),
FOREIGN KEY(taskId) REFERENCES tasks(id),
@@ -110,7 +103,6 @@ CREATE TABLE IF NOT EXISTS appPortBindings(
CREATE TABLE IF NOT EXISTS settings(
name VARCHAR(128) NOT NULL UNIQUE,
value TEXT,
valueBlob MEDIUMBLOB,
PRIMARY KEY(name));
CREATE TABLE IF NOT EXISTS appAddonConfigs(
@@ -139,7 +131,6 @@ CREATE TABLE IF NOT EXISTS backups(
format VARCHAR(16) DEFAULT "tgz",
preserveSecs INTEGER DEFAULT 0,
INDEX creationTime_index (creationTime),
PRIMARY KEY (id));
CREATE TABLE IF NOT EXISTS eventlog(
@@ -149,7 +140,6 @@ CREATE TABLE IF NOT EXISTS eventlog(
data TEXT, /* free flowing json based on action */
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX creationTime_index (creationTime),
PRIMARY KEY (id));
CREATE TABLE IF NOT EXISTS domains(
@@ -158,9 +148,6 @@ CREATE TABLE IF NOT EXISTS domains(
provider VARCHAR(16) NOT NULL,
configJson TEXT, /* JSON containing the dns backend provider config */
tlsConfigJson TEXT, /* JSON containing the tls provider config */
wellKnownJson TEXT, /* JSON containing well known docs for this domain */
fallbackCertificateJson MEDIUMTEXT,
PRIMARY KEY (domain))
@@ -194,14 +181,12 @@ CREATE TABLE IF NOT EXISTS mailboxes(
name VARCHAR(128) NOT NULL,
type VARCHAR(16) NOT NULL, /* 'mailbox', 'alias', 'list' */
ownerId VARCHAR(128) NOT NULL, /* user id */
ownerType VARCHAR(16) NOT NULL,
aliasName VARCHAR(128), /* the target name type is an alias */
aliasDomain VARCHAR(128), /* the target domain */
membersJson TEXT, /* members of a group. fully qualified */
membersOnly BOOLEAN DEFAULT false,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
domain VARCHAR(128),
active BOOLEAN DEFAULT 1,
FOREIGN KEY(domain) REFERENCES mail(domain),
FOREIGN KEY(aliasDomain) REFERENCES mail(domain),
@@ -213,8 +198,6 @@ CREATE TABLE IF NOT EXISTS subdomains(
subdomain VARCHAR(128) NOT NULL,
type VARCHAR(128) NOT NULL, /* primary or redirect */
certificateJson MEDIUMTEXT,
FOREIGN KEY(domain) REFERENCES domains(domain),
FOREIGN KEY(appId) REFERENCES apps(id),
UNIQUE (subdomain, domain));
@@ -228,20 +211,17 @@ CREATE TABLE IF NOT EXISTS tasks(
resultJson TEXT,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX creationTime_index (creationTime),
PRIMARY KEY (id));
CREATE TABLE IF NOT EXISTS notifications(
id int NOT NULL AUTO_INCREMENT,
userId VARCHAR(128) NOT NULL,
eventId VARCHAR(128), // reference to eventlog. can be null
title VARCHAR(512) NOT NULL,
message TEXT,
acknowledged BOOLEAN DEFAULT false,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX creationTime_index (creationTime),
FOREIGN KEY(eventId) REFERENCES eventlog(id),
UNIQUE KEY appPasswords_name_appId_identifier (name, userId, identifier),
PRIMARY KEY (id)
);
@@ -252,33 +232,9 @@ CREATE TABLE IF NOT EXISTS appPasswords(
identifier VARCHAR(128) NOT NULL, // resourceId: app id or mail or webadmin
hashedPassword VARCHAR(1024) NOT NULL,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
UNIQUE KEY appPasswords_name_appId_identifier (name, userId, identifier)
FOREIGN KEY(userId) REFERENCES users(id),
PRIMARY KEY (id)
);
CREATE TABLE IF NOT EXISTS volumes(
id VARCHAR(128) NOT NULL UNIQUE,
name VARCHAR(256) NOT NULL UNIQUE,
hostPath VARCHAR(1024) NOT NULL UNIQUE,
creationTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
mountType VARCHAR(16) DEFAULT "noop",
mountOptionsJson TEXT,
PRIMARY KEY (id)
);
CREATE TABLE IF NOT EXISTS appMounts(
appId VARCHAR(128) NOT NULL,
volumeId VARCHAR(128) NOT NULL,
readOnly BOOLEAN DEFAULT 1,
UNIQUE KEY appMounts_appId_volumeId (appId, volumeId),
FOREIGN KEY(appId) REFERENCES apps(id),
FOREIGN KEY(volumeId) REFERENCES volumes(id));
CREATE TABLE IF NOT EXISTS blobs(
id VARCHAR(128) NOT NULL UNIQUE,
value TEXT,
PRIMARY KEY(id));
CHARACTER SET utf8 COLLATE utf8_bin;
+1468 -1251
View File
File diff suppressed because it is too large Load Diff
+37 -39
View File
@@ -10,78 +10,76 @@
"type": "git",
"url": "https://git.cloudron.io/cloudron/box.git"
},
"engines": {
"node": ">=4.0.0 <=4.1.1"
},
"dependencies": {
"@google-cloud/dns": "^2.1.0",
"@google-cloud/storage": "^5.8.5",
"@google-cloud/dns": "^1.2.9",
"@google-cloud/storage": "^2.5.0",
"@sindresorhus/df": "git+https://github.com/cloudron-io/df.git#type",
"async": "^3.2.0",
"aws-sdk": "^2.906.0",
"basic-auth": "^2.0.1",
"async": "^2.6.3",
"aws-sdk": "^2.759.0",
"body-parser": "^1.19.0",
"cloudron-manifestformat": "^5.10.2",
"cloudron-manifestformat": "^5.6.0",
"connect": "^3.7.0",
"connect-lastmile": "^2.1.1",
"connect-lastmile": "^2.0.0",
"connect-timeout": "^1.9.0",
"cookie-parser": "^1.4.5",
"cookie-session": "^1.4.0",
"cron": "^1.8.2",
"db-migrate": "^0.11.12",
"db-migrate-mysql": "^2.1.2",
"debug": "^4.3.1",
"delay": "^5.0.0",
"dockerode": "^3.3.0",
"ejs": "^3.1.6",
"db-migrate": "^0.11.11",
"db-migrate-mysql": "^2.1.1",
"debug": "^4.2.0",
"dockerode": "^2.5.8",
"ejs": "^2.6.1",
"ejs-cli": "^2.2.1",
"express": "^4.17.1",
"ipaddr.js": "^2.0.0",
"js-yaml": "^4.1.0",
"json": "^11.0.0",
"jsonwebtoken": "^8.5.1",
"ldapjs": "^2.2.4",
"lodash": "^4.17.21",
"js-yaml": "^3.14.0",
"json": "^9.0.6",
"ldapjs": "^2.2.0",
"lodash": "^4.17.20",
"lodash.chunk": "^4.2.0",
"mime": "^2.5.2",
"moment": "^2.29.1",
"moment-timezone": "^0.5.33",
"mime": "^2.4.6",
"moment": "^2.29.0",
"moment-timezone": "^0.5.31",
"morgan": "^1.10.0",
"multiparty": "^4.2.2",
"mustache-express": "^1.3.0",
"mysql": "^2.18.1",
"nodemailer": "^6.6.0",
"nodemailer": "^6.4.11",
"nodemailer-smtp-transport": "^2.7.4",
"once": "^1.4.0",
"pretty-bytes": "^5.6.0",
"pretty-bytes": "^5.4.1",
"progress-stream": "^2.0.0",
"proxy-middleware": "^0.15.0",
"qrcode": "^1.4.4",
"readdirp": "^3.6.0",
"readdirp": "^3.4.0",
"request": "^2.88.2",
"rimraf": "^3.0.2",
"rimraf": "^2.6.3",
"s3-block-read-stream": "^0.5.0",
"safetydance": "^2.0.1",
"semver": "^7.3.5",
"safetydance": "^1.1.1",
"semver": "^6.1.1",
"showdown": "^1.9.1",
"speakeasy": "^2.0.0",
"split": "^1.0.1",
"superagent": "^6.1.0",
"superagent": "^5.3.1",
"supererror": "^0.7.2",
"tar-fs": "github:cloudron-io/tar-fs#ignore_stat_error",
"tar-stream": "^2.2.0",
"tar-stream": "^2.1.4",
"tldjs": "^2.3.1",
"ua-parser-js": "^0.7.28",
"underscore": "^1.13.1",
"uuid": "^8.3.2",
"validator": "^13.6.0",
"ws": "^7.4.5",
"underscore": "^1.11.0",
"uuid": "^3.4.0",
"validator": "^11.0.0",
"ws": "^7.3.1",
"xml2js": "^0.4.23"
},
"devDependencies": {
"expect.js": "*",
"hock": "^1.4.1",
"js2xmlparser": "^4.0.1",
"mocha": "^8.4.0",
"mocha": "^6.2.3",
"mock-aws-s3": "git+https://github.com/cloudron-io/mock-aws-s3.git",
"nock": "^13.0.11",
"node-sass": "^6.0.0",
"nock": "^10.0.6",
"node-sass": "^4.14.1",
"recursive-readdir": "^2.2.2"
},
"scripts": {
+9 -22
View File
@@ -2,11 +2,11 @@
set -eu
readonly source_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SOURCE_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly DATA_DIR="${HOME}/.cloudron_test"
readonly DEFAULT_TESTS="./src/test/*-test.js ./src/routes/test/*-test.js"
! "${source_dir}/src/test/checkInstall" && exit 1
! "${SOURCE_dir}/src/test/checkInstall" && exit 1
# cleanup old data dirs some of those docker container data requires sudo to be removed
echo "=> Provide root password to purge any leftover data in ${DATA_DIR} and load apparmor profile:"
@@ -22,29 +22,19 @@ fi
mkdir -p ${DATA_DIR}
cd ${DATA_DIR}
mkdir -p appsdata
mkdir -p boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
mkdir -p platformdata/addons/mail/banner platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks platformdata/sftp/ssh platformdata/firewall platformdata/update
sudo mkdir -p /mnt/cloudron-test-music /media/cloudron-test-music # volume test
# translations
mkdir -p box/dashboard/dist/translation
cp -r ${source_dir}/../dashboard/dist/translation/* box/dashboard/dist/translation
mkdir -p boxdata/profileicons boxdata/appicons boxdata/mail boxdata/certs boxdata/mail/dkim/localhost boxdata/mail/dkim/foobar.com
mkdir -p platformdata/addons/mail platformdata/nginx/cert platformdata/nginx/applications platformdata/collectd/collectd.conf.d platformdata/addons platformdata/logrotate.d platformdata/backup platformdata/logs/tasks
# put cert
echo "=> Generating a localhost selfsigned cert"
openssl req -x509 -newkey rsa:2048 -keyout platformdata/nginx/cert/host.key -out platformdata/nginx/cert/host.cert -days 3650 -subj '/CN=localhost' -nodes -config <(cat /etc/ssl/openssl.cnf <(printf "\n[SAN]\nsubjectAltName=DNS:*.localhost"))
# clear out any containers if FAST is unset
if [[ -z ${FAST+x} ]]; then
echo "=> Delete all docker containers first"
docker ps -qa | xargs --no-run-if-empty docker rm -f
echo "==> To skip this run with: FAST=1 ./runTests"
else
echo "==> WARNING!! Skipping docker container cleanup, the database might not be pristine!"
fi
# clear out any containers
echo "=> Delete all docker containers first"
docker ps -qa | xargs --no-run-if-empty docker rm -f
# create docker network (while the infra code does this, most tests skip infra setup)
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 cloudron || true
docker network create --subnet=172.18.0.0/16 cloudron || true
# create the same mysql server version to test with
OUT=`docker inspect mysql-server` || true
@@ -62,9 +52,6 @@ while ! mysqladmin ping -h"${MYSQL_IP}" --silent; do
sleep 1
done
echo "=> Create iptables blocklist"
sudo ipset create cloudron_blocklist hash:net || true
echo "=> Starting cloudron-syslog"
cloudron-syslog --logdir "${DATA_DIR}/platformdata/logs/" &
@@ -72,7 +59,7 @@ echo "=> Ensure database"
mysql -h"${MYSQL_IP}" -uroot -ppassword -e 'CREATE DATABASE IF NOT EXISTS box'
echo "=> Run database migrations"
cd "${source_dir}"
cd "${SOURCE_dir}"
BOX_ENV=test DATABASE_URL=mysql://root:password@${MYSQL_IP}/box node_modules/.bin/db-migrate up
echo "=> Run tests with mocha"
+14 -53
View File
@@ -2,12 +2,6 @@
set -eu -o pipefail
function exitHandler() {
rm -f /etc/update-motd.d/91-cloudron-install-in-progress
}
trap exitHandler EXIT
# change this to a hash when we make a upgrade release
readonly LOG_FILE="/var/log/cloudron-setup.log"
readonly MINIMUM_DISK_SIZE_GB="18" # this is the size of "/" and required to fit in docker images 18 is a safe bet for different reporting on 20GB min
@@ -41,8 +35,7 @@ if [[ "${disk_size_gb}" -lt "${MINIMUM_DISK_SIZE_GB}" ]]; then
exit 1
fi
# do not use is-active in case box service is down and user attempts to re-install
if systemctl cat box.service >/dev/null 2>&1; then
if systemctl -q is-active box; then
echo "Error: Cloudron is already installed. To reinstall, start afresh"
exit 1
fi
@@ -50,14 +43,12 @@ fi
initBaseImage="true"
provider="generic"
requestedVersion=""
installServerOrigin="https://api.cloudron.io"
apiServerOrigin="https://api.cloudron.io"
webServerOrigin="https://cloudron.io"
sourceTarballUrl=""
rebootServer="true"
setupToken=""
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot,generate-setup-token" -n "$0" -- "$@")
args=$(getopt -o "" -l "help,skip-baseimage-init,provider:,version:,env:,skip-reboot" -n "$0" -- "$@")
eval set -- "${args}"
while true; do
@@ -69,18 +60,13 @@ while true; do
if [[ "$2" == "dev" ]]; then
apiServerOrigin="https://api.dev.cloudron.io"
webServerOrigin="https://dev.cloudron.io"
installServerOrigin="https://api.dev.cloudron.io"
elif [[ "$2" == "staging" ]]; then
apiServerOrigin="https://api.staging.cloudron.io"
webServerOrigin="https://staging.cloudron.io"
installServerOrigin="https://api.staging.cloudron.io"
elif [[ "$2" == "unstable" ]]; then
installServerOrigin="https://api.dev.cloudron.io"
fi
shift 2;;
--skip-baseimage-init) initBaseImage="false"; shift;;
--skip-reboot) rebootServer="false"; shift;;
--generate-setup-token) setupToken="$(openssl rand -hex 10)"; shift;;
--) break;;
*) echo "Unknown option $1"; exit 1;;
esac
@@ -94,36 +80,11 @@ fi
# Only --help works with mismatched ubuntu
ubuntu_version=$(lsb_release -rs)
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" && "${ubuntu_version}" != "20.04" ]]; then
echo "Cloudron requires Ubuntu 16.04, 18.04 or 20.04" > /dev/stderr
if [[ "${ubuntu_version}" != "16.04" && "${ubuntu_version}" != "18.04" ]]; then
echo "Cloudron requires Ubuntu 16.04 or 18.04" > /dev/stderr
exit 1
fi
if which nginx >/dev/null || which docker >/dev/null || which node > /dev/null; then
echo "Error: Some packages like nginx/docker/nodejs are already installed. Cloudron requires specific versions of these packages and will install them as part of it's installation. Please start with a fresh Ubuntu install and run this script again." > /dev/stderr
exit 1
fi
# Install MOTD file for stack script style installations. this is removed by the trap exit handler. Heredoc quotes prevents parameter expansion
cat > /etc/update-motd.d/91-cloudron-install-in-progress <<'EOF'
#!/bin/bash
printf "**********************************************************************\n\n"
printf "\t\t\tWELCOME TO CLOUDRON\n"
printf "\t\t\t-------------------\n"
printf '\n\e[1;32m%-6s\e[m\n\n' "Cloudron is installing. Run 'tail -f /var/log/cloudron-setup.log' to view progress."
printf "Cloudron overview - https://docs.cloudron.io/ \n"
printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
printf "\nFor help and more information, visit https://forum.cloudron.io\n\n"
printf "**********************************************************************\n"
EOF
chmod +x /etc/update-motd.d/91-cloudron-install-in-progress
# Can only write after we have confirmed script has root access
echo "Running cloudron-setup with args : $@" > "${LOG_FILE}"
@@ -139,20 +100,26 @@ echo " Join us at https://forum.cloudron.io for any questions."
echo ""
if [[ "${initBaseImage}" == "true" ]]; then
echo "=> Installing software-properties-common"
if ! apt-get install -y software-properties-common &>> "${LOG_FILE}"; then
echo "Could not install software-properties-common (for add-apt-repository below). See ${LOG_FILE}"
exit 1
fi
echo "=> Updating apt and installing script dependencies"
if ! apt-get update &>> "${LOG_FILE}"; then
echo "Could not update package repositories. See ${LOG_FILE}"
exit 1
fi
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install --no-install-recommends curl python3 ubuntu-standard software-properties-common -y &>> "${LOG_FILE}"; then
if ! DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y install curl python3 ubuntu-standard -y &>> "${LOG_FILE}"; then
echo "Could not install setup dependencies (curl). See ${LOG_FILE}"
exit 1
fi
fi
echo "=> Checking version"
if ! releaseJson=$($curl -s "${installServerOrigin}/api/v1/releases?boxVersion=${requestedVersion}"); then
if ! releaseJson=$($curl -s "${apiServerOrigin}/api/v1/releases?boxVersion=${requestedVersion}"); then
echo "Failed to get release information"
exit 1
fi
@@ -190,7 +157,6 @@ fi
echo "=> Installing version ${version} (this takes some time) ..."
mkdir -p /etc/cloudron
echo "${provider}" > /etc/cloudron/PROVIDER
[[ ! -z "${setupToken}" ]] && echo "${setupToken}" > /etc/cloudron/SETUP_TOKEN
if ! /bin/bash "${box_src_tmp_dir}/scripts/installer.sh" &>> "${LOG_FILE}"; then
echo "Failed to install cloudron. See ${LOG_FILE} for details"
@@ -212,12 +178,7 @@ done
if ! ip=$(curl -s --fail --connect-timeout 2 --max-time 2 https://api.cloudron.io/api/v1/helper/public_ip | sed -n -e 's/.*"ip": "\(.*\)"/\1/p'); then
ip='<IP>'
fi
if [[ -z "${setupToken}" ]]; then
url="https://${ip}"
else
url="https://${ip}/?setupToken=${setupToken}"
fi
echo -e "\n\n${GREEN}After reboot, visit ${url} and accept the self-signed certificate to finish setup.${DONE}\n"
echo -e "\n\n${GREEN}Visit https://${ip} and accept the self-signed certificate to finish setup.${DONE}\n"
if [[ "${rebootServer}" == "true" ]]; then
systemctl stop box mysql # sometimes mysql ends up having corrupt privilege tables
@@ -225,7 +186,7 @@ if [[ "${rebootServer}" == "true" ]]; then
read -p "The server has to be rebooted to apply all the settings. Reboot now ? [Y/n] " yn
yn=${yn:-y}
case $yn in
[Yy]* ) exitHandler; systemctl reboot;;
[Yy]* ) systemctl reboot;;
* ) exit;;
esac
fi
+3 -7
View File
@@ -37,13 +37,12 @@ while true; do
# fall through
;&
--owner-login)
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY creationTime LIMIT 1" 2>/dev/null)
admin_username=$(mysql -NB -uroot -ppassword -e "SELECT username FROM box.users WHERE role='owner' AND username IS NOT NULL ORDER BY createdAt LIMIT 1" 2>/dev/null)
admin_password=$(pwgen -1s 12)
dashboard_domain=$(mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" 2>/dev/nul)
ghost_file=/home/yellowtent/platformdata/cloudron_ghost.json
printf '{"%s":"%s"}\n' "${admin_username}" "${admin_password}" > "${ghost_file}"
chown yellowtent:yellowtent "${ghost_file}" && chmod o-r,g-r "${ghost_file}"
echo "Login at https://${dashboard_domain} as ${admin_username} / ${admin_password} . This password may only be used once. ${ghost_file} will be automatically removed after use."
echo "Login as ${admin_username} / ${admin_password} . Remove ${ghost_file} when done."
exit 0
;;
--) break;;
@@ -74,9 +73,6 @@ echo -n "Generating Cloudron Support stats..."
# clear file
rm -rf $OUT
echo -e $LINE"DASHBOARD DOMAIN"$LINE >> $OUT
mysql -NB -uroot -ppassword -e "SELECT value FROM box.settings WHERE name='admin_fqdn'" &>> $OUT 2>/dev/null || true
echo -e $LINE"PROVIDER"$LINE >> $OUT
cat /etc/cloudron/PROVIDER &>> $OUT || true
@@ -103,7 +99,7 @@ systemctl status --lines=100 box mysql unbound cloudron-syslog nginx collectd do
echo -e $LINE"Box logs"$LINE >> $OUT
tail -n 100 /home/yellowtent/platformdata/logs/box.log &>> $OUT
echo -e $LINE"Interface Info"$LINE >> $OUT
echo -e $LINE"Firewall chains"$LINE >> $OUT
ip addr &>> $OUT
echo -e $LINE"Firewall chains"$LINE >> $OUT
-31
View File
@@ -1,31 +0,0 @@
#!/bin/bash
set -eu -o pipefail
# This script downloads new translation data from weblate at https://translate.cloudron.io
OUT="/home/yellowtent/box/dashboard/dist/translation"
# We require root
if [[ ${EUID} -ne 0 ]]; then
echo "This script should be run as root. Run with sudo"
exit 1
fi
echo "=> Downloading new translation files..."
curl https://translate.cloudron.io/download/cloudron/dashboard/?format=zip -o /tmp/lang.zip
echo "=> Unpacking..."
unzip -jo /tmp/lang.zip -d $OUT
chown -R yellowtent:yellowtent $OUT
# unzip put very restrictive permissions
chmod ua+r $OUT/*
echo "=> Cleanup..."
rm /tmp/lang.zip
echo "=> Done"
echo ""
echo "Reload the dashboard to see the new translations"
echo ""
+2 -2
View File
@@ -41,8 +41,8 @@ if ! $(cd "${SOURCE_DIR}/../dashboard" && git diff --exit-code >/dev/null); then
exit 1
fi
if [[ "$(node --version)" != "v14.15.4" ]]; then
echo "This script requires node 14.15.4"
if [[ "$(node --version)" != "v10.18.1" ]]; then
echo "This script requires node 10.18.1"
exit 1
fi
+52 -96
View File
@@ -11,52 +11,6 @@ if [[ ${EUID} -ne 0 ]]; then
exit 1
fi
function log() {
echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> installer: $1"
}
apt_ready="no"
function prepare_apt_once() {
[[ "${apt_ready}" == "yes" ]] && return
log "Making sure apt is in a good state"
log "Waiting for all dpkg tasks to finish..."
while fuser /var/lib/dpkg/lock; do
sleep 1
done
# it's unclear what needs to be run first or whether both these command should be run. so keep trying both
for count in {1..3}; do
# alternative to apt-install -y --fix-missing ?
if ! dpkg --force-confold --configure -a; then
log "dpkg reconfigure failed (try $count)"
dpkg_configure="no"
else
dpkg_configure="yes"
fi
if ! apt update -y; then
log "apt update failed (try $count)"
apt_update="no"
else
apt_update="yes"
fi
[[ "${dpkg_configure}" == "yes" && "${apt_update}" == "yes" ]] && break
sleep 1
done
apt_ready="yes"
if [[ "${dpkg_configure}" == "yes" && "${apt_update}" == "yes" ]]; then
log "apt is ready"
else
log "apt is not ready but proceeding anyway"
fi
}
readonly user=yellowtent
readonly box_src_dir=/home/${user}/box
@@ -67,23 +21,36 @@ readonly box_src_tmp_dir="$(realpath ${script_dir}/..)"
readonly ubuntu_version=$(lsb_release -rs)
readonly ubuntu_codename=$(lsb_release -cs)
readonly is_update=$(systemctl is-active -q box && echo "yes" || echo "no")
readonly is_update=$(systemctl is-active box && echo "yes" || echo "no")
log "Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION)"
echo "==> installer: Updating from $(cat $box_src_dir/VERSION) to $(cat $box_src_tmp_dir/VERSION) <=="
log "updating docker"
echo "==> installer: updating docker"
readonly docker_version=20.10.3
if [[ $(docker version --format {{.Client.Version}}) != "${docker_version}" ]]; then
if [[ $(docker version --format {{.Client.Version}}) != "19.03.12" ]]; then
# there are 3 packages for docker - containerd, CLI and the daemon
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.4.3-1_amd64.deb" -o /tmp/containerd.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
$curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_${docker_version}~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/containerd.io_1.2.13-2_amd64.deb" -o /tmp/containerd.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce-cli_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker-ce-cli.deb
curl -sL "https://download.docker.com/linux/ubuntu/dists/${ubuntu_codename}/pool/stable/amd64/docker-ce_19.03.12~3-0~ubuntu-${ubuntu_codename}_amd64.deb" -o /tmp/docker.deb
prepare_apt_once
echo "==> installer: Waiting for all dpkg tasks to finish..."
while fuser /var/lib/dpkg/lock; do
sleep 1
done
while ! dpkg --force-confold --configure -a; do
echo "==> installer: Failed to fix packages. Retry"
sleep 1
done
# the latest docker might need newer packages
while ! apt update -y; do
echo "==> installer: Failed to update packages. Retry"
sleep 1
done
while ! apt install -y /tmp/containerd.deb /tmp/docker-ce-cli.deb /tmp/docker.deb; do
log "Failed to install docker. Retry"
echo "==> installer: Failed to install docker. Retry"
sleep 1
done
@@ -92,36 +59,25 @@ fi
readonly nginx_version=$(nginx -v 2>&1)
if [[ "${nginx_version}" != *"1.18."* ]]; then
log "installing nginx 1.18"
$curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-2~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
prepare_apt_once
echo "==> installer: installing nginx 1.18"
curl -sL http://nginx.org/packages/ubuntu/pool/nginx/n/nginx/nginx_1.18.0-1~${ubuntu_codename}_amd64.deb -o /tmp/nginx.deb
# apt install with install deps (as opposed to dpkg -i)
apt install -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes /tmp/nginx.deb
rm /tmp/nginx.deb
fi
if ! which mount.nfs; then
log "installing nfs-common"
prepare_apt_once
apt install -y nfs-common
if ! which ipset; then
echo "==> installer: installing ipset"
apt install -y ipset
fi
if ! which sshfs; then
log "installing sshfs"
prepare_apt_once
apt install -y sshfs
fi
log "updating node"
readonly node_version=14.15.4
if [[ "$(node --version)" != "v${node_version}" ]]; then
mkdir -p /usr/local/node-${node_version}
$curl -sL https://nodejs.org/dist/v${node_version}/node-v${node_version}-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-${node_version}
ln -sf /usr/local/node-${node_version}/bin/node /usr/bin/node
ln -sf /usr/local/node-${node_version}/bin/npm /usr/bin/npm
rm -rf /usr/local/node-10.18.1
echo "==> installer: updating node"
if [[ "$(node --version)" != "v10.18.1" ]]; then
mkdir -p /usr/local/node-10.18.1
$curl -sL https://nodejs.org/dist/v10.18.1/node-v10.18.1-linux-x64.tar.gz | tar zxvf - --strip-components=1 -C /usr/local/node-10.18.1
ln -sf /usr/local/node-10.18.1/bin/node /usr/bin/node
ln -sf /usr/local/node-10.18.1/bin/npm /usr/bin/npm
rm -rf /usr/local/node-10.15.1
fi
# this is here (and not in updater.js) because rebuild requires the above node
@@ -132,31 +88,31 @@ for try in `seq 1 10`; do
# however by default npm drops privileges for npm rebuild
# https://docs.npmjs.com/misc/config#unsafe-perm
if cd "${box_src_tmp_dir}" && npm rebuild --unsafe-perm; then break; fi
log "Failed to rebuild, trying again"
echo "==> installer: Failed to rebuild, trying again"
sleep 5
done
if [[ ${try} -eq 10 ]]; then
log "npm rebuild failed, giving up"
echo "==> installer: npm rebuild failed, giving up"
exit 4
fi
log "downloading new addon images"
echo "==> installer: downloading new addon images"
images=$(node -e "var i = require('${box_src_tmp_dir}/src/infra_version.js'); console.log(i.baseImages.map(function (x) { return x.tag; }).join(' '), Object.keys(i.images).map(function (x) { return i.images[x].tag; }).join(' '));")
log "\tPulling docker images: ${images}"
echo -e "\tPulling docker images: ${images}"
for image in ${images}; do
while ! docker pull "${image}"; do # this pulls the image using the sha256
log "Could not pull ${image}"
sleep 5
done
while ! docker pull "${image%@sha256:*}"; do # this will tag the image for readability
log "Could not pull ${image%@sha256:*}"
sleep 5
done
if ! docker pull "${image}"; then # this pulls the image using the sha256
echo "==> installer: Could not pull ${image}"
exit 5
fi
if ! docker pull "${image%@sha256:*}"; then # this will tag the image for readability
echo "==> installer: Could not pull ${image%@sha256:*}"
exit 6
fi
done
log "update cloudron-syslog"
echo "==> installer: update cloudron-syslog"
CLOUDRON_SYSLOG_DIR=/usr/local/cloudron-syslog
CLOUDRON_SYSLOG="${CLOUDRON_SYSLOG_DIR}/bin/cloudron-syslog"
CLOUDRON_SYSLOG_VERSION="1.0.3"
@@ -164,7 +120,7 @@ while [[ ! -f "${CLOUDRON_SYSLOG}" || "$(${CLOUDRON_SYSLOG} --version)" != ${CLO
rm -rf "${CLOUDRON_SYSLOG_DIR}"
mkdir -p "${CLOUDRON_SYSLOG_DIR}"
if npm install --unsafe-perm -g --prefix "${CLOUDRON_SYSLOG_DIR}" cloudron-syslog@${CLOUDRON_SYSLOG_VERSION}; then break; fi
log "Failed to install cloudron-syslog, trying again"
echo "===> installer: Failed to install cloudron-syslog, trying again"
sleep 5
done
@@ -173,17 +129,17 @@ if ! id "${user}" 2>/dev/null; then
fi
if [[ "${is_update}" == "yes" ]]; then
log "stop box service for update"
echo "==> installer: stop box service for update"
${box_src_dir}/setup/stop.sh
fi
# ensure we are not inside the source directory, which we will remove now
cd /root
log "switching the box code"
echo "==> installer: switching the box code"
rm -rf "${box_src_dir}"
mv "${box_src_tmp_dir}" "${box_src_dir}"
chown -R "${user}:${user}" "${box_src_dir}"
log "calling box setup script"
echo "==> installer: calling box setup script"
"${box_src_dir}/setup/start.sh"
+56 -56
View File
@@ -5,47 +5,39 @@ set -eu -o pipefail
# This script is run after the box code is switched. This means that this script
# should pretty much always succeed. No network logic/download code here.
function log() {
echo -e "$(date +'%Y-%m-%dT%H:%M:%S')" "==> start: $1"
}
log "Cloudron Start"
echo "==> Cloudron Start"
readonly USER="yellowtent"
readonly HOME_DIR="/home/${USER}"
readonly BOX_SRC_DIR="${HOME_DIR}/box"
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata"
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata"
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata"
readonly MAIL_DATA_DIR="${HOME_DIR}/boxdata/mail"
readonly PLATFORM_DATA_DIR="${HOME_DIR}/platformdata" # platform data
readonly APPS_DATA_DIR="${HOME_DIR}/appsdata" # app data
readonly BOX_DATA_DIR="${HOME_DIR}/boxdata" # box data
readonly script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly json="$(realpath ${script_dir}/../node_modules/.bin/json)"
readonly ubuntu_version=$(lsb_release -rs)
cp -f "${script_dir}/../scripts/cloudron-support" /usr/bin/cloudron-support
cp -f "${script_dir}/../scripts/cloudron-translation-update" /usr/bin/cloudron-translation-update
# this needs to match the cloudron/base:2.0.0 gid
if ! getent group media; then
addgroup --gid 500 --system media
fi
log "Configuring docker"
echo "==> Configuring docker"
cp "${script_dir}/start/docker-cloudron-app.apparmor" /etc/apparmor.d/docker-cloudron-app
systemctl enable apparmor
systemctl restart apparmor
usermod ${USER} -a -G docker
# unbound (which starts after box code) relies on this interface to exist. dockerproxy also relies on this.
docker network create --subnet=172.18.0.0/16 --ip-range=172.18.0.0/20 cloudron || true
docker network create --subnet=172.18.0.0/16 cloudron || true
mkdir -p "${BOX_DATA_DIR}"
mkdir -p "${APPS_DATA_DIR}"
mkdir -p "${MAIL_DATA_DIR}/dkim"
# keep these in sync with paths.js
log "Ensuring directories"
echo "==> Ensuring directories"
mkdir -p "${PLATFORM_DATA_DIR}/graphite"
mkdir -p "${PLATFORM_DATA_DIR}/mysql"
@@ -63,18 +55,20 @@ mkdir -p "${PLATFORM_DATA_DIR}/logs/backup" \
"${PLATFORM_DATA_DIR}/logs/crash" \
"${PLATFORM_DATA_DIR}/logs/collectd"
mkdir -p "${PLATFORM_DATA_DIR}/update"
mkdir -p "${PLATFORM_DATA_DIR}/sftp/ssh" # sftp keys
mkdir -p "${PLATFORM_DATA_DIR}/firewall"
mkdir -p "${BOX_DATA_DIR}/appicons"
mkdir -p "${BOX_DATA_DIR}/firewall"
mkdir -p "${BOX_DATA_DIR}/profileicons"
mkdir -p "${BOX_DATA_DIR}/certs"
mkdir -p "${BOX_DATA_DIR}/acme" # acme keys
mkdir -p "${BOX_DATA_DIR}/mail/dkim"
mkdir -p "${BOX_DATA_DIR}/well-known" # .well-known documents
# ensure backups folder exists and is writeable
mkdir -p /var/backups
chmod 777 /var/backups
# can be removed after 6.3
[[ -f "${BOX_DATA_DIR}/updatechecker.json" ]] && mv "${BOX_DATA_DIR}/updatechecker.json" "${PLATFORM_DATA_DIR}/update/updatechecker.json"
rm -rf "${BOX_DATA_DIR}/well-known"
log "Configuring journald"
echo "==> Configuring journald"
sed -e "s/^#SystemMaxUse=.*$/SystemMaxUse=100M/" \
-e "s/^#ForwardToSyslog=.*$/ForwardToSyslog=no/" \
-i /etc/systemd/journald.conf
@@ -95,7 +89,7 @@ setfacl -n -m u:${USER}:r /var/log/journal/*/system.journal
# Give user access to nginx logs (uses adm group)
usermod -a -G adm ${USER}
log "Setting up unbound"
echo "==> Setting up unbound"
# DO uses Google nameservers by default. This causes RBL queries to fail (host 2.0.0.127.zen.spamhaus.org)
# We do not use dnsmasq because it is not a recursive resolver and defaults to the value in the interfaces file (which is Google DNS!)
# We listen on 0.0.0.0 because there is no way control ordering of docker (which creates the 172.18.0.0/16) and unbound
@@ -105,16 +99,16 @@ cp -f "${script_dir}/start/unbound.conf" /etc/unbound/unbound.conf.d/cloudron-ne
# update the root anchor after a out-of-disk-space situation (see #269)
unbound-anchor -a /var/lib/unbound/root.key
log "Adding systemd services"
echo "==> Adding systemd services"
cp -r "${script_dir}/start/systemd/." /etc/systemd/system/
systemctl disable cloudron.target || true
rm -f /etc/systemd/system/cloudron.target
[[ "${ubuntu_version}" == "16.04" ]] && sed -e 's/MemoryMax/MemoryLimit/g' -i /etc/systemd/system/box.service
[[ "${ubuntu_version}" == "16.04" ]] && sed -e 's/Type=notify/Type=simple/g' -i /etc/systemd/system/unbound.service
systemctl daemon-reload
systemctl enable --now cloudron-syslog
systemctl enable unbound
systemctl enable cloudron-syslog
systemctl enable box
systemctl enable cloudron-firewall
systemctl enable --now cloudron-disable-thp
# update firewall rules
systemctl restart cloudron-firewall
@@ -128,11 +122,11 @@ systemctl restart unbound
# ensure cloudron-syslog runs
systemctl restart cloudron-syslog
log "Configuring sudoers"
echo "==> Configuring sudoers"
rm -f /etc/sudoers.d/${USER}
cp "${script_dir}/start/sudoers" /etc/sudoers.d/${USER}
log "Configuring collectd"
echo "==> Configuring collectd"
rm -rf /etc/collectd /var/log/collectd.log
ln -sfF "${PLATFORM_DATA_DIR}/collectd" /etc/collectd
cp "${script_dir}/start/collectd/collectd.conf" "${PLATFORM_DATA_DIR}/collectd/collectd.conf"
@@ -144,7 +138,7 @@ if [[ "${ubuntu_version}" == "20.04" ]]; then
fi
systemctl restart collectd
log "Configuring logrotate"
echo "==> Configuring logrotate"
if ! grep -q "^include ${PLATFORM_DATA_DIR}/logrotate.d" /etc/logrotate.conf; then
echo -e "\ninclude ${PLATFORM_DATA_DIR}/logrotate.d\n" >> /etc/logrotate.conf
fi
@@ -154,10 +148,10 @@ cp "${script_dir}/start/logrotate/"* "${PLATFORM_DATA_DIR}/logrotate.d/"
# logrotate files have to be owned by root, this is here to fixup existing installations where we were resetting the owner to yellowtent
chown root:root "${PLATFORM_DATA_DIR}/logrotate.d/"
log "Adding motd message for admins"
echo "==> Adding motd message for admins"
cp "${script_dir}/start/cloudron-motd" /etc/update-motd.d/92-cloudron
log "Configuring nginx"
echo "==> Configuring nginx"
# link nginx config to system config
unlink /etc/nginx 2>/dev/null || rm -rf /etc/nginx
ln -s "${PLATFORM_DATA_DIR}/nginx" /etc/nginx
@@ -185,26 +179,18 @@ if [[ ! -f /etc/mysql/mysql.cnf ]] || ! diff -q "${script_dir}/start/mysql.cnf"
cp "${script_dir}/start/mysql.cnf" /etc/mysql/mysql.cnf
while true; do
if ! systemctl list-jobs | grep mysql; then break; fi
log "Waiting for mysql jobs..."
echo "Waiting for mysql jobs..."
sleep 1
done
log "Stopping mysql"
systemctl stop mysql
while mysqladmin ping 2>/dev/null; do
log "Waiting for mysql to stop..."
while true; do
if systemctl restart mysql; then break; fi
echo "Restarting MySql again after sometime since this fails randomly"
sleep 1
done
else
systemctl start mysql
fi
# the start/stop of mysql is separate to make sure it got reloaded with latest config and it's up and running before we start the new box code
# when using 'system restart mysql', it seems to restart much later and the box code loses connection during platform startup (dangerous!)
log "Starting mysql"
systemctl start mysql
while ! mysqladmin ping 2>/dev/null; do
log "Waiting for mysql to start..."
sleep 1
done
readonly mysql_root_password="password"
mysqladmin -u root -ppassword password password # reset default root password
if [[ "${ubuntu_version}" == "20.04" ]]; then
@@ -215,33 +201,47 @@ mysql -u root -p${mysql_root_password} -e 'CREATE DATABASE IF NOT EXISTS box'
# set HOME explicity, because it's not set when the installer calls it. this is done because
# paths.js uses this env var and some of the migrate code requires box code
log "Migrating data"
echo "==> Migrating data"
cd "${BOX_SRC_DIR}"
if ! HOME=${HOME_DIR} BOX_ENV=cloudron DATABASE_URL=mysql://root:${mysql_root_password}@127.0.0.1/box "${BOX_SRC_DIR}/node_modules/.bin/db-migrate" up; then
log "DB migration failed"
echo "DB migration failed"
exit 1
fi
rm -f /etc/cloudron/cloudron.conf
log "Changing ownership"
# note, change ownership after db migrate. this allow db migrate to move files around as root and then we can fix it up here
if [[ ! -f "${BOX_DATA_DIR}/dhparams.pem" ]]; then
echo "==> Generating dhparams (takes forever)"
openssl dhparam -out "${BOX_DATA_DIR}/dhparams.pem" 2048
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
else
cp "${BOX_DATA_DIR}/dhparams.pem" "${PLATFORM_DATA_DIR}/addons/mail/dhparams.pem"
fi
# old installations used to create appdata/<app>/redis which is now part of old backups and prevents restore
echo "==> Cleaning up stale redis directories"
find "${APPS_DATA_DIR}" -maxdepth 2 -type d -name redis -exec rm -rf {} +
echo "==> Cleaning up old logs"
rm -f /home/yellowtent/platformdata/logs/*/*.log.* || true
echo "==> Changing ownership"
# be careful of what is chown'ed here. subdirs like mysql,redis etc are owned by the containers and will stop working if perms change
chown -R "${USER}" /etc/cloudron
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update" "${PLATFORM_DATA_DIR}/sftp" "${PLATFORM_DATA_DIR}/firewall"
chown "${USER}:${USER}" -R "${PLATFORM_DATA_DIR}/nginx" "${PLATFORM_DATA_DIR}/collectd" "${PLATFORM_DATA_DIR}/addons" "${PLATFORM_DATA_DIR}/acme" "${PLATFORM_DATA_DIR}/backup" "${PLATFORM_DATA_DIR}/logs" "${PLATFORM_DATA_DIR}/update"
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}/INFRA_VERSION" 2>/dev/null || true
chown "${USER}:${USER}" "${PLATFORM_DATA_DIR}"
chown "${USER}:${USER}" "${APPS_DATA_DIR}"
# do not chown the boxdata/mail directory; dovecot gets upset
chown "${USER}:${USER}" "${BOX_DATA_DIR}"
find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${MAIL_DATA_DIR}" -exec chown -R "${USER}:${USER}" {} \;
chown "${USER}:${USER}" "${MAIL_DATA_DIR}"
chown "${USER}:${USER}" -R "${MAIL_DATA_DIR}/dkim" # this is owned by box currently since it generates the keys
find "${BOX_DATA_DIR}" -mindepth 1 -maxdepth 1 -not -path "${BOX_DATA_DIR}/mail" -exec chown -R "${USER}:${USER}" {} \;
chown "${USER}:${USER}" "${BOX_DATA_DIR}/mail"
chown "${USER}:${USER}" -R "${BOX_DATA_DIR}/mail/dkim" # this is owned by box currently since it generates the keys
log "Starting Cloudron"
echo "==> Starting Cloudron"
systemctl start box
sleep 2 # give systemd sometime to start the processes
log "Almost done"
echo "==> Almost done"
-14
View File
@@ -1,14 +0,0 @@
#!/bin/bash
set -eu
echo "==> Disabling THP"
# https://docs.couchbase.com/server/current/install/thp-disable.html
if [[ -d /sys/kernel/mm/transparent_hugepage ]]; then
echo "never" > /sys/kernel/mm/transparent_hugepage/enabled
echo "never" > /sys/kernel/mm/transparent_hugepage/defrag
else
echo "==> kernel does not have THP"
fi
+5 -17
View File
@@ -18,22 +18,12 @@ fi
# allow related and established connections
iptables -t filter -A CLOUDRON -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,80,202,443 -j ACCEPT # 202 is the alternate ssh port
iptables -t filter -A CLOUDRON -p tcp -m tcp -m multiport --dports 22,25,80,202,443 -j ACCEPT # 202 is the alternate ssh port
# whitelist any user ports. we used to use --dports but it has a 15 port limit (XT_MULTI_PORTS)
ports_json="/home/yellowtent/platformdata/firewall/ports.json"
# whitelist any user ports
ports_json="/home/yellowtent/boxdata/firewall/ports.json"
if allowed_tcp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_tcp_ports.join(','))" 2>/dev/null); then
IFS=',' arr=(${allowed_tcp_ports})
for p in "${arr[@]}"; do
iptables -A CLOUDRON -p tcp -m tcp --dport "${p}" -j ACCEPT
done
fi
if allowed_udp_ports=$(node -e "console.log(JSON.parse(fs.readFileSync('${ports_json}', 'utf8')).allowed_udp_ports.join(','))" 2>/dev/null); then
IFS=',' arr=(${allowed_udp_ports})
for p in "${arr[@]}"; do
iptables -A CLOUDRON -p udp -m udp --dport "${p}" -j ACCEPT
done
[[ -n "${allowed_tcp_ports}" ]] && iptables -A CLOUDRON -p tcp -m tcp -m multiport --dports "${allowed_tcp_ports}" -j ACCEPT
fi
# turn and stun service
@@ -70,7 +60,7 @@ for port in 80 443; do
iptables -A CLOUDRON_RATELIMIT -p tcp --syn --dport ${port} -m connlimit --connlimit-above 5000 -j CLOUDRON_RATELIMIT_LOG
done
# ssh
# ssh smtp ssh msa imap sieve
for port in 22 202; do
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --set --name "public-${port}"
iptables -A CLOUDRON_RATELIMIT -p tcp --dport ${port} -m state --state NEW -m recent --update --name "public-${port}" --seconds 10 --hitcount 5 -j CLOUDRON_RATELIMIT_LOG
@@ -98,5 +88,3 @@ fi
# Workaround issue where Docker insists on adding itself first in FORWARD table
iptables -D FORWARD -j CLOUDRON_RATELIMIT || true
iptables -I FORWARD 1 -j CLOUDRON_RATELIMIT
echo "==> Setting up firewall done"
+1 -10
View File
@@ -1,7 +1,5 @@
#!/bin/bash
[[ -f /etc/update-motd.d/91-cloudron-install-in-progress ]] && exit
printf "**********************************************************************\n\n"
if [[ -z "$(ls -A /home/yellowtent/boxdata/mail/dkim)" ]]; then
@@ -12,17 +10,10 @@ if [[ -z "$(ls -A /home/yellowtent/boxdata/mail/dkim)" ]]; then
fi
echo "${ip}" > /tmp/.cloudron-motd-cache
if [[ ! -f /etc/cloudron/SETUP_TOKEN ]]; then
url="https://${ip}"
else
setupToken="$(cat /etc/cloudron/SETUP_TOKEN)"
url="https://${ip}/?setupToken=${setupToken}"
fi
printf "\t\t\tWELCOME TO CLOUDRON\n"
printf "\t\t\t-------------------\n"
printf '\n\e[1;32m%-6s\e[m\n\n' "Visit ${url} on your browser and accept the self-signed certificate to finish setup."
printf '\n\e[1;32m%-6s\e[m\n\n' "Visit https://${ip} on your browser and accept the self-signed certificate to finish setup."
printf "Cloudron overview - https://docs.cloudron.io/ \n"
printf "Cloudron setup - https://docs.cloudron.io/installation/#setup \n"
else
+1 -3
View File
@@ -164,9 +164,7 @@ LoadPlugin swap
#LoadPlugin vmem
#LoadPlugin vserver
#LoadPlugin wireless
<LoadPlugin write_graphite>
FlushInterval 20
</LoadPlugin>
LoadPlugin write_graphite
#LoadPlugin write_http
#LoadPlugin write_riemann
+1 -2
View File
@@ -6,7 +6,7 @@ disks = []
def init():
global disks
lines = [s.split() for s in subprocess.check_output(["df", "--type=ext4", "--output=source,target,size,used,avail"]).decode('utf-8').splitlines()]
lines = [s.split() for s in subprocess.check_output(["df", "--type=ext4", "--output=source,target,size,used,avail"]).splitlines()]
disks = lines[1:] # strip header
collectd.info('custom df plugin initialized with %s' % disks)
@@ -34,5 +34,4 @@ def read():
val.dispatch(values=[used], type_instance='used')
collectd.register_init(init)
# see Interval setting in collectd.conf for polling interval
collectd.register_read(read)
+8 -31
View File
@@ -6,26 +6,19 @@ PATHS = [] # { name, dir, exclude }
# there is a pattern in carbon/storage-schemas.conf which stores values every 12h for a year
INTERVAL = 60 * 60 * 12 # twice a day. change values in docker-graphite if you change this
# we used to pass the INTERVAL as a parameter to register_read. however, collectd write_graphite
# takes a bit to load (tcp connection) and drops the du data. this then means that we have to wait
# for INTERVAL secs for du data. instead, we just cache the value for INTERVAL instead
CACHE = dict()
CACHE_TIME = 0
def du(pathinfo):
# -B1 makes du print block sizes and not apparent sizes (to match df which also uses block sizes)
dirname = pathinfo['dir']
cmd = 'timeout 1800 du -DsB1 "{}"'.format(dirname)
cmd = 'timeout 1800 du -DsB1 "{}"'.format(pathinfo['dir'])
if pathinfo['exclude'] != '':
cmd += ' --exclude "{}"'.format(pathinfo['exclude'])
collectd.info('computing size with command: %s' % cmd);
try:
size = subprocess.check_output(cmd, shell=True).split()[0].decode('utf-8')
collectd.info('\tsize of %s is %s (time: %i)' % (dirname, size, int(time.time())))
collectd.info('\tsize of %s is %s (time: %i)' % (pathinfo['dir'], size, int(time.time())))
return size
except Exception as e:
collectd.info('\terror getting the size of %s: %s' % (dirname, str(e)))
collectd.info('\terror getting the size of %s: %s' % (pathinfo['dir'], str(e)))
return 0
def parseSize(size):
@@ -71,35 +64,19 @@ def init():
collectd.info('custom du plugin initialized with %s %s' % (PATHS, sys.version))
def read():
global CACHE, CACHE_TIME
# read from cache if < 12 hours
read_cache = (time.time() - CACHE_TIME) < INTERVAL
if not read_cache:
CACHE_TIME = time.time()
for pathinfo in PATHS:
dirname = pathinfo['dir']
if read_cache and dirname in CACHE:
size = CACHE[dirname]
else:
size = du(pathinfo)
CACHE[dirname] = size
size = du(pathinfo)
# type comes from https://github.com/collectd/collectd/blob/master/src/types.db
val = collectd.Values(type='capacity', plugin='du', plugin_instance=pathinfo['name'])
val.dispatch(values=[size], type_instance='usage')
if read_cache and 'docker' in CACHE:
size = CACHE['docker']
else:
size = dockerSize()
CACHE['docker'] = size
size = dockerSize()
val = collectd.Values(type='capacity', plugin='du', plugin_instance='docker')
val.dispatch(values=[size], type_instance='usage')
collectd.register_init(init)
collectd.register_config(configure)
collectd.register_read(read)
collectd.register_read(read, INTERVAL)
+2 -11
View File
@@ -4,9 +4,9 @@
# http://bugs.mysql.com/bug.php?id=68514
[mysqld]
performance_schema=OFF
max_connections=200
max_connections=50
# on ec2, without this we get a sporadic connection drop when doing the initial migration
max_allowed_packet=64M
max_allowed_packet=32M
# https://mathiasbynens.be/notes/mysql-utf8mb4
character-set-server = utf8mb4
@@ -15,15 +15,6 @@ collation-server = utf8mb4_unicode_ci
# set timezone to UTC
default_time_zone='+00:00'
# disable bin logs. they are only useful in replication mode
skip-log-bin
# this is used when creating an index using ALTER command
innodb_sort_buffer_size=2097152
# this is a per session sort (ORDER BY) variable for non-indexed fields
sort_buffer_size = 4M
[mysqldump]
quick
quote-names
+11 -8
View File
@@ -13,6 +13,9 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/mkdirvolume.sh
Defaults!/home/yellowtent/box/src/scripts/rmaddondir.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmaddondir.sh
Defaults!/home/yellowtent/box/src/scripts/reloadnginx.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reloadnginx.sh
Defaults!/home/yellowtent/box/src/scripts/reboot.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/reboot.sh
@@ -22,6 +25,9 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/configurecollec
Defaults!/home/yellowtent/box/src/scripts/collectlogs.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/collectlogs.sh
Defaults!/home/yellowtent/box/src/scripts/retire.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/retire.sh
Defaults!/home/yellowtent/box/src/scripts/update.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/update.sh
@@ -38,8 +44,11 @@ yellowtent ALL=(root) NOPASSWD:SETENV: /home/yellowtent/box/src/scripts/backupup
Defaults!/home/yellowtent/box/src/scripts/restart.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restart.sh
Defaults!/home/yellowtent/box/src/scripts/restartservice.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartservice.sh
Defaults!/home/yellowtent/box/src/scripts/restartdocker.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartdocker.sh
Defaults!/home/yellowtent/box/src/scripts/restartunbound.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/restartunbound.sh
Defaults!/home/yellowtent/box/src/scripts/rmmailbox.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmailbox.sh
@@ -53,9 +62,3 @@ yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/stoptask.sh
Defaults!/home/yellowtent/box/src/scripts/setblocklist.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/setblocklist.sh
Defaults!/home/yellowtent/box/src/scripts/addmount.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/addmount.sh
Defaults!/home/yellowtent/box/src/scripts/rmmount.sh env_keep="HOME BOX_ENV"
yellowtent ALL=(root) NOPASSWD: /home/yellowtent/box/src/scripts/rmmount.sh
+3 -2
View File
@@ -1,6 +1,8 @@
[Unit]
Description=Cloudron Admin
OnFailure=crashnotifier@%n.service
; journald crashes result in a EPIPE in node. Cannot ignore it as it results in loss of logs.
BindsTo=systemd-journald.service
After=mysql.service nginx.service
; As cloudron-resize-fs is a one-shot, the Wants= automatically ensures that the service *finishes*
Wants=cloudron-resize-fs.service
@@ -13,8 +15,7 @@ Type=idle
WorkingDirectory=/home/yellowtent/box
Restart=always
ExecStart=/home/yellowtent/box/box.js
; we run commands like df which will parse properly only with correct locale
Environment="HOME=/home/yellowtent" "USER=yellowtent" "DEBUG=box:*,connect-lastmile,-box:ldap" "BOX_ENV=cloudron" "NODE_ENV=production" "LC_ALL=C"
Environment="HOME=/home/yellowtent" "USER=yellowtent" "DEBUG=box:*,connect-lastmile,-box:ldap" "BOX_ENV=cloudron" "NODE_ENV=production"
; kill apptask processes as well
KillMode=control-group
; Do not kill this process on OOM. Children inherit this score. Do not set it to -1000 so that MemoryMax can keep working
@@ -1,15 +0,0 @@
# https://docs.mongodb.com/manual/tutorial/transparent-huge-pages/
[Unit]
Description=Disable Transparent Huge Pages (THP)
DefaultDependencies=no
After=sysinit.target local-fs.target
Before=docker.service
[Service]
Type=oneshot
ExecStart="/home/yellowtent/box/setup/start/cloudron-disable-thp.sh"
RemainAfterExit=yes
[Install]
WantedBy=basic.target
@@ -5,7 +5,6 @@ PartOf=docker.service
[Service]
Type=oneshot
Environment="BOX_ENV=cloudron"
ExecStart="/home/yellowtent/box/setup/start/cloudron-firewall.sh"
RemainAfterExit=yes
+1 -5
View File
@@ -2,17 +2,13 @@
[Unit]
Description=Unbound DNS Resolver
After=network-online.target docker.service
Before=nss-lookup.target
Wants=network-online.target nss-lookup.target
After=network.target
[Service]
PIDFile=/run/unbound.pid
ExecStart=/usr/sbin/unbound -d
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
# On ubuntu 16, this doesn't work for some reason
Type=notify
[Install]
WantedBy=multi-user.target
+1 -3
View File
@@ -1,7 +1,5 @@
server:
port: 53
interface: 127.0.0.1
interface: 172.18.0.1
interface: 0.0.0.0
do-ip6: no
access-control: 127.0.0.1 allow
access-control: 172.18.0.1/16 allow
+16 -18
View File
@@ -1,31 +1,29 @@
'use strict';
exports = module.exports = {
verifyToken
verifyToken: verifyToken
};
const assert = require('assert'),
var assert = require('assert'),
BoxError = require('./boxerror.js'),
safe = require('safetydance'),
tokens = require('./tokens.js'),
users = require('./users.js'),
util = require('util');
tokendb = require('./tokendb.js'),
users = require('./users.js');
const userGet = util.promisify(users.get);
async function verifyToken(accessToken) {
function verifyToken(accessToken, callback) {
assert.strictEqual(typeof accessToken, 'string');
assert.strictEqual(typeof callback, 'function');
const token = await tokens.getByAccessToken(accessToken);
if (!token) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'No such token');
tokendb.getByAccessToken(accessToken, function (error, token) {
if (error && error.reason === BoxError.NOT_FOUND) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (error) return callback(error);
const [error, user] = await safe(userGet(token.identifier));
if (error && error.reason === BoxError.NOT_FOUND) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not found');
if (error) throw error;
users.get(token.identifier, function (error, user) {
if (error && error.reason === BoxError.NOT_FOUND) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (error) return callback(error);
if (!user.active) throw new BoxError(BoxError.INVALID_CREDENTIALS, 'User not active');
if (!user.active) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
await safe(tokens.update(token.id, { lastUsedTime: new Date() })); // ignore any error
return user;
callback(null, user);
});
});
}
-547
View File
@@ -1,547 +0,0 @@
'use strict';
exports = module.exports = {
getCertificate,
// testing
_name: 'acme',
_getChallengeSubdomain: getChallengeSubdomain
};
const assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
crypto = require('crypto'),
debug = require('debug')('box:cert/acme2'),
domains = require('./domains.js'),
fs = require('fs'),
os = require('os'),
path = require('path'),
promiseRetry = require('./promise-retry.js'),
superagent = require('superagent'),
safe = require('safetydance'),
_ = require('underscore');
const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory',
CA_STAGING_DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory';
// http://jose.readthedocs.org/en/latest/
// https://www.ietf.org/proceedings/92/slides/slides-92-acme-1.pdf
// https://community.letsencrypt.org/t/list-of-client-implementations/2103
// Holds the ACME client state for one certificate request (account key, contact
// email, CA endpoints, challenge mode). Prototype methods implement the RFC 8555 flow.
function Acme2(options) {
    assert.strictEqual(typeof options, 'object');

    this.accountKeyPem = options.accountKeyPem; // Buffer holding the account private key (PEM)
    this.email = options.email; // contact email registered with the CA
    this.keyId = null; // account URL ('kid'); filled in by registerUser()
    this.directory = {}; // CA endpoint map

    // options.prod selects the production vs staging directory URL
    this.caDirectory = options.prod ? CA_PROD_DIRECTORY_URL : CA_STAGING_DIRECTORY_URL;

    this.performHttpAuthorization = Boolean(options.performHttpAuthorization);
    this.wildcard = Boolean(options.wildcard);
}
// Convert standard base64 into the url-safe, unpadded alphabet used by JOSE
// (RFC 7515 appendix C): '+' -> '-', '/' -> '_', padding '=' removed.
function urlBase64Encode(string) {
    return string.split('+').join('-').split('/').join('_').split('=').join('');
}
// JOSE base64url encoding (RFC 7515 appendix C) of a string or Buffer.
function b64(str) {
    const base64 = (Buffer.isBuffer(str) ? str : Buffer.from(str)).toString('base64');
    return base64.replace(/\+/g, '-').replace(/\//g, '_').replace(/=/g, '');
}
// Extract the RSA modulus (as a Buffer) from a PEM private key by shelling out
// to openssl. Returns null when openssl fails or its output cannot be parsed.
function getModulus(pem) {
    assert(Buffer.isBuffer(pem));

    const stdout = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
    if (!stdout) return null;

    const match = stdout.match(/Modulus=([0-9a-fA-F]+)$/m);
    return match ? Buffer.from(match[1], 'hex') : null;
}
// Send a JWS-signed POST to `url` with the given JSON string payload (flattened JWS
// JSON serialization, RS256). A fresh anti-replay nonce is fetched from the CA's
// newNonce endpoint before every request (RFC 8555 section 6.5).
// Resolves with the raw superagent response even for non-2xx statuses (callers
// inspect .status themselves); throws BoxError.NETWORK_ERROR/EXTERNAL_ERROR otherwise.
Acme2.prototype.sendSignedRequest = async function (url, payload) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof payload, 'string');
    assert(Buffer.isBuffer(this.accountKeyPem));

    const that = this;

    let header = {
        url: url,
        alg: 'RS256'
    };

    // keyId is null when registering account. Per RFC 8555, the newAccount request
    // embeds the full JWK; all later requests reference the account via 'kid' instead.
    if (this.keyId) {
        header.kid = this.keyId;
    } else {
        header.jwk = {
            e: b64(Buffer.from([0x01, 0x00, 0x01])), // exponent - 65537
            kty: 'RSA',
            n: b64(getModulus(this.accountKeyPem))
        };
    }

    const payload64 = b64(payload);

    // fetch a fresh nonce for this request; 204 is the expected newNonce reply
    let [error, response] = await safe(superagent.get(this.directory.newNonce).timeout(30000).ok(() => true));
    if (error) throw new BoxError(BoxError.NETWORK_ERROR, `Network error sending signed request: ${error.message}`);
    if (response.status !== 204) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code when fetching nonce : ${response.status}`);

    const nonce = response.headers['Replay-Nonce'.toLowerCase()];
    if (!nonce) throw new BoxError(BoxError.EXTERNAL_ERROR, 'No nonce in response');
    debug('sendSignedRequest: using nonce %s for url %s', nonce, url);

    // signing input is "<base64url(protected)>.<base64url(payload)>"
    const protected64 = b64(JSON.stringify(_.extend({ }, header, { nonce: nonce })));

    const signer = crypto.createSign('RSA-SHA256');
    signer.update(protected64 + '.' + payload64, 'utf8');
    const signature64 = urlBase64Encode(signer.sign(that.accountKeyPem, 'base64'));

    const data = {
        protected: protected64,
        payload: payload64,
        signature: signature64
    };

    [error, response] = await safe(superagent.post(url).send(data).set('Content-Type', 'application/jose+json').set('User-Agent', 'acme-cloudron').timeout(30000).ok(() => true));
    if (error) throw new BoxError(BoxError.NETWORK_ERROR, `Network error sending signed request: ${error.message}`);

    return response;
};
// POST-as-GET: an authenticated fetch with an empty payload, used to read
// CA resources (orders, authorizations, certificates).
// https://tools.ietf.org/html/rfc8555#section-6.3
Acme2.prototype.postAsGet = async function (url) {
    const response = await this.sendSignedRequest(url, '');
    return response;
};
// Set/refresh the contact email on the ACME account at registrationUri.
// Throws BoxError.EXTERNAL_ERROR when the CA does not answer 200.
Acme2.prototype.updateContact = async function (registrationUri) {
    assert.strictEqual(typeof registrationUri, 'string');

    debug(`updateContact: registrationUri: ${registrationUri} email: ${this.email}`);

    // https://github.com/ietf-wg-acme/acme/issues/30
    const payload = { contact: [ 'mailto:' + this.email ] };

    const result = await this.sendSignedRequest(registrationUri, JSON.stringify(payload));
    if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to update contact. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);

    debug(`updateContact: contact of user updated to ${this.email}`);
};
// Register (or fetch the pre-existing) ACME account for our key and remember the
// account URL as this.keyId, used as the JWS 'kid' by subsequent requests.
// Throws BoxError.EXTERNAL_ERROR when the CA rejects the registration.
Acme2.prototype.registerUser = async function () {
    const payload = {
        termsOfServiceAgreed: true
    };

    debug('registerUser: registering user');

    const result = await this.sendSignedRequest(this.directory.newAccount, JSON.stringify(payload));
    // 200 if already exists. 201 for new accounts
    // BUGFIX: this used to `return new BoxError(...)` - the error object was resolved
    // instead of thrown, so callers (which only await) silently ignored failures.
    if (result.status !== 200 && result.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to register new account. Expecting 200 or 201, got ${result.status} ${JSON.stringify(result.body)}`);

    debug(`registerUser: user registered keyid: ${result.headers.location}`);

    this.keyId = result.headers.location; // account URL

    await this.updateContact(result.headers.location);
};
// Create a new certificate order for a single dns identifier (RFC 8555 section 7.4).
// Returns { order, orderUrl } after validating the fields the rest of the flow relies on.
// Throws BoxError.ACCESS_DENIED on 403, BoxError.EXTERNAL_ERROR otherwise.
Acme2.prototype.newOrder = async function (domain) {
    assert.strictEqual(typeof domain, 'string');

    const payload = {
        identifiers: [{
            type: 'dns',
            value: domain
        }]
    };

    debug(`newOrder: ${domain}`);

    const result = await this.sendSignedRequest(this.directory.newOrder, JSON.stringify(payload));
    if (result.status === 403) throw new BoxError(BoxError.ACCESS_DENIED, `Forbidden sending new order: ${result.body.detail}`);
    // FIX: error message used result.statusCode while the check uses result.status;
    // use .status consistently like the rest of this file
    if (result.status !== 201) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to send new order. Expecting 201, got ${result.status} ${JSON.stringify(result.body)}`);

    debug('newOrder: created order %s %j', domain, result.body);

    const order = result.body, orderUrl = result.headers.location;

    // validate the pieces of the order the flow depends on before returning it
    if (!Array.isArray(order.authorizations)) throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid authorizations in order');
    if (typeof order.finalize !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid finalize in order');
    if (typeof orderUrl !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, 'invalid order location in order header');

    return { order, orderUrl };
};
// Poll the order URL until the CA reports it 'valid' with a certificate URL.
// Polls every 20 seconds for up to 15 attempts. Resolves with the certificate URL;
// throws BoxError.EXTERNAL_ERROR on bad responses or unexpected order states.
Acme2.prototype.waitForOrder = async function (orderUrl) {
    assert.strictEqual(typeof orderUrl, 'string');

    debug(`waitForOrder: ${orderUrl}`);

    return await promiseRetry({ times: 15, interval: 20000 }, async () => {
        debug('waitForOrder: getting status');

        const result = await this.postAsGet(orderUrl);

        if (result.status !== 200) {
            debug(`waitForOrder: invalid response code getting uri ${result.status}`);
            throw new BoxError(BoxError.EXTERNAL_ERROR, `Bad response code: ${result.status}`);
        }

        debug('waitForOrder: status is "%s %j', result.body.status, result.body);

        // TRY_AGAIN presumably makes promiseRetry poll again (matches its use in
        // waitForChallenge); any other error ends the retry loop
        if (result.body.status === 'pending' || result.body.status === 'processing') throw new BoxError(BoxError.TRY_AGAIN, `Request is in ${result.body.status} state`);
        else if (result.body.status === 'valid' && result.body.certificate) return result.body.certificate;
        else throw new BoxError(BoxError.EXTERNAL_ERROR, `Unexpected status or invalid response: ${JSON.stringify(result.body)}`);
    });
};
// Build the key authorization string "<token>.<JWK thumbprint>" (RFC 8555 section 8.1).
// NOTE: the JWK property order (e, kty, n) must stay as-is - the thumbprint hashes
// the exact JSON serialization.
Acme2.prototype.getKeyAuthorization = function (token) {
    assert(Buffer.isBuffer(this.accountKeyPem));

    const jwk = {
        e: b64(Buffer.from([0x01, 0x00, 0x01])), // Exponent - 65537
        kty: 'RSA',
        n: b64(getModulus(this.accountKeyPem))
    };

    const digest = crypto.createHash('sha256').update(JSON.stringify(jwk)).digest('base64');
    const thumbprint = urlBase64Encode(digest);

    return token + '.' + thumbprint;
};
// Tell the CA that the challenge is provisioned and ready to be validated
// (RFC 8555 section 7.5.1). Throws BoxError.EXTERNAL_ERROR on non-200.
Acme2.prototype.notifyChallengeReady = async function (challenge) {
    assert.strictEqual(typeof challenge, 'object'); // { type, status, url, token }

    debug('notifyChallengeReady: %s was met', challenge.url);

    const keyAuthorization = this.getKeyAuthorization(challenge.token);

    const payload = {
        resource: 'challenge', // NOTE(review): looks like an ACME v1 leftover - confirm before removing
        keyAuthorization: keyAuthorization
    };

    const result = await this.sendSignedRequest(challenge.url, JSON.stringify(payload));
    // FIX: error message used result.statusCode while the check uses result.status
    if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to notify challenge. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
};
// Poll the challenge URL until the CA marks it 'valid'. Polls every 20 seconds
// for up to 15 attempts. Throws BoxError.EXTERNAL_ERROR on bad responses or an
// unexpected challenge state.
Acme2.prototype.waitForChallenge = async function (challenge) {
    assert.strictEqual(typeof challenge, 'object');

    debug('waitingForChallenge: %j', challenge);

    await promiseRetry({ times: 15, interval: 20000 }, async () => {
        debug('waitingForChallenge: getting status');

        const result = await this.postAsGet(challenge.url);

        if (result.status !== 200) {
            debug(`waitForChallenge: invalid response code getting uri ${result.status}`);
            // FIX: message concatenated result.statusCode while the check above uses
            // result.status; report the same field the check evaluated
            throw new BoxError(BoxError.EXTERNAL_ERROR, `Bad response code: ${result.status}`);
        }

        debug(`waitForChallenge: status is "${result.body.status}" "${JSON.stringify(result.body)}"`);

        // TRY_AGAIN keeps promiseRetry polling; 'valid' ends the wait successfully
        if (result.body.status === 'pending') throw new BoxError(BoxError.TRY_AGAIN);
        else if (result.body.status === 'valid') return;
        else throw new BoxError(BoxError.EXTERNAL_ERROR, `Unexpected status: ${result.body.status}`);
    });
};
// Finalize the order by submitting the CSR (DER) to the finalization URL.
// Throws BoxError.EXTERNAL_ERROR on non-200 responses.
// https://community.letsencrypt.org/t/public-beta-rate-limits/4772 for rate limits
Acme2.prototype.signCertificate = async function (domain, finalizationUrl, csrDer) {
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof finalizationUrl, 'string');
    assert(Buffer.isBuffer(csrDer));

    debug('signCertificate: sending sign request');

    const result = await this.sendSignedRequest(finalizationUrl, JSON.stringify({ csr: b64(csrDer) }));

    // 429 means we reached the cert limit for this domain
    if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to sign certificate. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);
};
// Create (or reuse on renewal) the per-hostname EC private key and generate a CSR
// for it via openssl. Writes the key to keyFilePath and the CSR (DER) to csrFilePath.
// Returns the CSR as a Buffer. Throws BoxError.OPENSSL_ERROR/FS_ERROR on failure.
Acme2.prototype.createKeyAndCsr = async function (hostname, keyFilePath, csrFilePath) {
    assert.strictEqual(typeof hostname, 'string');

    // reusing the existing key across renewals keeps pinned clients working
    if (safe.fs.existsSync(keyFilePath)) {
        debug('createKeyAndCsr: reuse the key for renewal at %s', keyFilePath);
    } else {
        let key = safe.child_process.execSync('openssl ecparam -genkey -name secp384r1'); // openssl ecparam -list_curves
        if (!key) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);

        if (!safe.fs.writeFileSync(keyFilePath, key)) throw new BoxError(BoxError.FS_ERROR, safe.error);

        debug('createKeyAndCsr: key file saved at %s', keyFilePath);
    }

    // openssl needs a config file to add extensions; build it in a throwaway tmpdir
    const [error, tmpdir] = await safe(fs.promises.mkdtemp(path.join(os.tmpdir(), 'acme-')));
    if (error) throw new BoxError(BoxError.FS_ERROR, `Error creating temporary directory for openssl config: ${error.message}`);

    // OCSP must-staple is currently disabled because nginx does not provide staple on the first request (https://forum.cloudron.io/topic/4917/ocsp-stapling-for-tls-ssl/)
    // ' -addext "tlsfeature = status_request"'; // this adds OCSP must-staple
    // we used to use -addext to the CLI to add these but that arg doesn't work on Ubuntu 16.04
    // empty distinguished_name section is required for Ubuntu 16 openssl
    const conf = '[req]\nreq_extensions = v3_req\ndistinguished_name = req_distinguished_name\n'
        + '[req_distinguished_name]\n\n'
        + '[v3_req]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @alt_names\n'
        + `[alt_names]\nDNS.1 = ${hostname}\n`;

    const opensslConfigFile = path.join(tmpdir, 'openssl.conf');
    if (!safe.fs.writeFileSync(opensslConfigFile, conf)) throw new BoxError(BoxError.FS_ERROR, `Failed to write openssl config: ${safe.error.message}`);

    // while we pass the CN anyways, subjectAltName takes precedence
    const csrDer = safe.child_process.execSync(`openssl req -new -key ${keyFilePath} -outform DER -subj /CN=${hostname} -config ${opensslConfigFile}`);
    if (!csrDer) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);

    if (!safe.fs.writeFileSync(csrFilePath, csrDer)) throw new BoxError(BoxError.FS_ERROR, safe.error); // bookkeeping. inspect with openssl req -text -noout -in hostname.csr -inform der

    // best-effort tmpdir cleanup; errors are deliberately ignored via safe()
    await safe(fs.promises.rmdir(tmpdir, { recursive: true }));

    debug('createKeyAndCsr: csr file (DER) saved at %s', csrFilePath);

    return csrDer;
};
// Download the issued full-chain certificate (PEM) from certUrl and save it to
// certFilePath. Retries (up to 5 times, 20s apart) while the CA answers 202.
// Throws BoxError.EXTERNAL_ERROR/FS_ERROR on failure.
Acme2.prototype.downloadCertificate = async function (hostname, certUrl, certFilePath) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof certUrl, 'string');

    await promiseRetry({ times: 5, interval: 20000 }, async () => {
        debug('downloadCertificate: downloading certificate');

        const result = await this.postAsGet(certUrl);

        // FIX: this function checked result.statusCode while every other method in this
        // file checks result.status; use .status consistently (superagent sets both)
        if (result.status === 202) throw new BoxError(BoxError.TRY_AGAIN, 'Retry downloading certificate');
        if (result.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Failed to get cert. Expecting 200, got ${result.status} ${JSON.stringify(result.body)}`);

        const fullChainPem = result.body; // buffer

        if (!safe.fs.writeFileSync(certFilePath, fullChainPem)) throw new BoxError(BoxError.FS_ERROR, safe.error);

        debug(`downloadCertificate: cert file for ${hostname} saved at ${certFilePath}`);
    });
};
// Provision an http-01 challenge: write the key authorization into the
// acmeChallengesDir (served over HTTP). Resolves with the selected challenge object.
Acme2.prototype.prepareHttpChallenge = async function (hostname, domain, authorization, acmeChallengesDir) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorization, 'object');
    assert.strictEqual(typeof acmeChallengesDir, 'string');

    debug('prepareHttpChallenge: challenges: %j', authorization);

    const challenge = authorization.challenges.find((c) => c.type === 'http-01');
    if (!challenge) throw new BoxError(BoxError.EXTERNAL_ERROR, 'no http challenges');

    debug('prepareHttpChallenge: preparing for challenge %j', challenge);

    const keyAuthorization = this.getKeyAuthorization(challenge.token);
    const challengeFilePath = path.join(acmeChallengesDir, challenge.token);

    debug('prepareHttpChallenge: writing %s to %s', keyAuthorization, challengeFilePath);

    if (!safe.fs.writeFileSync(challengeFilePath, keyAuthorization)) throw new BoxError(BoxError.FS_ERROR, `Error writing challenge: ${safe.error.message}`);

    return challenge;
};
// Remove the http-01 challenge file written by prepareHttpChallenge.
// Throws BoxError.FS_ERROR when the file cannot be unlinked.
Acme2.prototype.cleanupHttpChallenge = async function (hostname, domain, challenge, acmeChallengesDir) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof acmeChallengesDir, 'string');

    const challengeFilePath = path.join(acmeChallengesDir, challenge.token);

    debug('cleanupHttpChallenge: unlinking %s', challengeFilePath);

    if (!safe.fs.unlinkSync(challengeFilePath)) throw new BoxError(BoxError.FS_ERROR, `Error unlinking challenge: ${safe.error.message}`);
};
// Compute the DNS label (relative to `domain`) where the ACME TXT record goes for
// `hostname`. e.g. ('www.example.com', 'example.com') -> '_acme-challenge.www';
// a '*' in a wildcard hostname is substituted by '_acme-challenge'.
function getChallengeSubdomain(hostname, domain) {
    const compute = () => {
        if (hostname === domain) return '_acme-challenge';

        const prefix = hostname.slice(0, -domain.length - 1); // strip the trailing '.domain'

        if (hostname.includes('*')) { // wildcard
            return prefix ? prefix.replace('*', '_acme-challenge') : '_acme-challenge';
        }

        return '_acme-challenge.' + prefix;
    };

    const challengeSubdomain = compute();

    debug(`getChallengeSubdomain: challenge subdomain for hostname ${hostname} at domain ${domain} is ${challengeSubdomain}`);

    return challengeSubdomain;
}
// Provision a dns-01 challenge: compute the TXT record value, upsert it via the
// domains backend and wait until the record is resolvable. Resolves with the
// selected challenge object.
Acme2.prototype.prepareDnsChallenge = async function (hostname, domain, authorization) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorization, 'object');

    debug('prepareDnsChallenge: challenges: %j', authorization);

    const dnsChallenges = authorization.challenges.filter(function(x) { return x.type === 'dns-01'; });
    if (dnsChallenges.length === 0) throw new BoxError(BoxError.EXTERNAL_ERROR, 'no dns challenges');
    const challenge = dnsChallenges[0];

    // TXT record value is base64url(sha256(keyAuthorization))
    const keyAuthorization = this.getKeyAuthorization(challenge.token);

    const shasum = crypto.createHash('sha256');
    shasum.update(keyAuthorization);
    const txtValue = urlBase64Encode(shasum.digest('base64'));

    const challengeSubdomain = getChallengeSubdomain(hostname, domain);

    debug(`prepareDnsChallenge: update ${challengeSubdomain} with ${txtValue}`);

    // domains.* are callback APIs; adapt the upsert-then-wait sequence to a Promise
    return new Promise((resolve, reject) => {
        domains.upsertDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], function (error) {
            if (error) return reject(error);

            domains.waitForDnsRecord(challengeSubdomain, domain, 'TXT', txtValue, { times: 200 }, function (error) {
                if (error) return reject(error);

                resolve(challenge);
            });
        });
    });
};
// Remove the TXT record created for a dns-01 challenge. The record value is
// recomputed the same way prepareDnsChallenge built it.
Acme2.prototype.cleanupDnsChallenge = async function (hostname, domain, challenge) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');

    const keyAuthorization = this.getKeyAuthorization(challenge.token);
    const digest = crypto.createHash('sha256').update(keyAuthorization).digest('base64');
    const txtValue = urlBase64Encode(digest);
    const challengeSubdomain = getChallengeSubdomain(hostname, domain);

    debug(`cleanupDnsChallenge: remove ${challengeSubdomain} with ${txtValue}`);

    // domains.removeDnsRecords is callback style; adapt it to a Promise
    return new Promise((resolve, reject) => {
        domains.removeDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], (error) => {
            if (error) return reject(error);

            resolve(null);
        });
    });
};
// Fetch the authorization and provision whichever challenge type this client is
// configured for (http-01 or dns-01). Resolves with the prepared challenge object.
Acme2.prototype.prepareChallenge = async function (hostname, domain, authorizationUrl, acmeChallengesDir) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorizationUrl, 'string');
    assert.strictEqual(typeof acmeChallengesDir, 'string');

    debug(`prepareChallenge: http: ${this.performHttpAuthorization}`);

    const response = await this.postAsGet(authorizationUrl);
    if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code getting authorization : ${response.status}`);

    const authorization = response.body;

    return this.performHttpAuthorization
        ? await this.prepareHttpChallenge(hostname, domain, authorization, acmeChallengesDir)
        : await this.prepareDnsChallenge(hostname, domain, authorization);
};
// Tears down whatever prepareChallenge set up: the well-known HTTP token file
// for HTTP-01, or the TXT record for DNS-01.
Acme2.prototype.cleanupChallenge = async function (hostname, domain, challenge, acmeChallengesDir) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof acmeChallengesDir, 'string');

    debug(`cleanupChallenge: http: ${this.performHttpAuthorization}`);

    if (!this.performHttpAuthorization) return await this.cleanupDnsChallenge(hostname, domain, challenge);

    await this.cleanupHttpChallenge(hostname, domain, challenge, acmeChallengesDir);
};
// Runs the full ACME order flow for `hostname`: register the account, create a
// new order, satisfy each authorization's challenge, finalize the order with a
// CSR and download the issued certificate into `paths.certFilePath`.
// NOTE(review): the CSR creation / signCertificate / waitForOrder / download
// steps run INSIDE the per-authorization loop, so a multi-authorization order
// would finalize once per authorization. Orders here appear to carry a single
// hostname (possibly a wildcard), making the loop effectively single-pass —
// confirm before reusing this for multi-SAN orders.
Acme2.prototype.acmeFlow = async function (hostname, domain, paths) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof paths, 'object');

    const { certFilePath, keyFilePath, csrFilePath, acmeChallengesDir } = paths;

    await this.registerUser();
    const { order, orderUrl } = await this.newOrder(hostname);

    for (let i = 0; i < order.authorizations.length; i++) {
        const authorizationUrl = order.authorizations[i];
        debug(`acmeFlow: authorizing ${authorizationUrl}`);

        // provision the challenge response (HTTP token file or DNS TXT record),
        // tell the CA it is ready, then poll until the CA validates it
        const challenge = await this.prepareChallenge(hostname, domain, authorizationUrl, acmeChallengesDir);
        await this.notifyChallengeReady(challenge);
        await this.waitForChallenge(challenge);

        const csrDer = await this.createKeyAndCsr(hostname, keyFilePath, csrFilePath);
        await this.signCertificate(hostname, order.finalize, csrDer);
        const certUrl = await this.waitForOrder(orderUrl);
        await this.downloadCertificate(hostname, certUrl, certFilePath);

        try {
            // best-effort: a failed cleanup must not fail an otherwise issued cert
            await this.cleanupChallenge(hostname, domain, challenge, acmeChallengesDir);
        } catch (cleanupError) {
            debug('acmeFlow: ignoring error when cleaning up challenge:', cleanupError);
        }
    }
};
// Fetches the ACME directory document from the CA and caches it on this.directory.
// Retries up to 3 times (20s apart) since the CA endpoint is an external dependency.
// Throws BoxError.EXTERNAL_ERROR when the response is not 200 or lacks the
// required endpoint URLs (newNonce, newOrder, newAccount).
Acme2.prototype.loadDirectory = async function () {
    await promiseRetry({ times: 3, interval: 20000 }, async () => {
        const response = await superagent.get(this.caDirectory).timeout(30000).ok(() => true);

        if (response.status !== 200) throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response code when fetching directory : ${response.status}`);
        // stringify the body: template-literal interpolation of an object would
        // render the useless "[object Object]" in the error message
        if (typeof response.body.newNonce !== 'string' ||
            typeof response.body.newOrder !== 'string' ||
            typeof response.body.newAccount !== 'string') throw new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${JSON.stringify(response.body)}`);

        this.directory = response.body;
    });
};
// Entry point for obtaining a certificate for `vhost` under `domain`.
// When wildcard mode is enabled and vhost is a subdomain (not the bare domain),
// the request is widened to the wildcard name so one cert covers all subdomains.
Acme2.prototype.getCertificate = async function (vhost, domain, paths) {
    assert.strictEqual(typeof vhost, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof paths, 'object');

    debug(`getCertificate: start acme flow for ${vhost} from ${this.caDirectory}`);

    const useWildcard = this.wildcard && vhost !== domain; // bare domain is not part of wildcard SAN
    if (useWildcard) {
        vhost = domains.makeWildcard(vhost);
        debug(`getCertificate: will get wildcard cert for ${vhost}`);
    }

    await this.loadDirectory();
    await this.acmeFlow(vhost, domain, paths);
};
// Callback-style wrapper around Acme2#getCertificate with up to 3 attempts.
// vhost may be a wildcard domain (for alias domains).
//
// Fix: the previous version did `.then(callback).catch(retryCallback)`, which
// (a) never signalled success to async.retry, leaving the retry task pending,
// and (b) routed a synchronous throw from `callback` into `retryCallback`,
// causing a retry and a second invocation of `callback` (double-callback).
// Success is now reported through retryCallback so async.retry invokes the
// final callback exactly once.
function getCertificate(vhost, domain, paths, options, callback) {
    assert.strictEqual(typeof vhost, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof paths, 'object');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    let attempt = 1;
    async.retry({ times: 3, interval: 0 }, function (retryCallback) {
        debug(`getCertificate: attempt ${attempt++}`);

        const acme = new Acme2(options); // options is asserted to be an object above
        acme.getCertificate(vhost, domain, paths).then(() => retryCallback(null)).catch(retryCallback);
    }, callback);
}
+245 -320
View File
@@ -1,12 +1,10 @@
'use strict';
exports = module.exports = {
getServiceIds,
getServiceStatus,
getServiceConfig,
getServiceLogs,
getServices,
getService,
configureService,
getServiceLogs,
restartService,
rebuildService,
@@ -14,6 +12,7 @@ exports = module.exports = {
stopAppServices,
startServices,
updateServiceConfig,
setupAddons,
teardownAddons,
@@ -22,6 +21,7 @@ exports = module.exports = {
clearAddons,
getEnvironment,
getMountsSync,
getContainerNamesSync,
getContainerDetails,
@@ -31,17 +31,17 @@ exports = module.exports = {
SERVICE_STATUS_STOPPED: 'stopped'
};
const appdb = require('./appdb.js'),
var appdb = require('./appdb.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
blobs = require('./blobs.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
crypto = require('crypto'),
debug = require('debug')('box:services'),
debug = require('debug')('box:addons'),
docker = require('./docker.js'),
fs = require('fs'),
graphs = require('./graphs.js'),
hat = require('./hat.js'),
infra = require('./infra_version.js'),
mail = require('./mail.js'),
@@ -58,13 +58,11 @@ const appdb = require('./appdb.js'),
spawn = require('child_process').spawn,
split = require('split'),
request = require('request'),
system = require('./system.js'),
util = require('util');
const NOOP = function (app, options, callback) { return callback(); };
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
const RMADDONDIR_CMD = path.join(__dirname, 'scripts/rmaddondir.sh');
const RESTART_SERVICE_CMD = path.join(__dirname, 'scripts/restartservice.sh');
// setup can be called multiple times for the same app (configure crash restart) and existing data must not be lost
// teardown is destructive. app data stored with the addon is lost
@@ -118,13 +116,6 @@ var ADDONS = {
restore: restorePostgreSql,
clear: clearPostgreSql,
},
proxyAuth: {
setup: setupProxyAuth,
teardown: teardownProxyAuth,
backup: NOOP,
restore: NOOP,
clear: NOOP
},
recvmail: {
setup: setupRecvMail,
teardown: teardownRecvMail,
@@ -160,13 +151,6 @@ var ADDONS = {
restore: NOOP,
clear: NOOP,
},
tls: {
setup: NOOP,
teardown: NOOP,
backup: NOOP,
restore: NOOP,
clear: NOOP,
},
oauth: { // kept for backward compatibility. keep teardown for uninstall to work
setup: NOOP,
teardown: teardownOauth,
@@ -180,27 +164,27 @@ var ADDONS = {
const SERVICES = {
turn: {
status: statusTurn,
restart: docker.restartContainer.bind(null, 'turn'),
restart: restartContainer.bind(null, 'turn'),
defaultMemoryLimit: 256 * 1024 * 1024
},
mail: {
status: containerStatus.bind(null, 'mail', 'CLOUDRON_MAIL_TOKEN'),
restart: mail.restartMail,
defaultMemoryLimit: mail.DEFAULT_MEMORY_LIMIT
defaultMemoryLimit: Math.max((1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 128, 256) * 1024 * 1024
},
mongodb: {
status: containerStatus.bind(null, 'mongodb', 'CLOUDRON_MONGODB_TOKEN'),
restart: docker.restartContainer.bind(null, 'mongodb'),
defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
restart: restartContainer.bind(null, 'mongodb'),
defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 200 * 1024 * 1024
},
mysql: {
status: containerStatus.bind(null, 'mysql', 'CLOUDRON_MYSQL_TOKEN'),
restart: docker.restartContainer.bind(null, 'mysql'),
restart: restartContainer.bind(null, 'mysql'),
defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
},
postgresql: {
status: containerStatus.bind(null, 'postgresql', 'CLOUDRON_POSTGRESQL_TOKEN'),
restart: docker.restartContainer.bind(null, 'postgresql'),
restart: restartContainer.bind(null, 'postgresql'),
defaultMemoryLimit: (1 + Math.round(os.totalmem()/(1024*1024*1024)/4)) * 256 * 1024 * 1024
},
docker: {
@@ -215,13 +199,13 @@ const SERVICES = {
},
sftp: {
status: statusSftp,
restart: docker.restartContainer.bind(null, 'sftp'),
defaultMemoryLimit: sftp.DEFAULT_MEMORY_LIMIT
restart: restartContainer.bind(null, 'sftp'),
defaultMemoryLimit: 256 * 1024 * 1024
},
graphite: {
status: statusGraphite,
restart: restartGraphite,
defaultMemoryLimit: 256 * 1024 * 1024
restart: restartContainer.bind(null, 'graphite'),
defaultMemoryLimit: 75 * 1024 * 1024
},
nginx: {
status: statusNginx,
@@ -235,7 +219,7 @@ const APP_SERVICES = {
status: (instance, done) => containerStatus(`redis-${instance}`, 'CLOUDRON_REDIS_TOKEN', done),
start: (instance, done) => docker.startContainer(`redis-${instance}`, done),
stop: (instance, done) => docker.stopContainer(`redis-${instance}`, done),
restart: (instance, done) => docker.restartContainer(`redis-${instance}`, done),
restart: (instance, done) => restartContainer(`redis-${instance}`, done),
defaultMemoryLimit: 150 * 1024 * 1024
}
};
@@ -270,6 +254,38 @@ function dumpPath(addon, appId) {
}
}
function rebuildService(serviceName, callback) {
assert.strictEqual(typeof serviceName, 'string');
assert.strictEqual(typeof callback, 'function');
// this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
// passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad
if (serviceName === 'turn') return startTurn({ version: 'none' }, callback);
if (serviceName === 'mongodb') return startMongodb({ version: 'none' }, callback);
if (serviceName === 'postgresql') return startPostgresql({ version: 'none' }, callback);
if (serviceName === 'mysql') return startMysql({ version: 'none' }, callback);
if (serviceName === 'sftp') return sftp.startSftp({ version: 'none' }, callback);
if (serviceName === 'graphite') return graphs.startGraphite({ version: 'none' }, callback);
// nothing to rebuild for now
callback();
}
function restartContainer(name, callback) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof callback, 'function');
docker.restartContainer(name, function (error) {
if (error && error.reason === BoxError.NOT_FOUND) {
callback(null); // callback early since rebuilding takes long
return rebuildService(name, function (error) { if (error) debug(`restartContainer: Unable to rebuild service ${name}`, error); });
}
if (error) return callback(error);
callback(error);
});
}
function getContainerDetails(containerName, tokenEnvName, callback) {
assert.strictEqual(typeof containerName, 'string');
assert.strictEqual(typeof tokenEnvName, 'string');
@@ -302,7 +318,7 @@ function containerStatus(containerName, tokenEnvName, callback) {
if (error && (error.reason === BoxError.NOT_FOUND || error.reason === BoxError.INACTIVE)) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
if (error) return callback(error);
request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false, timeout: 20000 }, function (error, response) {
request.get(`https://${addonDetails.ip}:3000/healthcheck?access_token=${addonDetails.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}: ${error.message}` });
if (response.statusCode !== 200 || !response.body.status) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}` });
@@ -312,8 +328,7 @@ function containerStatus(containerName, tokenEnvName, callback) {
var tmp = {
status: addonDetails.state.Running ? exports.SERVICE_STATUS_ACTIVE : exports.SERVICE_STATUS_STOPPED,
memoryUsed: result.memory_stats.usage,
memoryPercent: parseInt(100 * result.memory_stats.usage / result.memory_stats.limit),
healthcheck: response.body
memoryPercent: parseInt(100 * result.memory_stats.usage / result.memory_stats.limit)
};
callback(null, tmp);
@@ -322,32 +337,32 @@ function containerStatus(containerName, tokenEnvName, callback) {
});
}
function getServiceIds(callback) {
function getServices(callback) {
assert.strictEqual(typeof callback, 'function');
let serviceIds = Object.keys(SERVICES);
let services = Object.keys(SERVICES);
appdb.getAll(function (error, apps) {
if (error) return callback(error);
for (let app of apps) {
if (app.manifest.addons && app.manifest.addons['redis']) serviceIds.push(`redis:${app.id}`);
if (app.manifest.addons && app.manifest.addons['redis']) services.push(`redis:${app.id}`);
}
callback(null, serviceIds);
callback(null, services);
});
}
function getServiceConfig(id, callback) {
function getServicesConfig(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
const [name, instance] = id.split(':');
const [name, instance ] = id.split(':');
if (!instance) {
settings.getServicesConfig(function (error, servicesConfig) {
settings.getPlatformConfig(function (error, platformConfig) {
if (error) return callback(error);
callback(null, servicesConfig[name] || {});
callback(null, SERVICES[name], platformConfig);
});
return;
@@ -356,24 +371,22 @@ function getServiceConfig(id, callback) {
appdb.get(instance, function (error, app) {
if (error) return callback(error);
callback(null, app.servicesConfig[name] || {});
callback(null, APP_SERVICES[name], app.servicesConfig);
});
}
function getServiceStatus(id, callback) {
function getService(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
const [name, instance ] = id.split(':');
let containerStatusFunc, service;
let containerStatusFunc;
if (instance) {
service = APP_SERVICES[name];
if (!service) return callback(new BoxError(BoxError.NOT_FOUND));
containerStatusFunc = service.status.bind(null, instance);
if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));
containerStatusFunc = APP_SERVICES[name].status.bind(null, instance);
} else if (SERVICES[name]) {
service = SERVICES[name];
containerStatusFunc = service.status;
containerStatusFunc = SERVICES[name].status;
} else {
return callback(new BoxError(BoxError.NOT_FOUND));
}
@@ -384,8 +397,11 @@ function getServiceStatus(id, callback) {
memoryUsed: 0,
memoryPercent: 0,
error: null,
healthcheck: null,
config: {}
config: {
// If a property is not set then we cannot change it through the api, see below
// memory: 0,
// memorySwap: 0
}
};
containerStatusFunc(function (error, result) {
@@ -395,15 +411,18 @@ function getServiceStatus(id, callback) {
tmp.memoryUsed = result.memoryUsed;
tmp.memoryPercent = result.memoryPercent;
tmp.error = result.error || null;
tmp.healthcheck = result.healthcheck || null;
getServiceConfig(id, function (error, serviceConfig) {
getServicesConfig(id, function (error, service, servicesConfig) {
if (error) return callback(error);
tmp.config = serviceConfig;
const serviceConfig = servicesConfig[name];
if (!tmp.config.memoryLimit && service.defaultMemoryLimit) {
tmp.config.memoryLimit = service.defaultMemoryLimit;
if (serviceConfig && serviceConfig.memory && serviceConfig.memorySwap) {
tmp.config.memory = serviceConfig.memory;
tmp.config.memorySwap = serviceConfig.memorySwap;
} else if (service.defaultMemoryLimit) {
tmp.config.memory = service.defaultMemoryLimit;
tmp.config.memorySwap = tmp.config.memory * 2;
}
callback(null, tmp);
@@ -420,34 +439,37 @@ function configureService(id, data, callback) {
if (instance) {
if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));
} else if (!SERVICES[name]) {
return callback(new BoxError(BoxError.NOT_FOUND));
}
apps.get(instance, function (error, app) {
if (error) return callback(error);
getServicesConfig(id, function (error, service, servicesConfig) {
if (error) return callback(error);
const servicesConfig = app.servicesConfig;
servicesConfig[name] = data;
if (!servicesConfig[name]) servicesConfig[name] = {};
// if not specified we clear the entry and use defaults
if (!data.memory || !data.memorySwap) {
delete servicesConfig[name];
} else {
servicesConfig[name].memory = data.memory;
servicesConfig[name].memorySwap = data.memorySwap;
}
if (instance) {
appdb.update(instance, { servicesConfig }, function (error) {
if (error) return callback(error);
applyServiceConfig(id, data, callback);
updateAppServiceConfig(name, instance, servicesConfig, callback);
});
});
} else if (SERVICES[name]) {
settings.getServicesConfig(function (error, servicesConfig) {
if (error) return callback(error);
servicesConfig[name] = data;
settings.setServicesConfig(servicesConfig, function (error) {
} else {
settings.setPlatformConfig(servicesConfig, function (error) {
if (error) return callback(error);
applyServiceConfig(id, data, callback);
callback(null);
});
});
} else {
return callback(new BoxError(BoxError.NOT_FOUND));
}
}
});
}
function getServiceLogs(id, options, callback) {
@@ -528,29 +550,6 @@ function getServiceLogs(id, options, callback) {
callback(null, transformStream);
}
function rebuildService(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
// this attempts to recreate the service docker container if they don't exist but platform infra version is unchanged
// passing an infra version of 'none' will not attempt to purge existing data, not sure if this is good or bad
getServiceConfig(id, function (error, serviceConfig) {
if (error) return callback(error);
if (id === 'turn') return startTurn({ version: 'none' }, serviceConfig, callback);
if (id === 'mongodb') return startMongodb({ version: 'none' }, callback);
if (id === 'postgresql') return startPostgresql({ version: 'none' }, callback);
if (id === 'mysql') return startMysql({ version: 'none' }, callback);
if (id === 'sftp') return sftp.rebuild(serviceConfig, { /* options */ }, callback);
if (id === 'graphite') return startGraphite({ version: 'none' }, serviceConfig, callback);
// nothing to rebuild for now.
// TODO: mongo/postgresql/mysql need to be scaled down.
// TODO: missing redis container is not created
callback();
});
}
function restartService(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
@@ -560,9 +559,9 @@ function restartService(id, callback) {
if (instance) {
if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));
return APP_SERVICES[name].restart(instance, callback);
APP_SERVICES[name].restart(instance, callback);
} else if (SERVICES[name]) {
return SERVICES[name].restart(callback);
SERVICES[name].restart(callback);
} else {
return callback(new BoxError(BoxError.NOT_FOUND));
}
@@ -615,7 +614,7 @@ function waitForContainer(containerName, tokenEnvName, callback) {
if (error) return callback(error);
async.retry({ times: 10, interval: 15000 }, function (retryCallback) {
request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false, timeout: 5000 }, function (error, response) {
request.get(`https://${result.ip}:3000/healthcheck?access_token=${result.token}`, { json: true, rejectUnauthorized: false }, function (error, response) {
if (error) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Network error waiting for ${containerName}: ${error.message}`));
if (response.statusCode !== 200 || !response.body.status) return retryCallback(new BoxError(BoxError.ADDONS_ERROR, `Error waiting for ${containerName}. Status code: ${response.statusCode} message: ${response.body.message}`));
@@ -770,20 +769,18 @@ function exportDatabase(addon, callback) {
return callback(null);
}
appdb.getAll(function (error, allApps) {
appdb.getAll(function (error, apps) {
if (error) return callback(error);
async.eachSeries(allApps, function iterator (app, iteratorCallback) {
async.eachSeries(apps, function iterator (app, iteratorCallback) {
if (!app.manifest.addons || !(addon in app.manifest.addons)) return iteratorCallback(); // app doesn't use the addon
if (app.installationState === apps.ISTATE_ERROR) return iteratorCallback(); // missing db causes crash in old app addon containers
debug(`exportDatabase: Exporting addon ${addon} of app ${app.id}`);
ADDONS[addon].backup(app, app.manifest.addons[addon], function (error) {
if (error) {
debug(`exportDatabase: Error exporting ${addon} of app ${app.id}.`, error);
// for errored apps, we can ignore if export had an error
return iteratorCallback(app.installationState === apps.ISTATE_ERROR ? null : error);
return iteratorCallback(error);
}
iteratorCallback();
@@ -801,84 +798,82 @@ function exportDatabase(addon, callback) {
});
}
function applyServiceConfig(id, serviceConfig, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof serviceConfig, 'object');
function updateServiceConfig(platformConfig, callback) {
assert.strictEqual(typeof platformConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const [name, instance] = id.split(':');
let containerName, memoryLimit;
async.eachSeries([ 'mysql', 'postgresql', 'mail', 'mongodb', 'graphite' ], function iterator(serviceName, iteratorCallback) {
const containerConfig = platformConfig[serviceName];
let memory, memorySwap;
if (containerConfig && containerConfig.memory && containerConfig.memorySwap) {
memory = containerConfig.memory;
memorySwap = containerConfig.memorySwap;
} else {
memory = SERVICES[serviceName].defaultMemoryLimit;
memorySwap = memory * 2;
}
if (instance) {
if (!APP_SERVICES[name]) return callback(new BoxError(BoxError.NOT_FOUND));
const args = `update --memory ${memory} --memory-swap ${memorySwap} ${serviceName}`.split(' ');
// scale back db containers, if possible. this is retried because updating memory constraints can fail
// with failed to write to memory.memsw.limit_in_bytes: write /sys/fs/cgroup/memory/docker/xx/memory.memsw.limit_in_bytes: device or resource busy
async.retry({ times: 10, interval: 60 * 1000 }, function (retryCallback) {
shell.spawn(`updateServiceConfig(${serviceName})`, '/usr/bin/docker', args, { }, retryCallback);
}, iteratorCallback);
}, callback);
}
containerName = `${name}-${instance}`;
memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : APP_SERVICES[name].defaultMemoryLimit;
} else if (SERVICES[name]) {
containerName = name;
memoryLimit = serviceConfig && serviceConfig.memoryLimit ? serviceConfig.memoryLimit : SERVICES[name].defaultMemoryLimit;
function updateAppServiceConfig(name, instance, servicesConfig, callback) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof instance, 'string');
assert.strictEqual(typeof servicesConfig, 'object');
assert.strictEqual(typeof callback, 'function');
debug(`updateAppServiceConfig: ${name}-${instance} ${JSON.stringify(servicesConfig)}`);
const serviceConfig = servicesConfig[name];
let memory, memorySwap;
if (serviceConfig && serviceConfig.memory && serviceConfig.memorySwap) {
memory = serviceConfig.memory;
memorySwap = serviceConfig.memorySwap;
} else {
return callback(new BoxError(BoxError.NOT_FOUND));
memory = APP_SERVICES[name].defaultMemoryLimit;
memorySwap = memory * 2;
}
debug(`updateServiceConfig: ${containerName} ${JSON.stringify(serviceConfig)}`);
const memory = system.getMemoryAllocation(memoryLimit);
docker.update(containerName, memory, memoryLimit, callback);
const args = `update --memory ${memory} --memory-swap ${memorySwap} ${name}-${instance}`.split(' ');
shell.spawn(`updateAppServiceConfig${name}`, '/usr/bin/docker', args, { }, callback);
}
function startServices(existingInfra, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof callback, 'function');
settings.getServicesConfig(function (error, servicesConfig) {
if (error) return callback(error);
let startFuncs = [ ];
let startFuncs = [ ];
// always start addons on any infra change, regardless of minor or major update
if (existingInfra.version !== infra.version) {
debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
startFuncs.push(
startTurn.bind(null, existingInfra),
startMysql.bind(null, existingInfra),
startPostgresql.bind(null, existingInfra),
startMongodb.bind(null, existingInfra),
startRedis.bind(null, existingInfra),
mail.startMail);
} else {
assert.strictEqual(typeof existingInfra.images, 'object');
// always start addons on any infra change, regardless of minor or major update
if (existingInfra.version !== infra.version) {
debug(`startServices: ${existingInfra.version} -> ${infra.version}. starting all services`);
startFuncs.push(
mail.startMail, // start this first to reduce email downtime
startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}),
startMysql.bind(null, existingInfra),
startPostgresql.bind(null, existingInfra),
startMongodb.bind(null, existingInfra),
startRedis.bind(null, existingInfra),
startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}),
sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}),
);
} else {
assert.strictEqual(typeof existingInfra.images, 'object');
if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra));
if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail);
if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
if (infra.images.mail.tag !== existingInfra.images.mail.tag) startFuncs.push(mail.startMail); // start this first to reduce email downtime
if (infra.images.turn.tag !== existingInfra.images.turn.tag) startFuncs.push(startTurn.bind(null, existingInfra, servicesConfig['turn'] || {}));
if (infra.images.mysql.tag !== existingInfra.images.mysql.tag) startFuncs.push(startMysql.bind(null, existingInfra));
if (infra.images.postgresql.tag !== existingInfra.images.postgresql.tag) startFuncs.push(startPostgresql.bind(null, existingInfra));
if (infra.images.mongodb.tag !== existingInfra.images.mongodb.tag) startFuncs.push(startMongodb.bind(null, existingInfra));
if (infra.images.redis.tag !== existingInfra.images.redis.tag) startFuncs.push(startRedis.bind(null, existingInfra));
if (infra.images.graphite.tag !== existingInfra.images.graphite.tag) startFuncs.push(startGraphite.bind(null, existingInfra, servicesConfig['graphite'] || {}));
if (infra.images.sftp.tag !== existingInfra.images.sftp.tag) startFuncs.push(sftp.start.bind(null, existingInfra, servicesConfig['sftp'] || {}));
debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
}
debug('startServices: existing infra. incremental service create %j', startFuncs.map(function (f) { return f.name; }));
}
async.series(startFuncs, function (error) {
if (error) return callback(error);
// we always start db containers with unlimited memory. we then scale them down per configuration
let updateFuncs = [
applyServiceConfig.bind(null, 'mysql', servicesConfig['mysql'] || {}),
applyServiceConfig.bind(null, 'postgresql', servicesConfig['postgresql'] || {}),
applyServiceConfig.bind(null, 'mongodb', servicesConfig['mongodb'] || {}),
];
async.series(updateFuncs, NOOP_CALLBACK); // it's ok if applying service configs fails
callback();
});
});
async.series(startFuncs, callback);
}
function getEnvironment(app, callback) {
@@ -894,6 +889,31 @@ function getEnvironment(app, callback) {
});
}
function getMountsSync(app, addons) {
assert.strictEqual(typeof app, 'object');
assert(!addons || typeof addons === 'object');
let mounts = [ ];
if (!addons) return mounts;
for (let addon in addons) {
switch (addon) {
case 'localstorage':
mounts.push({
Target: '/app/data',
Source: `${app.id}-localstorage`,
Type: 'volume',
ReadOnly: false
});
break;
default: break;
}
}
return mounts;
}
function getContainerNamesSync(app, addons) {
assert.strictEqual(typeof app, 'object');
assert(!addons || typeof addons === 'object');
@@ -959,25 +979,22 @@ function setupTurn(app, options, callback) {
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
const blobGet = util.callbackify(blobs.get);
blobGet(blobs.ADDON_TURN_SECRET, function (error, turnSecret) {
if (error) return callback(error);
if (!turnSecret) return callback(new BoxError(BoxError.ADDONS_ERROR, 'Turn secret is missing'));
var turnSecret = safe.fs.readFileSync(paths.ADDON_TURN_SECRET_FILE, 'utf8');
if (!turnSecret) debug('setupTurn: no turn secret set. Will leave emtpy, but this is a problem!');
const env = [
{ name: 'CLOUDRON_STUN_SERVER', value: settings.dashboardFqdn() },
{ name: 'CLOUDRON_STUN_PORT', value: '3478' },
{ name: 'CLOUDRON_STUN_TLS_PORT', value: '5349' },
{ name: 'CLOUDRON_TURN_SERVER', value: settings.dashboardFqdn() },
{ name: 'CLOUDRON_TURN_PORT', value: '3478' },
{ name: 'CLOUDRON_TURN_TLS_PORT', value: '5349' },
{ name: 'CLOUDRON_TURN_SECRET', value: turnSecret }
];
const env = [
{ name: 'CLOUDRON_STUN_SERVER', value: settings.adminFqdn() },
{ name: 'CLOUDRON_STUN_PORT', value: '3478' },
{ name: 'CLOUDRON_STUN_TLS_PORT', value: '5349' },
{ name: 'CLOUDRON_TURN_SERVER', value: settings.adminFqdn() },
{ name: 'CLOUDRON_TURN_PORT', value: '3478' },
{ name: 'CLOUDRON_TURN_TLS_PORT', value: '5349' },
{ name: 'CLOUDRON_TURN_SECRET', value: turnSecret }
];
debugApp(app, 'Setting up TURN');
debugApp(app, 'Setting up TURN');
appdb.setAddonConfig(app.id, 'turn', env, callback);
});
appdb.setAddonConfig(app.id, 'turn', env, callback);
}
function teardownTurn(app, options, callback) {
@@ -1012,7 +1029,7 @@ function setupEmail(app, options, callback) {
{ name: `${envPrefix}MAIL_SIEVE_PORT`, value: '4190' },
{ name: `${envPrefix}MAIL_DOMAIN`, value: app.domain },
{ name: `${envPrefix}MAIL_DOMAINS`, value: mailInDomains },
{ name: 'CLOUDRON_MAIL_SERVER_HOST', value: settings.mailFqdn() }, // this is also a hint to reconfigure on mail server name change
{ name: 'CLOUDRON_MAIL_SERVER_HOST', value: settings.mailFqdn() },
{ name: `${envPrefix}LDAP_MAILBOXES_BASE_DN`, value: 'ou=mailboxes,dc=cloudron' }
];
@@ -1074,9 +1091,6 @@ function setupSendMail(app, options, callback) {
debugApp(app, 'Setting up SendMail');
const disabled = app.manifest.addons.sendmail.optional && !app.enableMailbox;
if (disabled) return appdb.setAddonConfig(app.id, 'sendmail', [], callback);
appdb.getAddonConfigByName(app.id, 'sendmail', '%MAIL_SMTP_PASSWORD', function (error, existingPassword) {
if (error && error.reason !== BoxError.NOT_FOUND) return callback(error);
@@ -1088,7 +1102,6 @@ function setupSendMail(app, options, callback) {
{ name: `${envPrefix}MAIL_SMTP_SERVER`, value: 'mail' },
{ name: `${envPrefix}MAIL_SMTP_PORT`, value: '2525' },
{ name: `${envPrefix}MAIL_SMTPS_PORT`, value: '2465' },
{ name: `${envPrefix}MAIL_STARTTLS_PORT`, value: '2587' },
{ name: `${envPrefix}MAIL_SMTP_USERNAME`, value: app.mailboxName + '@' + app.mailboxDomain },
{ name: `${envPrefix}MAIL_SMTP_PASSWORD`, value: password },
{ name: `${envPrefix}MAIL_FROM`, value: app.mailboxName + '@' + app.mailboxDomain },
@@ -1188,7 +1201,6 @@ function startMysql(existingInfra, callback) {
-e CLOUDRON_MYSQL_ROOT_PASSWORD=${rootPassword} \
-v "${dataDir}/mysql:/var/lib/mysql" \
--label isCloudronManaged=true \
--cap-add SYS_NICE \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
@@ -1441,8 +1453,7 @@ function setupPostgreSql(app, options, callback) {
const data = {
database: database,
username: username,
password: error ? hat(4 * 128) : existingPassword,
locale: options.locale || 'C'
password: error ? hat(4 * 128) : existingPassword
};
getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
@@ -1476,14 +1487,13 @@ function clearPostgreSql(app, options, callback) {
assert.strictEqual(typeof callback, 'function');
const { database, username } = postgreSqlNames(app.id);
const locale = options.locale || 'C';
debugApp(app, 'Clearing postgresql');
getContainerDetails('postgresql', 'CLOUDRON_POSTGRESQL_TOKEN', function (error, result) {
if (error) return callback(error);
request.post(`https://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}&locale=${locale}`, { json: true, rejectUnauthorized: false }, function (error, response) {
request.post(`https://${result.ip}:3000/databases/${database}/clear?access_token=${result.token}&username=${username}`, { json: true, rejectUnauthorized: false }, function (error, response) {
if (error) return callback(new BoxError(BoxError.ADDONS_ERROR, `Network error clearing postgresql: ${error.message}`));
if (response.statusCode !== 200) return callback(new BoxError(BoxError.ADDONS_ERROR, `Error clearing postgresql. Status code: ${response.statusCode} message: ${response.body.message}`));
@@ -1556,44 +1566,43 @@ function restorePostgreSql(app, options, callback) {
});
}
function startTurn(existingInfra, serviceConfig, callback) {
function startTurn(existingInfra, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
// get and ensure we have a turn secret
var turnSecret = safe.fs.readFileSync(paths.ADDON_TURN_SECRET_FILE, 'utf8');
if (!turnSecret) {
turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
safe.fs.writeFileSync(paths.ADDON_TURN_SECRET_FILE, turnSecret, 'utf8');
}
const tag = infra.images.turn.tag;
const memoryLimit = serviceConfig.memoryLimit || SERVICES['turn'].defaultMemoryLimit;
const memory = system.getMemoryAllocation(memoryLimit);
const realm = settings.dashboardFqdn();
const memoryLimit = 256;
const realm = settings.adminFqdn();
const blobGet = util.callbackify(blobs.get);
blobGet(blobs.ADDON_TURN_SECRET, function (error, turnSecret) {
if (error) return callback(error);
if (!turnSecret) return callback(new BoxError(BoxError.ADDONS_ERROR, 'Turn secret is missing'));
// this exports 3478/tcp, 5349/tls and 50000-51000/udp
const cmd = `docker run --restart=always -d --name="turn" \
--hostname turn \
--net host \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=turn \
-m ${memoryLimit}m \
--memory-swap ${memoryLimit * 2}m \
--dns 172.18.0.1 \
--dns-search=. \
-e CLOUDRON_TURN_SECRET="${turnSecret}" \
-e CLOUDRON_REALM="${realm}" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
// this exports 3478/tcp, 5349/tls and 50000-51000/udp. note that this runs on the host network!
const cmd = `docker run --restart=always -d --name="turn" \
--hostname turn \
--net host \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=turn \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-e CLOUDRON_TURN_SECRET="${turnSecret}" \
-e CLOUDRON_REALM="${realm}" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopTurn', 'docker stop turn || true'),
shell.exec.bind(null, 'removeTurn', 'docker rm -f turn || true'),
shell.exec.bind(null, 'startTurn', cmd)
], callback);
});
async.series([
shell.exec.bind(null, 'stopTurn', 'docker stop turn || true'),
shell.exec.bind(null, 'removeTurn', 'docker rm -f turn || true'),
shell.exec.bind(null, 'startTurn', cmd)
], callback);
}
function startMongodb(existingInfra, callback) {
@@ -1789,77 +1798,6 @@ function restoreMongoDb(app, options, callback) {
});
}
function startGraphite(existingInfra, serviceConfig, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof serviceConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const tag = infra.images.graphite.tag;
const memoryLimit = serviceConfig.memoryLimit || 256 * 1024 * 1024;
const memory = system.getMemoryAllocation(memoryLimit);
const upgrading = existingInfra.version !== 'none' && requiresUpgrade(existingInfra.images.graphite.tag, tag);
if (upgrading) debug('startGraphite: graphite will be upgraded');
const cmd = `docker run --restart=always -d --name="graphite" \
--hostname graphite \
--net cloudron \
--net-alias graphite \
--log-driver syslog \
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag=graphite \
-m ${memory} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
-p 127.0.0.1:2003:2003 \
-p 127.0.0.1:2004:2004 \
-p 127.0.0.1:8417:8000 \
-v "${paths.PLATFORM_DATA_DIR}/graphite:/var/lib/graphite" \
--label isCloudronManaged=true \
--read-only -v /tmp -v /run "${tag}"`;
async.series([
shell.exec.bind(null, 'stopGraphite', 'docker stop graphite || true'),
shell.exec.bind(null, 'removeGraphite', 'docker rm -f graphite || true'),
(done) => {
if (!upgrading) return done();
shell.sudo('removeGraphiteDir', [ RMADDONDIR_CMD, 'graphite' ], {}, done);
},
shell.exec.bind(null, 'startGraphite', cmd)
], function (error) {
// restart collectd to get the disk stats after graphite starts. currently, there is no way to do graphite health check
if (!error) setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);
callback(error);
});
}
function setupProxyAuth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
debugApp(app, 'Setting up proxyAuth');
const enabled = app.sso && app.manifest.addons && app.manifest.addons.proxyAuth;
if (!enabled) return callback();
const env = [ { name: 'CLOUDRON_PROXY_AUTH', value: '1' } ];
appdb.setAddonConfig(app.id, 'proxyauth', env, callback);
}
function teardownProxyAuth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
appdb.unsetAddonConfig(app.id, 'proxyauth', callback);
}
function startRedis(existingInfra, callback) {
assert.strictEqual(typeof existingInfra, 'object');
assert.strictEqual(typeof callback, 'function');
@@ -1876,10 +1814,6 @@ function startRedis(existingInfra, callback) {
const redisName = 'redis-' + app.id;
async.series([
(done) => {
if (!upgrading) return done();
backupRedis(app, {}, done);
},
shell.exec.bind(null, 'stopRedis', `docker stop ${redisName} || true`), // redis will backup as part of signal handling
shell.exec.bind(null, 'removeRedis', `docker rm -f ${redisName} || true`),
setupRedis.bind(null, app, app.manifest.addons.redis) // starts the container
@@ -1909,8 +1843,7 @@ function setupRedis(app, options, callback) {
const redisServiceToken = hat(4 * 48);
// Compute redis memory limit based on app's memory limit (this is arbitrary)
const memoryLimit = app.servicesConfig['redis'] ? app.servicesConfig['redis'].memoryLimit : APP_SERVICES['redis'].defaultMemoryLimit;
const memory = system.getMemoryAllocation(memoryLimit);
const memoryLimit = app.servicesConfig['redis'] ? app.servicesConfig['redis'].memory : APP_SERVICES['redis'].defaultMemoryLimit;
const tag = infra.images.redis.tag;
const label = app.fqdn;
@@ -1924,7 +1857,7 @@ function setupRedis(app, options, callback) {
--log-opt syslog-address=udp://127.0.0.1:2514 \
--log-opt syslog-format=rfc5424 \
--log-opt tag="${redisName}" \
-m ${memory} \
-m ${memoryLimit/2} \
--memory-swap ${memoryLimit} \
--dns 172.18.0.1 \
--dns-search=. \
@@ -1993,7 +1926,7 @@ function teardownRedis(app, options, callback) {
if (error) return callback(new BoxError(BoxError.FS_ERROR, `Error removing redis data: ${error.message}`));
rimraf(path.join(paths.LOG_DIR, `redis-${app.id}`), function (error) {
if (error) debugApp(app, 'cannot cleanup logs:', error);
if (error) debugApp(app, 'cannot cleanup logs: %s', error);
appdb.unsetAddonConfig(app.id, 'redis', callback);
});
@@ -2080,7 +2013,7 @@ function statusDocker(callback) {
function restartDocker(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartdocker', [ RESTART_SERVICE_CMD, 'docker' ], {}, NOOP_CALLBACK);
shell.sudo('restartdocker', [ path.join(__dirname, 'scripts/restartdocker.sh') ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2096,7 +2029,7 @@ function statusUnbound(callback) {
function restartUnbound(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartunbound', [ RESTART_SERVICE_CMD, 'unbound' ], {}, NOOP_CALLBACK);
shell.sudo('restartunbound', [ path.join(__dirname, 'scripts/restartunbound.sh') ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2112,7 +2045,7 @@ function statusNginx(callback) {
function restartNginx(callback) {
assert.strictEqual(typeof callback, 'function');
shell.sudo('restartnginx', [ RESTART_SERVICE_CMD, 'nginx' ], {}, NOOP_CALLBACK);
shell.sudo('reloadnginx', [ path.join(__dirname, 'scripts/reloadnginx.sh') ], {}, NOOP_CALLBACK);
callback(null);
}
@@ -2145,7 +2078,7 @@ function statusGraphite(callback) {
if (error && error.reason === BoxError.NOT_FOUND) return callback(null, { status: exports.SERVICE_STATUS_STOPPED });
if (error) return callback(error);
request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 20000 }, function (error, response) {
request.get('http://127.0.0.1:8417/graphite-web/dashboard', { json: true, timeout: 3000 }, function (error, response) {
if (error) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite: ${error.message}` });
if (response.statusCode !== 200) return callback(null, { status: exports.SERVICE_STATUS_STARTING, error: `Error waiting for graphite. Status code: ${response.statusCode} message: ${response.body.message}` });
@@ -2164,14 +2097,6 @@ function statusGraphite(callback) {
});
}
function restartGraphite(callback) {
assert.strictEqual(typeof callback, 'function');
docker.restartContainer('graphite', callback);
setTimeout(() => shell.sudo('restartcollectd', [ RESTART_SERVICE_CMD, 'collectd' ], {}, NOOP_CALLBACK), 60000);
}
function teardownOauth(app, options, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof options, 'object');
+136 -138
View File
@@ -1,53 +1,54 @@
'use strict';
exports = module.exports = {
get,
add,
exists,
del,
update,
getAll,
getPortBindings,
delPortBinding,
get: get,
getByHttpPort: getByHttpPort,
getByContainerId: getByContainerId,
add: add,
exists: exists,
del: del,
update: update,
getAll: getAll,
getPortBindings: getPortBindings,
delPortBinding: delPortBinding,
setAddonConfig,
getAddonConfig,
getAddonConfigByAppId,
getAddonConfigByName,
unsetAddonConfig,
unsetAddonConfigByAppId,
getAppIdByAddonConfigValue,
getByIpAddress,
setAddonConfig: setAddonConfig,
getAddonConfig: getAddonConfig,
getAddonConfigByAppId: getAddonConfigByAppId,
getAddonConfigByName: getAddonConfigByName,
unsetAddonConfig: unsetAddonConfig,
unsetAddonConfigByAppId: unsetAddonConfigByAppId,
getAppIdByAddonConfigValue: getAppIdByAddonConfigValue,
getIcons,
setHealth,
setTask,
getAppStoreIds,
setHealth: setHealth,
setTask: setTask,
getAppStoreIds: getAppStoreIds,
// subdomain table types
SUBDOMAIN_TYPE_PRIMARY: 'primary',
SUBDOMAIN_TYPE_REDIRECT: 'redirect',
SUBDOMAIN_TYPE_ALIAS: 'alias',
_clear: clear
};
const assert = require('assert'),
var assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
database = require('./database.js'),
safe = require('safetydance'),
util = require('util');
const APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuShares',
'apps.label', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson',
'apps.sso', 'apps.debugModeJson', 'apps.enableBackup', 'apps.proxyAuth', 'apps.containerIp',
'apps.creationTime', 'apps.updateTime', 'apps.enableMailbox', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableAutomaticUpdate',
'apps.dataDir', 'apps.ts', 'apps.healthTime', '(apps.icon IS NOT NULL) AS hasIcon', '(apps.appStoreIcon IS NOT NULL) AS hasAppStoreIcon' ].join(',');
var APPS_FIELDS_PREFIXED = [ 'apps.id', 'apps.appStoreId', 'apps.installationState', 'apps.errorJson', 'apps.runState',
'apps.health', 'apps.containerId', 'apps.manifestJson', 'apps.httpPort', 'subdomains.subdomain AS location', 'subdomains.domain',
'apps.accessRestrictionJson', 'apps.memoryLimit', 'apps.cpuShares',
'apps.label', 'apps.tagsJson', 'apps.taskId', 'apps.reverseProxyConfigJson', 'apps.servicesConfigJson', 'apps.bindsJson',
'apps.sso', 'apps.debugModeJson', 'apps.enableBackup',
'apps.creationTime', 'apps.updateTime', 'apps.mailboxName', 'apps.mailboxDomain', 'apps.enableAutomaticUpdate',
'apps.dataDir', 'apps.ts', 'apps.healthTime' ].join(',');
const PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId' ].join(',');
var PORT_BINDINGS_FIELDS = [ 'hostPort', 'type', 'environmentVariable', 'appId' ].join(',');
const SUBDOMAIN_FIELDS = [ 'appId', 'domain', 'subdomain', 'type' ].join(',');
function postProcess(result) {
assert.strictEqual(typeof result, 'object');
@@ -85,13 +86,9 @@ function postProcess(result) {
if (result.accessRestriction && !result.accessRestriction.users) result.accessRestriction.users = [];
delete result.accessRestrictionJson;
result.sso = !!result.sso;
result.enableBackup = !!result.enableBackup;
result.enableAutomaticUpdate = !!result.enableAutomaticUpdate;
result.enableMailbox = !!result.enableMailbox;
result.proxyAuth = !!result.proxyAuth;
result.hasIcon = !!result.hasIcon;
result.hasAppStoreIcon = !!result.hasAppStoreIcon;
result.sso = !!result.sso; // make it bool
result.enableBackup = !!result.enableBackup; // make it bool
result.enableAutomaticUpdate = !!result.enableAutomaticUpdate; // make it bool
assert(result.debugModeJson === null || typeof result.debugModeJson === 'string');
result.debugMode = safe.JSON.parse(result.debugModeJson);
@@ -101,23 +98,15 @@ function postProcess(result) {
result.servicesConfig = safe.JSON.parse(result.servicesConfigJson) || {};
delete result.servicesConfigJson;
let subdomains = JSON.parse(result.subdomains), domains = JSON.parse(result.domains), subdomainTypes = JSON.parse(result.subdomainTypes);
delete result.subdomains;
delete result.domains;
delete result.subdomainTypes;
assert(result.bindsJson === null || typeof result.bindsJson === 'string');
result.binds = safe.JSON.parse(result.bindsJson) || {};
delete result.bindsJson;
result.alternateDomains = [];
result.aliasDomains = [];
for (let i = 0; i < subdomainTypes.length; i++) {
if (subdomainTypes[i] === exports.SUBDOMAIN_TYPE_PRIMARY) {
result.location = subdomains[i];
result.domain = domains[i];
} else if (subdomainTypes[i] === exports.SUBDOMAIN_TYPE_REDIRECT) {
result.alternateDomains.push({ domain: domains[i], subdomain: subdomains[i] });
} else if (subdomainTypes[i] === exports.SUBDOMAIN_TYPE_ALIAS) {
result.aliasDomains.push({ domain: domains[i], subdomain: subdomains[i] });
}
}
result.alternateDomains = result.alternateDomains || [];
result.alternateDomains.forEach(function (d) {
delete d.appId;
delete d.type;
});
let envNames = JSON.parse(result.envNames), envValues = JSON.parse(result.envValues);
delete result.envNames;
@@ -127,67 +116,119 @@ function postProcess(result) {
if (envNames[i]) result.env[envNames[i]] = envValues[i];
}
let volumeIds = JSON.parse(result.volumeIds);
delete result.volumeIds;
let volumeReadOnlys = JSON.parse(result.volumeReadOnlys);
delete result.volumeReadOnlys;
result.mounts = volumeIds[0] === null ? [] : volumeIds.map((v, idx) => { return { volumeId: v, readOnly: !!volumeReadOnlys[idx] }; }); // NOTE: volumeIds is [null] when volumes of an app is empty
result.error = safe.JSON.parse(result.errorJson);
delete result.errorJson;
result.taskId = result.taskId ? String(result.taskId) : null;
}
// each query simply join apps table with another table by id. we then join the full result together
const PB_QUERY = 'SELECT id, GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes FROM apps LEFT JOIN appPortBindings ON apps.id = appPortBindings.appId GROUP BY apps.id';
const ENV_QUERY = 'SELECT id, JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues FROM apps LEFT JOIN appEnvVars ON apps.id = appEnvVars.appId GROUP BY apps.id';
const SUBDOMAIN_QUERY = 'SELECT id, JSON_ARRAYAGG(subdomains.subdomain) AS subdomains, JSON_ARRAYAGG(subdomains.domain) AS domains, JSON_ARRAYAGG(subdomains.type) AS subdomainTypes FROM apps LEFT JOIN subdomains ON apps.id = subdomains.appId GROUP BY apps.id';
const MOUNTS_QUERY = 'SELECT id, JSON_ARRAYAGG(appMounts.volumeId) AS volumeIds, JSON_ARRAYAGG(appMounts.readOnly) AS volumeReadOnlys FROM apps LEFT JOIN appMounts ON apps.id = appMounts.appId GROUP BY apps.id';
const APPS_QUERY = `SELECT ${APPS_FIELDS_PREFIXED}, hostPorts, environmentVariables, portTypes, envNames, envValues, subdomains, domains, subdomainTypes, volumeIds, volumeReadOnlys FROM apps`
+ ` LEFT JOIN (${PB_QUERY}) AS q1 on q1.id = apps.id`
+ ` LEFT JOIN (${ENV_QUERY}) AS q2 on q2.id = apps.id`
+ ` LEFT JOIN (${SUBDOMAIN_QUERY}) AS q3 on q3.id = apps.id`
+ ` LEFT JOIN (${MOUNTS_QUERY}) AS q4 on q4.id = apps.id`;
function get(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
database.query(`${APPS_QUERY} WHERE apps.id = ?`, [ id ], function (error, result) {
database.query('SELECT ' + APPS_FIELDS_PREFIXED + ','
+ 'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes, '
+ 'JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues'
+ ' FROM apps'
+ ' LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId'
+ ' LEFT OUTER JOIN appEnvVars ON apps.id = appEnvVars.appId'
+ ' LEFT OUTER JOIN subdomains ON apps.id = subdomains.appId AND subdomains.type = ?'
+ ' WHERE apps.id = ? GROUP BY apps.id', [ exports.SUBDOMAIN_TYPE_PRIMARY, id ], function (error, result) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
if (result.length === 0) return callback(new BoxError(BoxError.NOT_FOUND, 'App not found'));
postProcess(result[0]);
database.query('SELECT ' + SUBDOMAIN_FIELDS + ' FROM subdomains WHERE appId = ? AND type = ?', [ id, exports.SUBDOMAIN_TYPE_REDIRECT ], function (error, alternateDomains) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
callback(null, result[0]);
result[0].alternateDomains = alternateDomains;
postProcess(result[0]);
callback(null, result[0]);
});
});
}
function getByIpAddress(ip, callback) {
assert.strictEqual(typeof ip, 'string');
function getByHttpPort(httpPort, callback) {
assert.strictEqual(typeof httpPort, 'number');
assert.strictEqual(typeof callback, 'function');
database.query(`${APPS_QUERY} WHERE apps.containerIp = ?`, [ ip ], function (error, result) {
database.query('SELECT ' + APPS_FIELDS_PREFIXED + ','
+ 'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes,'
+ 'JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues'
+ ' FROM apps'
+ ' LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId'
+ ' LEFT OUTER JOIN appEnvVars ON apps.id = appEnvVars.appId'
+ ' LEFT OUTER JOIN subdomains ON apps.id = subdomains.appId AND subdomains.type = ?'
+ ' WHERE httpPort = ? GROUP BY apps.id', [ exports.SUBDOMAIN_TYPE_PRIMARY, httpPort ], function (error, result) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
if (result.length === 0) return callback(new BoxError(BoxError.NOT_FOUND, 'App not found'));
postProcess(result[0]);
database.query('SELECT ' + SUBDOMAIN_FIELDS + ' FROM subdomains WHERE appId = ? AND type = ?', [ result[0].id, exports.SUBDOMAIN_TYPE_REDIRECT ], function (error, alternateDomains) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
callback(null, result[0]);
result[0].alternateDomains = alternateDomains;
postProcess(result[0]);
callback(null, result[0]);
});
});
}
function getByContainerId(containerId, callback) {
assert.strictEqual(typeof containerId, 'string');
assert.strictEqual(typeof callback, 'function');
database.query('SELECT ' + APPS_FIELDS_PREFIXED + ','
+ 'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes,'
+ 'JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues'
+ ' FROM apps'
+ ' LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId'
+ ' LEFT OUTER JOIN appEnvVars ON apps.id = appEnvVars.appId'
+ ' LEFT OUTER JOIN subdomains ON apps.id = subdomains.appId AND subdomains.type = ?'
+ ' WHERE containerId = ? GROUP BY apps.id', [ exports.SUBDOMAIN_TYPE_PRIMARY, containerId ], function (error, result) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
if (result.length === 0) return callback(new BoxError(BoxError.NOT_FOUND, 'App not found'));
database.query('SELECT ' + SUBDOMAIN_FIELDS + ' FROM subdomains WHERE appId = ? AND type = ?', [ result[0].id, exports.SUBDOMAIN_TYPE_REDIRECT ], function (error, alternateDomains) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
result[0].alternateDomains = alternateDomains;
postProcess(result[0]);
callback(null, result[0]);
});
});
}
function getAll(callback) {
assert.strictEqual(typeof callback, 'function');
database.query(`${APPS_QUERY} ORDER BY apps.id`, [ ], function (error, results) {
database.query('SELECT ' + APPS_FIELDS_PREFIXED + ','
+ 'GROUP_CONCAT(CAST(appPortBindings.hostPort AS CHAR(6))) AS hostPorts, GROUP_CONCAT(appPortBindings.environmentVariable) AS environmentVariables, GROUP_CONCAT(appPortBindings.type) AS portTypes,'
+ 'JSON_ARRAYAGG(appEnvVars.name) AS envNames, JSON_ARRAYAGG(appEnvVars.value) AS envValues'
+ ' FROM apps'
+ ' LEFT OUTER JOIN appPortBindings ON apps.id = appPortBindings.appId'
+ ' LEFT OUTER JOIN appEnvVars ON apps.id = appEnvVars.appId'
+ ' LEFT OUTER JOIN subdomains ON apps.id = subdomains.appId AND subdomains.type = ?'
+ ' GROUP BY apps.id ORDER BY apps.id', [ exports.SUBDOMAIN_TYPE_PRIMARY ], function (error, results) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
results.forEach(postProcess);
database.query('SELECT ' + SUBDOMAIN_FIELDS + ' FROM subdomains WHERE type = ?', [ exports.SUBDOMAIN_TYPE_REDIRECT ], function (error, alternateDomains) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
callback(null, results);
alternateDomains.forEach(function (d) {
var domain = results.find(function (a) { return d.appId === a.id; });
if (!domain) return;
domain.alternateDomains = domain.alternateDomains || [];
domain.alternateDomains.push(d);
});
results.forEach(postProcess);
callback(null, results);
});
});
}
@@ -220,18 +261,15 @@ function add(id, appStoreId, manifest, location, domain, portBindings, data, cal
const mailboxName = data.mailboxName || null;
const mailboxDomain = data.mailboxDomain || null;
const reverseProxyConfigJson = data.reverseProxyConfig ? JSON.stringify(data.reverseProxyConfig) : null;
const servicesConfigJson = data.servicesConfig ? JSON.stringify(data.servicesConfig) : null;
const enableMailbox = data.enableMailbox || false;
const icon = data.icon || null;
let queries = [];
var queries = [];
queries.push({
query: 'INSERT INTO apps (id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuShares, '
+ 'sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, servicesConfigJson, icon, enableMailbox) '
+ ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
+ 'sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson) '
+ ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
args: [ id, appStoreId, manifestJson, installationState, runState, accessRestrictionJson, memoryLimit, cpuShares,
sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson, servicesConfigJson, icon, enableMailbox ]
sso, debugModeJson, mailboxName, mailboxDomain, label, tagsJson, reverseProxyConfigJson ]
});
queries.push({
@@ -262,15 +300,6 @@ function add(id, appStoreId, manifest, location, domain, portBindings, data, cal
});
}
if (data.aliasDomains) {
data.aliasDomains.forEach(function (d) {
queries.push({
query: 'INSERT INTO subdomains (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)',
args: [ id, d.domain, d.subdomain, exports.SUBDOMAIN_TYPE_ALIAS ]
});
});
}
database.transaction(queries, function (error) {
if (error && error.code === 'ER_DUP_ENTRY') return callback(new BoxError(BoxError.ALREADY_EXISTS, error.message));
if (error && error.code === 'ER_NO_REFERENCED_ROW_2') return callback(new BoxError(BoxError.NOT_FOUND, 'no such domain'));
@@ -299,7 +328,7 @@ function getPortBindings(id, callback) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
var portBindings = { };
for (let i = 0; i < results.length; i++) {
for (var i = 0; i < results.length; i++) {
portBindings[results[i].environmentVariable] = { hostPort: results[i].hostPort, type: results[i].type };
}
@@ -307,17 +336,6 @@ function getPortBindings(id, callback) {
});
}
function getIcons(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
database.query('SELECT icon, appStoreIcon FROM apps WHERE id = ?', [ id ], function (error, results) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
callback(null, { icon: results[0].icon, appStoreIcon: results[0].appStoreIcon });
});
}
function delPortBinding(hostPort, type, callback) {
assert.strictEqual(typeof hostPort, 'number');
assert.strictEqual(typeof type, 'string');
@@ -340,13 +358,12 @@ function del(id, callback) {
{ query: 'DELETE FROM appPortBindings WHERE appId = ?', args: [ id ] },
{ query: 'DELETE FROM appEnvVars WHERE appId = ?', args: [ id ] },
{ query: 'DELETE FROM appPasswords WHERE identifier = ?', args: [ id ] },
{ query: 'DELETE FROM appMounts WHERE appId = ?', args: [ id ] },
{ query: 'DELETE FROM apps WHERE id = ?', args: [ id ] }
];
database.transaction(queries, function (error, results) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
if (results[5].affectedRows !== 1) return callback(new BoxError(BoxError.NOT_FOUND, 'App not found'));
if (results[4].affectedRows !== 1) return callback(new BoxError(BoxError.NOT_FOUND, 'App not found'));
callback(null);
});
@@ -368,9 +385,6 @@ function clear(callback) {
}
function update(id, app, callback) {
// ts is useful as a versioning mechanism (for example, icon changed). update the timestamp explicity in code instead of db.
// this way health and healthTime can be updated without changing ts
app.ts = new Date();
updateWithConstraints(id, app, '', callback);
}
@@ -382,7 +396,6 @@ function updateWithConstraints(id, app, constraints, callback) {
assert(!('portBindings' in app) || typeof app.portBindings === 'object');
assert(!('accessRestriction' in app) || typeof app.accessRestriction === 'object' || app.accessRestriction === '');
assert(!('alternateDomains' in app) || Array.isArray(app.alternateDomains));
assert(!('aliasDomains' in app) || Array.isArray(app.aliasDomains));
assert(!('tags' in app) || Array.isArray(app.tags));
assert(!('env' in app) || typeof app.env === 'object');
@@ -418,27 +431,14 @@ function updateWithConstraints(id, app, constraints, callback) {
queries.push({ query: 'INSERT INTO subdomains (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, exports.SUBDOMAIN_TYPE_REDIRECT ]});
});
}
if ('aliasDomains' in app) {
app.aliasDomains.forEach(function (d) {
queries.push({ query: 'INSERT INTO subdomains (appId, domain, subdomain, type) VALUES (?, ?, ?, ?)', args: [ id, d.domain, d.subdomain, exports.SUBDOMAIN_TYPE_ALIAS ]});
});
}
}
if ('mounts' in app) {
queries.push({ query: 'DELETE FROM appMounts WHERE appId = ?', args: [ id ]});
app.mounts.forEach(function (m) {
queries.push({ query: 'INSERT INTO appMounts (appId, volumeId, readOnly) VALUES (?, ?, ?)', args: [ id, m.volumeId, m.readOnly ]});
});
}
var fields = [ ], values = [ ];
for (let p in app) {
if (p === 'manifest' || p === 'tags' || p === 'accessRestriction' || p === 'debugMode' || p === 'error' || p === 'reverseProxyConfig' || p === 'servicesConfig') {
for (var p in app) {
if (p === 'manifest' || p === 'tags' || p === 'accessRestriction' || p === 'debugMode' || p === 'error' || p === 'reverseProxyConfig' || p === 'servicesConfig' || p === 'binds') {
fields.push(`${p}Json = ?`);
values.push(JSON.stringify(app[p]));
} else if (p !== 'portBindings' && p !== 'location' && p !== 'domain' && p !== 'alternateDomains' && p !== 'aliasDomains' && p !== 'env' && p !== 'mounts') {
} else if (p !== 'portBindings' && p !== 'location' && p !== 'domain' && p !== 'alternateDomains' && p !== 'env') {
fields.push(p + ' = ?');
values.push(app[p]);
}
@@ -461,10 +461,10 @@ function updateWithConstraints(id, app, constraints, callback) {
function setHealth(appId, health, healthTime, callback) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof health, 'string');
assert(util.types.isDate(healthTime));
assert(util.isDate(healthTime));
assert.strictEqual(typeof callback, 'function');
const values = { health, healthTime };
var values = { health, healthTime };
updateWithConstraints(appId, values, '', callback);
}
@@ -475,9 +475,7 @@ function setTask(appId, values, options, callback) {
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
values.ts = new Date();
if (!options.requireNullTaskId) return updateWithConstraints(appId, values, '', callback);
if (!options.requireNullTaskId) return updateWithConstraints(appId, values, '', callback);
if (options.requiredState === null) {
updateWithConstraints(appId, values, 'AND taskId IS NULL', callback);
@@ -499,7 +497,7 @@ function getAppStoreIds(callback) {
function setAddonConfig(appId, addonId, env, callback) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof addonId, 'string');
assert(Array.isArray(env));
assert(util.isArray(env));
assert.strictEqual(typeof callback, 'function');
unsetAddonConfig(appId, addonId, function (error) {
+26 -29
View File
@@ -1,58 +1,58 @@
'use strict';
const appdb = require('./appdb.js'),
var appdb = require('./appdb.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
auditSource = require('./auditsource.js'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
debug = require('debug')('box:apphealthmonitor'),
docker = require('./docker.js'),
eventlog = require('./eventlog.js'),
safe = require('safetydance'),
superagent = require('superagent');
superagent = require('superagent'),
util = require('util');
exports = module.exports = {
run
run: run
};
const HEALTHCHECK_INTERVAL = 10 * 1000; // every 10 seconds. this needs to be small since the UI makes only healthy apps clickable
const UNHEALTHY_THRESHOLD = 20 * 60 * 1000; // 20 minutes
const UNHEALTHY_THRESHOLD = 10 * 60 * 1000; // 10 minutes
const OOM_EVENT_LIMIT = 60 * 60 * 1000; // will only raise 1 oom event every hour
let gStartTime = null; // time when apphealthmonitor was started
const OOM_EVENT_LIMIT = 60 * 60 * 1000; // 60 minutes
let gLastOomMailTime = Date.now() - (5 * 60 * 1000); // pretend we sent email 5 minutes ago
function debugApp(app) {
assert(typeof app === 'object');
debug(app.fqdn + ' ' + util.format.apply(util, Array.prototype.slice.call(arguments, 1)) + ' - ' + app.id);
}
function setHealth(app, health, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof health, 'string');
assert.strictEqual(typeof callback, 'function');
// app starts out with null health
// if it became healthy, we update immediately. this is required for ui to say "running" etc
// if it became unhealthy/error/dead, wait for a threshold before updating db
const now = new Date(), lastHealth = app.health;
let healthTime = gStartTime > app.healthTime ? gStartTime : app.healthTime; // on box restart, clamp value to start time
let now = new Date(), healthTime = app.healthTime, curHealth = app.health;
if (health === apps.HEALTH_HEALTHY) {
healthTime = now;
if (lastHealth && lastHealth !== apps.HEALTH_HEALTHY) { // app starts out with null health
debug(`setHealth: ${app.id} (${app.fqdn}) switched from ${lastHealth} to healthy`);
if (curHealth && curHealth !== apps.HEALTH_HEALTHY) { // app starts out with null health
debugApp(app, 'app switched from %s to healthy', curHealth);
// do not send mails for dev apps
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_UP, auditSource.HEALTH_MONITOR, { app: app });
}
} else if (Math.abs(now - healthTime) > UNHEALTHY_THRESHOLD) {
if (lastHealth === apps.HEALTH_HEALTHY) {
debug(`setHealth: marking ${app.id} (${app.fqdn}) as unhealthy since not seen for more than ${UNHEALTHY_THRESHOLD/(60 * 1000)} minutes`);
if (curHealth === apps.HEALTH_HEALTHY) {
debugApp(app, 'marking as unhealthy since not seen for more than %s minutes', UNHEALTHY_THRESHOLD/(60 * 1000));
// do not send mails for dev apps
if (!app.debugMode) eventlog.add(eventlog.ACTION_APP_DOWN, auditSource.HEALTH_MONITOR, { app: app });
}
} else {
debug(`setHealth: ${app.id} (${app.fqdn}) waiting for ${(UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000} to update health`);
debugApp(app, 'waiting for %s seconds to update the app health', (UNHEALTHY_THRESHOLD - Math.abs(now - healthTime))/1000);
return callback(null);
}
@@ -61,7 +61,6 @@ function setHealth(app, health, callback) {
if (error) return callback(error);
app.health = health;
app.healthTime = healthTime;
callback(null);
});
@@ -86,7 +85,8 @@ function checkAppHealth(app, callback) {
// non-appstore apps may not have healthCheckPath
if (!manifest.healthCheckPath) return setHealth(app, apps.HEALTH_HEALTHY, callback);
const healthCheckUrl = `http://${app.containerIp}:${manifest.httpPort}${manifest.healthCheckPath}`;
// poll through docker network instead of nginx to bypass any potential oauth proxy
var healthCheckUrl = 'http://127.0.0.1:' + app.httpPort + manifest.healthCheckPath;
superagent
.get(healthCheckUrl)
.set('Host', app.fqdn) // required for some apache configs with rewrite rules
@@ -96,7 +96,7 @@ function checkAppHealth(app, callback) {
.end(function (error, res) {
if (error && !error.response) {
setHealth(app, apps.HEALTH_UNHEALTHY, callback);
} else if (res.statusCode > 403) { // 2xx and 3xx are ok. even 401 and 403 are ok for now (for WP sites)
} else if (res.statusCode >= 400) { // 2xx and 3xx are ok
setHealth(app, apps.HEALTH_UNHEALTHY, callback);
} else {
setHealth(app, apps.HEALTH_HEALTHY, callback);
@@ -111,7 +111,7 @@ function getContainerInfo(containerId, callback) {
const appId = safe.query(result, 'Config.Labels.appId', null);
if (!appId) return callback(null, null /* app */, { name: result.Name.slice(1) }); // addon . Name has a '/' in the beginning for some reason
if (!appId) return callback(null, null /* app */, { name: result.Name }); // addon
apps.get(appId, callback); // don't get by container id as this can be an exec container
});
@@ -119,7 +119,8 @@ function getContainerInfo(containerId, callback) {
/*
OOM can be tested using stress tool like so:
docker run -ti -m 100M cloudron/base:3.0.0 /bin/bash
docker run -ti -m 100M cloudron/base:0.10.0 /bin/bash
apt-get update && apt-get install stress
stress --vm 1 --vm-bytes 200M --vm-hang 0
*/
function processDockerEvents(intervalSecs, callback) {
@@ -142,12 +143,12 @@ function processDockerEvents(intervalSecs, callback) {
const now = Date.now();
const notifyUser = !(app && app.debugMode) && ((now - gLastOomMailTime) > OOM_EVENT_LIMIT);
debug(`OOM ${program} notifyUser: ${notifyUser}. lastOomTime: ${gLastOomMailTime} (now: ${now})`);
debug('OOM %s notifyUser: %s. lastOomTime: %s (now: %s)', program, notifyUser, gLastOomMailTime, now);
// do not send mails for dev apps
if (notifyUser) {
// app can be null for addon containers
eventlog.add(eventlog.ACTION_APP_OOM, auditSource.HEALTH_MONITOR, { event, containerId, addon: addon || null, app: app || null });
eventlog.add(eventlog.ACTION_APP_OOM, auditSource.HEALTH_MONITOR, { event: event, containerId: containerId, addon: addon || null, app: app || null });
gLastOomMailTime = now;
}
@@ -187,10 +188,6 @@ function run(intervalSecs, callback) {
assert.strictEqual(typeof intervalSecs, 'number');
assert.strictEqual(typeof callback, 'function');
if (constants.TEST) return;
if (!gStartTime) gStartTime = new Date();
async.series([
processApp, // this is first because docker.getEvents seems to get 'stuck' sometimes
processDockerEvents.bind(null, intervalSecs)
+265 -343
View File
File diff suppressed because it is too large Load Diff
+45 -44
View File
@@ -1,26 +1,28 @@
'use strict';
exports = module.exports = {
getFeatures,
getFeatures: getFeatures,
getApps,
getApp,
getAppVersion,
getApps: getApps,
getApp: getApp,
getAppVersion: getAppVersion,
registerWithLoginCredentials,
updateCloudron,
trackBeginSetup: trackBeginSetup,
trackFinishedSetup: trackFinishedSetup,
purchaseApp,
unpurchaseApp,
registerWithLoginCredentials: registerWithLoginCredentials,
getUserToken,
getSubscription,
isFreePlan,
purchaseApp: purchaseApp,
unpurchaseApp: unpurchaseApp,
getAppUpdate,
getBoxUpdate,
getUserToken: getUserToken,
getSubscription: getSubscription,
isFreePlan: isFreePlan,
createTicket
getAppUpdate: getAppUpdate,
getBoxUpdate: getBoxUpdate,
createTicket: createTicket
};
var apps = require('./apps.js'),
@@ -43,8 +45,6 @@ var apps = require('./apps.js'),
// Keep in sync with appstore/routes/cloudrons.js
let gFeatures = {
userMaxCount: 5,
userGroups: false,
userRoles: false,
domainMaxCount: 1,
externalLdap: false,
privateDockerRegistry: false,
@@ -243,17 +243,17 @@ function getBoxUpdate(options, callback) {
automatic: options.automatic
};
superagent.get(url).query(query).timeout(30 * 1000).end(function (error, result) {
superagent.get(url).query(query).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
if (result.statusCode === 401) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (result.statusCode === 422) return callback(new BoxError(BoxError.LICENSE_ERROR, result.body.message));
if (result.statusCode === 204) return callback(null, null); // no update
if (result.statusCode === 204) return callback(null); // no update
if (result.statusCode !== 200 || !result.body) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
var updateInfo = result.body;
if (!semver.valid(updateInfo.version) || semver.gt(constants.VERSION, updateInfo.version)) {
return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Update version invalid or is a downgrade: %s %s', result.statusCode, result.text)));
return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Invalid update version: %s %s', result.statusCode, result.text)));
}
// updateInfo: { version, changelog, sourceTarballUrl, sourceTarballSigUrl, boxVersionsUrl, boxVersionsSigUrl }
@@ -286,7 +286,7 @@ function getAppUpdate(app, options, callback) {
automatic: options.automatic
};
superagent.get(url).query(query).timeout(30 * 1000).end(function (error, result) {
superagent.get(url).query(query).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error));
if (result.statusCode === 401) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (result.statusCode === 422) return callback(new BoxError(BoxError.LICENSE_ERROR, result.body.message));
@@ -341,29 +341,30 @@ function registerCloudron(data, callback) {
});
}
function updateCloudron(data, callback) {
assert.strictEqual(typeof data, 'object');
assert.strictEqual(typeof callback, 'function');
// This works without a Cloudron token as this Cloudron was not yet registered
let gBeginSetupAlreadyTracked = false;
function trackBeginSetup() {
// avoid browser reload double tracking, not perfect since box might restart, but covers most cases and is simple
if (gBeginSetupAlreadyTracked) return;
gBeginSetupAlreadyTracked = true;
getCloudronToken(function (error, token) {
if (error && error.reason === BoxError.LICENSE_ERROR) return callback(null); // missing token. not registered yet
if (error) return callback(error);
const url = `${settings.apiServerOrigin()}/api/v1/helper/setup_begin`;
const url = `${settings.apiServerOrigin()}/api/v1/update_cloudron`;
const query = {
accessToken: token
};
superagent.post(url).send({}).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return debug(`trackBeginSetup: ${error.message}`);
if (result.statusCode !== 200) return debug(`trackBeginSetup: ${result.statusCode} ${error.message}`);
});
}
superagent.post(url).query(query).send(data).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error));
if (result.statusCode === 401) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (result.statusCode === 422) return callback(new BoxError(BoxError.LICENSE_ERROR, result.body.message));
if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Bad response: %s %s', result.statusCode, result.text)));
// This works without a Cloudron token as this Cloudron was not yet registered
function trackFinishedSetup(domain) {
assert.strictEqual(typeof domain, 'string');
debug(`updateCloudron: Cloudron updated with data ${JSON.stringify(data)}`);
const url = `${settings.apiServerOrigin()}/api/v1/helper/setup_finished`;
callback();
});
superagent.post(url).send({ domain }).timeout(30 * 1000).end(function (error, result) {
if (error && !error.response) return debug(`trackFinishedSetup: ${error.message}`);
if (result.statusCode !== 200) return debug(`trackFinishedSetup: ${result.statusCode} ${error.message}`);
});
}
@@ -386,7 +387,7 @@ function registerWithLoginCredentials(options, callback) {
login(options.email, options.password, options.totpToken || '', function (error, result) {
if (error) return callback(error);
registerCloudron({ domain: settings.dashboardDomain(), accessToken: result.accessToken, version: constants.VERSION, purpose: options.purpose || '' }, callback);
registerCloudron({ domain: settings.adminDomain(), accessToken: result.accessToken, version: constants.VERSION, purpose: options.purpose || '' }, callback);
});
});
});
@@ -412,7 +413,7 @@ function createTicket(info, auditSource, callback) {
support.enableRemoteSupport(true, auditSource, function (error) {
// ensure we can at least get the ticket through
if (error) debug('Unable to enable SSH support.', error);
if (error) console.error('Unable to enable SSH support.', error);
callback();
});
@@ -432,7 +433,7 @@ function createTicket(info, auditSource, callback) {
var req = superagent.post(`${settings.apiServerOrigin()}/api/v1/ticket`)
.query({ accessToken: token })
.timeout(30 * 1000);
.timeout(20 * 1000);
// either send as JSON through body or as multipart, depending on attachments
if (info.app) {
@@ -454,7 +455,7 @@ function createTicket(info, auditSource, callback) {
eventlog.add(eventlog.ACTION_SUPPORT_TICKET, auditSource, info);
callback(null, { message: `An email was sent to ${constants.SUPPORT_EMAIL}. We will get back shortly!` });
callback(null, { message: `An email for sent to ${constants.SUPPORT_EMAIL}. We will get back shortly!` });
});
});
});
@@ -471,7 +472,7 @@ function getApps(callback) {
if (error) return callback(error);
const url = `${settings.apiServerOrigin()}/api/v1/apps`;
superagent.get(url).query({ accessToken: token, boxVersion: constants.VERSION, unstable: unstable }).timeout(30 * 1000).end(function (error, result) {
superagent.get(url).query({ accessToken: token, boxVersion: constants.VERSION, unstable: unstable }).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (result.statusCode === 422) return callback(new BoxError(BoxError.LICENSE_ERROR, result.body.message));
@@ -506,7 +507,7 @@ function getAppVersion(appId, version, callback) {
let url = `${settings.apiServerOrigin()}/api/v1/apps/${appId}`;
if (version !== 'latest') url += `/versions/${version}`;
superagent.get(url).query({ accessToken: token }).timeout(30 * 1000).end(function (error, result) {
superagent.get(url).query({ accessToken: token }).timeout(10 * 1000).end(function (error, result) {
if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
if (result.statusCode === 403 || result.statusCode === 401) return callback(new BoxError(BoxError.INVALID_CREDENTIALS));
if (result.statusCode === 404) return callback(new BoxError(BoxError.NOT_FOUND));
+179 -121
View File
@@ -3,19 +3,26 @@
'use strict';
exports = module.exports = {
run,
run: run,
// exported for testing
_reserveHttpPort: reserveHttpPort,
_configureReverseProxy: configureReverseProxy,
_unconfigureReverseProxy: unconfigureReverseProxy,
_createAppDir: createAppDir,
_deleteAppDir: deleteAppDir,
_verifyManifest: verifyManifest,
_registerSubdomains: registerSubdomains,
_unregisterSubdomains: unregisterSubdomains,
_waitForDnsPropagation: waitForDnsPropagation
};
const appdb = require('./appdb.js'),
var addons = require('./addons.js'),
appdb = require('./appdb.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
auditSource = require('./auditsource.js'),
backups = require('./backups.js'),
BoxError = require('./boxerror.js'),
collectd = require('./collectd.js'),
@@ -25,16 +32,16 @@ const appdb = require('./appdb.js'),
docker = require('./docker.js'),
domains = require('./domains.js'),
ejs = require('ejs'),
eventlog = require('./eventlog.js'),
fs = require('fs'),
iputils = require('./iputils.js'),
manifestFormat = require('cloudron-manifestformat'),
net = require('net'),
os = require('os'),
path = require('path'),
paths = require('./paths.js'),
reverseProxy = require('./reverseproxy.js'),
rimraf = require('rimraf'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
superagent = require('superagent'),
@@ -69,6 +76,8 @@ function updateApp(app, values, callback) {
assert.strictEqual(typeof values, 'object');
assert.strictEqual(typeof callback, 'function');
debugApp(app, 'updating app with values: %j', values);
appdb.update(app.id, values, function (error) {
if (error) return callback(error);
@@ -80,16 +89,22 @@ function updateApp(app, values, callback) {
});
}
function allocateContainerIp(app, callback) {
function reserveHttpPort(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
async.retry({ times: 10 }, function (retryCallback) {
const iprange = iputils.intFromIp('172.18.20.255') - iputils.intFromIp('172.18.16.1');
let rnd = Math.floor(Math.random() * iprange);
const containerIp = iputils.ipFromInt(iputils.intFromIp('172.18.16.1') + rnd);
updateApp(app, { containerIp }, retryCallback);
}, callback);
let server = net.createServer();
server.listen(0, function () {
let port = server.address().port;
updateApp(app, { httpPort: port }, function (error) {
server.close(function (/* closeError */) {
if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Failed to allocate http port ${port}: ${error.message}`));
callback(null);
});
});
});
}
function configureReverseProxy(app, callback) {
@@ -263,7 +278,7 @@ function cleanupLogs(app, callback) {
// note that redis container logs are cleaned up by the addon
rimraf(path.join(paths.LOG_DIR, app.id), function (error) {
if (error) debugApp(app, 'cannot cleanup logs:', error);
if (error) debugApp(app, 'cannot cleanup logs: %s', error);
callback(null);
});
@@ -289,9 +304,9 @@ function downloadIcon(app, callback) {
// nothing to download if we dont have an appStoreId
if (!app.appStoreId) return callback(null);
debugApp(app, `Downloading icon of ${app.appStoreId}@${app.manifest.version}`);
debugApp(app, 'Downloading icon of %s@%s', app.appStoreId, app.manifest.version);
const iconUrl = settings.apiServerOrigin() + '/api/v1/apps/' + app.appStoreId + '/versions/' + app.manifest.version + '/icon';
var iconUrl = settings.apiServerOrigin() + '/api/v1/apps/' + app.appStoreId + '/versions/' + app.manifest.version + '/icon';
async.retry({ times: 10, interval: 5000 }, function (retryCallback) {
superagent
@@ -302,11 +317,105 @@ function downloadIcon(app, callback) {
if (error && !error.response) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error downloading icon : ${error.message}`));
if (res.statusCode !== 200) return retryCallback(null); // ignore error. this can also happen for apps installed with cloudron-cli
updateApp(app, { appStoreIcon: res.body }, retryCallback);
const iconPath = path.join(paths.APP_ICONS_DIR, app.id + '.png');
if (!safe.fs.writeFileSync(iconPath, res.body)) return retryCallback(new BoxError(BoxError.FS_ERROR, `Error saving icon to ${iconPath}: ${safe.error.message}`));
retryCallback(null);
});
}, callback);
}
function removeIcon(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
if (!safe.fs.unlinkSync(path.join(paths.APP_ICONS_DIR, app.id + '.png'))) {
if (safe.error.code !== 'ENOENT') debugApp(app, 'cannot remove icon : %s', safe.error);
}
if (!safe.fs.unlinkSync(path.join(paths.APP_ICONS_DIR, app.id + '.user.png'))) {
if (safe.error.code !== 'ENOENT') debugApp(app, 'cannot remove user icon : %s', safe.error);
}
callback(null);
}
function registerSubdomains(app, overwrite, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof overwrite, 'boolean');
assert.strictEqual(typeof callback, 'function');
sysinfo.getServerIp(function (error, ip) {
if (error) return callback(error);
const allDomains = [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains);
debug(`registerSubdomain: Will register ${JSON.stringify(allDomains)}`);
async.eachSeries(allDomains, function (domain, iteratorDone) {
async.retry({ times: 200, interval: 5000 }, function (retryCallback) {
debugApp(app, 'Registering subdomain: %s%s', domain.subdomain ? (domain.subdomain + '.') : '', domain.domain);
// get the current record before updating it
domains.getDnsRecords(domain.subdomain, domain.domain, 'A', function (error, values) {
if (error && error.reason === BoxError.EXTERNAL_ERROR) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
if (error && error.reason === BoxError.ACCESS_DENIED) return retryCallback(null, new BoxError(BoxError.ACCESS_DENIED, error.message, { domain }));
if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, new BoxError(BoxError.NOT_FOUND, error.message, { domain }));
if (error) return retryCallback(null, new BoxError(BoxError.EXTERNAL_ERROR, error.message, domain)); // give up for other errors
if (values.length !== 0 && values[0] === ip) return retryCallback(null); // up-to-date
// refuse to update any existing DNS record for custom domains that we did not create
if (values.length !== 0 && !overwrite) return retryCallback(null, new BoxError(BoxError.ALREADY_EXISTS, 'DNS Record already exists', { domain }));
domains.upsertDnsRecords(domain.subdomain, domain.domain, 'A', [ ip ], function (error) {
if (error && (error.reason === BoxError.BUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
debug('registerSubdomains: Upsert error. Will retry.', error.message);
return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
}
retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, domain) : null);
});
});
}, function (error, result) {
if (error || result) return iteratorDone(error || result);
iteratorDone(null);
});
}, callback);
});
}
function unregisterSubdomains(app, allDomains, callback) {
assert.strictEqual(typeof app, 'object');
assert(Array.isArray(allDomains));
assert.strictEqual(typeof callback, 'function');
sysinfo.getServerIp(function (error, ip) {
if (error) return callback(error);
async.eachSeries(allDomains, function (domain, iteratorDone) {
async.retry({ times: 30, interval: 5000 }, function (retryCallback) {
debugApp(app, 'Unregistering subdomain: %s%s', domain.subdomain ? (domain.subdomain + '.') : '', domain.domain);
domains.removeDnsRecords(domain.subdomain, domain.domain, 'A', [ ip ], function (error) {
if (error && error.reason === BoxError.NOT_FOUND) return retryCallback(null, null);
if (error && (error.reason === BoxError.SBUSY || error.reason === BoxError.EXTERNAL_ERROR)) {
debug('registerSubdomains: Remove error. Will retry.', error.message);
return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain })); // try again
}
retryCallback(null, error ? new BoxError(BoxError.EXTERNAL_ERROR, error.message, { domain }) : null);
});
}, function (error, result) {
if (error || result) return iteratorDone(error || result);
iteratorDone();
});
}, callback);
});
}
function waitForDnsPropagation(app, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof callback, 'function');
@@ -322,8 +431,8 @@ function waitForDnsPropagation(app, callback) {
domains.waitForDnsRecord(app.location, app.domain, 'A', ip, { times: 240 }, function (error) {
if (error) return callback(new BoxError(BoxError.DNS_ERROR, `DNS Record is not synced yet: ${error.message}`, { ip: ip, subdomain: app.location, domain: app.domain }));
// now wait for alternateDomains and aliasDomains, if any
async.eachSeries(app.alternateDomains.concat(app.aliasDomains), function (domain, iteratorCallback) {
// now wait for alternateDomains, if any
async.eachSeries(app.alternateDomains, function (domain, iteratorCallback) {
domains.waitForDnsRecord(domain.subdomain, domain.domain, 'A', ip, { times: 240 }, function (error) {
if (error) return callback(new BoxError(BoxError.DNS_ERROR, `DNS Record is not synced yet: ${error.message}`, { ip: ip, subdomain: domain.subdomain, domain: domain.domain }));
@@ -342,7 +451,7 @@ function moveDataDir(app, targetDir, callback) {
let resolvedSourceDir = apps.getDataDir(app, app.dataDir);
let resolvedTargetDir = apps.getDataDir(app, targetDir);
debugApp(app, `moveDataDir: migrating data from ${resolvedSourceDir} to ${resolvedTargetDir}`);
debug(`moveDataDir: migrating data from ${resolvedSourceDir} to ${resolvedTargetDir}`);
if (resolvedSourceDir === resolvedTargetDir) return callback();
@@ -375,8 +484,6 @@ function downloadImage(manifest, callback) {
}
function startApp(app, callback){
debugApp(app, 'startApp: starting container');
if (app.runState === apps.RSTATE_STOPPED) return callback();
docker.startContainer(app.id, callback);
@@ -390,7 +497,6 @@ function install(app, args, progressCallback, callback) {
const restoreConfig = args.restoreConfig; // has to be set when restoring
const overwriteDns = args.overwriteDns;
const skipDnsSetup = args.skipDnsSetup;
const oldManifest = args.oldManifest;
async.series([
@@ -411,7 +517,7 @@ function install(app, args, progressCallback, callback) {
addonsToRemove = app.manifest.addons;
}
services.teardownAddons(app, addonsToRemove, next);
addons.teardownAddons(app, addonsToRemove, next);
},
function deleteAppDirIfNeeded(done) {
@@ -426,21 +532,13 @@ function install(app, args, progressCallback, callback) {
docker.deleteImage(oldManifest, done);
},
// allocating container ip here, lets the users "repair" an app if allocation fails at appdb.add time
allocateContainerIp.bind(null, app),
reserveHttpPort.bind(null, app),
progressCallback.bind(null, { percent: 20, message: 'Downloading icon' }),
downloadIcon.bind(null, app),
function setupDnsIfNeeded(done) {
if (skipDnsSetup) return done();
async.series([
progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
domains.registerLocations.bind(null, [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback)
], done);
},
progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
registerSubdomains.bind(null, app, overwriteDns),
progressCallback.bind(null, { percent: 40, message: 'Downloading image' }),
downloadImage.bind(null, app.manifest),
@@ -452,44 +550,35 @@ function install(app, args, progressCallback, callback) {
if (!restoreConfig) {
async.series([
progressCallback.bind(null, { percent: 60, message: 'Setting up addons' }),
services.setupAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
], next);
} else if (!restoreConfig.backupId) { // in-place import
async.series([
progressCallback.bind(null, { percent: 60, message: 'Importing addons in-place' }),
services.setupAddons.bind(null, app, app.manifest.addons),
services.clearAddons.bind(null, app, _.omit(app.manifest.addons, 'localstorage')),
apps.restoreConfig.bind(null, app),
services.restoreAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
addons.clearAddons.bind(null, app, _.omit(app.manifest.addons, 'localstorage')),
addons.restoreAddons.bind(null, app, app.manifest.addons),
], next);
} else {
async.series([
progressCallback.bind(null, { percent: 65, message: 'Download backup and restoring addons' }),
services.setupAddons.bind(null, app, app.manifest.addons),
services.clearAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
addons.clearAddons.bind(null, app, app.manifest.addons),
backups.downloadApp.bind(null, app, restoreConfig, (progress) => {
progressCallback({ percent: 65, message: progress.message });
}),
(done) => { if (app.installationState === apps.ISTATE_PENDING_IMPORT) apps.restoreConfig(app, done); else done(); },
progressCallback.bind(null, { percent: 70, message: 'Restoring addons' }),
services.restoreAddons.bind(null, app, app.manifest.addons)
addons.restoreAddons.bind(null, app, app.manifest.addons)
], next);
}
},
progressCallback.bind(null, { percent: 80, message: 'Creating container' }),
progressCallback.bind(null, { percent: 70, message: 'Creating container' }),
createContainer.bind(null, app),
startApp.bind(null, app),
function waitForDns(done) {
if (skipDnsSetup) return done();
async.series([
progressCallback.bind(null, { percent: 85, message: 'Waiting for DNS propagation' }),
exports._waitForDnsPropagation.bind(null, app),
], done);
},
progressCallback.bind(null, { percent: 85, message: 'Waiting for DNS propagation' }),
exports._waitForDnsPropagation.bind(null, app),
progressCallback.bind(null, { percent: 95, message: 'Configuring reverse proxy' }),
configureReverseProxy.bind(null, app),
@@ -498,7 +587,7 @@ function install(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error installing app:', error);
debugApp(app, 'error installing app: %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -513,7 +602,7 @@ function backup(app, args, progressCallback, callback) {
async.series([
progressCallback.bind(null, { percent: 10, message: 'Backing up' }),
backups.backupApp.bind(null, app, { snapshotOnly: !!args.snapshotOnly }, (progress) => {
backups.backupApp.bind(null, app, { /* options */ }, (progress) => {
progressCallback({ percent: 30, message: progress.message });
}),
@@ -521,9 +610,9 @@ function backup(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error backing up app:', error);
debugApp(app, 'error backing up app: %s', error);
// return to installed state intentionally. the error is stashed only in the task and not the app (the UI shows error state otherwise)
return updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null }, callback.bind(null, error));
return updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null }, callback.bind(null, makeTaskError(error, app)));
}
callback(null);
});
@@ -541,7 +630,7 @@ function create(app, args, progressCallback, callback) {
// FIXME: re-setup addons only because sendmail addon to re-inject env vars on mailboxName change
progressCallback.bind(null, { percent: 30, message: 'Setting up addons' }),
services.setupAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
progressCallback.bind(null, { percent: 60, message: 'Creating container' }),
createContainer.bind(null, app),
@@ -552,7 +641,7 @@ function create(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error creating :', error);
debugApp(app, 'error creating : %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -567,7 +656,6 @@ function changeLocation(app, args, progressCallback, callback) {
const oldConfig = args.oldConfig;
const locationChanged = oldConfig.fqdn !== app.fqdn;
const skipDnsSetup = args.skipDnsSetup;
const overwriteDns = args.overwriteDns;
async.series([
@@ -579,45 +667,27 @@ function changeLocation(app, args, progressCallback, callback) {
return !app.alternateDomains.some(function (n) { return n.subdomain === o.subdomain && n.domain === o.domain; });
});
if (oldConfig.aliasDomains) {
obsoleteDomains = obsoleteDomains.concat(oldConfig.aliasDomains.filter(function (o) {
return !app.aliasDomains.some(function (n) { return n.subdomain === o.subdomain && n.domain === o.domain; });
}));
}
if (locationChanged) obsoleteDomains.push({ subdomain: oldConfig.location, domain: oldConfig.domain });
if (obsoleteDomains.length === 0) return next();
domains.unregisterLocations(obsoleteDomains, progressCallback, next);
unregisterSubdomains(app, obsoleteDomains, next);
},
function setupDnsIfNeeded(done) {
if (skipDnsSetup) return done();
async.series([
progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
domains.registerLocations.bind(null, [ { subdomain: app.location, domain: app.domain }].concat(app.alternateDomains).concat(app.aliasDomains), { overwriteDns }, progressCallback)
], done);
},
progressCallback.bind(null, { percent: 30, message: 'Registering subdomains' }),
registerSubdomains.bind(null, app, overwriteDns),
// re-setup addons since they rely on the app's fqdn (e.g oauth)
progressCallback.bind(null, { percent: 50, message: 'Setting up addons' }),
services.setupAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
progressCallback.bind(null, { percent: 60, message: 'Creating container' }),
createContainer.bind(null, app),
startApp.bind(null, app),
function waitForDns(done) {
if (skipDnsSetup) return done();
async.series([
progressCallback.bind(null, { percent: 80, message: 'Waiting for DNS propagation' }),
exports._waitForDnsPropagation.bind(null, app),
], done);
},
progressCallback.bind(null, { percent: 80, message: 'Waiting for DNS propagation' }),
exports._waitForDnsPropagation.bind(null, app),
progressCallback.bind(null, { percent: 90, message: 'Configuring reverse proxy' }),
configureReverseProxy.bind(null, app),
@@ -626,7 +696,7 @@ function changeLocation(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error changing location:', error);
debugApp(app, 'error changing location : %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -651,7 +721,7 @@ function migrateDataDir(app, args, progressCallback, callback) {
// re-setup addons since this creates the localStorage volume
progressCallback.bind(null, { percent: 50, message: 'Setting up addons' }),
services.setupAddons.bind(null, _.extend({}, app, { dataDir: newDataDir }), app.manifest.addons),
addons.setupAddons.bind(null, _.extend({}, app, { dataDir: newDataDir }), app.manifest.addons),
progressCallback.bind(null, { percent: 60, message: 'Moving data dir' }),
moveDataDir.bind(null, app, newDataDir),
@@ -665,7 +735,7 @@ function migrateDataDir(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, dataDir: newDataDir })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error migrating data dir:', error);
debugApp(app, 'error migrating data dir : %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
@@ -684,6 +754,7 @@ function configure(app, args, progressCallback, callback) {
progressCallback.bind(null, { percent: 10, message: 'Cleaning up old install' }),
unconfigureReverseProxy.bind(null, app),
deleteContainers.bind(null, app, { managedOnly: true }),
reserveHttpPort.bind(null, app),
progressCallback.bind(null, { percent: 20, message: 'Downloading icon' }),
downloadIcon.bind(null, app),
@@ -696,7 +767,7 @@ function configure(app, args, progressCallback, callback) {
// re-setup addons since they rely on the app's fqdn (e.g oauth)
progressCallback.bind(null, { percent: 50, message: 'Setting up addons' }),
services.setupAddons.bind(null, app, app.manifest.addons),
addons.setupAddons.bind(null, app, app.manifest.addons),
progressCallback.bind(null, { percent: 60, message: 'Creating container' }),
createContainer.bind(null, app),
@@ -710,7 +781,7 @@ function configure(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error reconfiguring:', error);
debugApp(app, 'error reconfiguring : %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
@@ -718,6 +789,7 @@ function configure(app, args, progressCallback, callback) {
});
}
// nginx configuration is skipped because app.httpPort is expected to be available
function update(app, args, progressCallback, callback) {
assert.strictEqual(typeof app, 'object');
assert.strictEqual(typeof args, 'object');
@@ -729,10 +801,7 @@ function update(app, args, progressCallback, callback) {
// app does not want these addons anymore
// FIXME: this does not handle option changes (like multipleDatabases)
const unusedAddons = _.omit(app.manifest.addons, Object.keys(updateConfig.manifest.addons));
const httpPathsChanged = app.manifest.httpPaths !== updateConfig.manifest.httpPaths;
const httpPortChanged = app.manifest.httpPort !== updateConfig.manifest.httpPort;
const proxyAuthChanged = !_.isEqual(safe.query(app.manifest, 'addons.proxyAuth'), safe.query(updateConfig.manifest, 'addons.proxyAuth'));
var unusedAddons = _.omit(app.manifest.addons, Object.keys(updateConfig.manifest.addons));
async.series([
// this protects against the theoretical possibility of an app being marked for update from
@@ -771,7 +840,7 @@ function update(app, args, progressCallback, callback) {
},
// only delete unused addons after backup
services.teardownAddons.bind(null, app, unusedAddons),
addons.teardownAddons.bind(null, app, unusedAddons),
// free unused ports
function (next) {
@@ -783,7 +852,7 @@ function update(app, args, progressCallback, callback) {
if (newTcpPorts[portName] || newUdpPorts[portName]) return callback(null); // port still in use
appdb.delPortBinding(currentPorts[portName], apps.PORT_TYPE_TCP, function (error) {
if (error && error.reason === BoxError.NOT_FOUND) debugApp(app, 'update: portbinding does not exist in database', error);
if (error && error.reason === BoxError.NOT_FOUND) debug('update: portbinding does not exist in database', error);
else if (error) return next(error);
// also delete from app object for further processing (the db is updated in the next step)
@@ -800,20 +869,13 @@ function update(app, args, progressCallback, callback) {
downloadIcon.bind(null, app),
progressCallback.bind(null, { percent: 60, message: 'Updating addons' }),
services.setupAddons.bind(null, app, updateConfig.manifest.addons),
addons.setupAddons.bind(null, app, updateConfig.manifest.addons),
progressCallback.bind(null, { percent: 70, message: 'Creating container' }),
createContainer.bind(null, app),
startApp.bind(null, app),
progressCallback.bind(null, { percent: 90, message: 'Configuring reverse proxy' }),
function (next) {
if (!httpPathsChanged && !proxyAuthChanged && !httpPortChanged) return next();
configureReverseProxy(app, next);
},
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null, updateTime: new Date() })
], function seriesDone(error) {
@@ -821,10 +883,10 @@ function update(app, args, progressCallback, callback) {
debugApp(app, 'update aborted because backup failed', error);
updateApp(app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null }, callback.bind(null, error));
} else if (error) {
debugApp(app, 'Error updating app:', error);
debugApp(app, 'Error updating app: %s', error);
updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
} else {
callback(null);
eventlog.add(eventlog.ACTION_APP_UPDATE_FINISH, auditSource.APP_TASK, { app: app, success: true }, () => callback()); // ignore error
}
});
}
@@ -837,23 +899,20 @@ function start(app, args, progressCallback, callback) {
async.series([
progressCallback.bind(null, { percent: 10, message: 'Starting app services' }),
services.startAppServices.bind(null, app),
addons.startAppServices.bind(null, app),
progressCallback.bind(null, { percent: 35, message: 'Starting container' }),
docker.startContainer.bind(null, app.id),
progressCallback.bind(null, { percent: 60, message: 'Adding collectd profile' }),
addCollectdProfile.bind(null, app),
// stopped apps do not renew certs. currently, we don't do DNS to not overwrite existing user settings
progressCallback.bind(null, { percent: 80, message: 'Configuring reverse proxy' }),
progressCallback.bind(null, { percent: 60, message: 'Configuring reverse proxy' }),
configureReverseProxy.bind(null, app),
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
debugApp(app, 'error starting app: %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -871,16 +930,13 @@ function stop(app, args, progressCallback, callback) {
docker.stopContainers.bind(null, app.id),
progressCallback.bind(null, { percent: 50, message: 'Stopping app services' }),
services.stopAppServices.bind(null, app),
progressCallback.bind(null, { percent: 80, message: 'Removing collectd profile' }),
removeCollectdProfile.bind(null, app),
addons.stopAppServices.bind(null, app),
progressCallback.bind(null, { percent: 100, message: 'Done' }),
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
debugApp(app, 'error starting app: %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -901,7 +957,7 @@ function restart(app, args, progressCallback, callback) {
updateApp.bind(null, app, { installationState: apps.ISTATE_INSTALLED, error: null, health: null })
], function seriesDone(error) {
if (error) {
debugApp(app, 'error starting app:', error);
debugApp(app, 'error starting app: %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -920,7 +976,7 @@ function uninstall(app, args, progressCallback, callback) {
deleteContainers.bind(null, app, {}),
progressCallback.bind(null, { percent: 30, message: 'Teardown addons' }),
services.teardownAddons.bind(null, app, app.manifest.addons),
addons.teardownAddons.bind(null, app, app.manifest.addons),
progressCallback.bind(null, { percent: 40, message: 'Cleanup file manager' }),
@@ -931,7 +987,10 @@ function uninstall(app, args, progressCallback, callback) {
docker.deleteImage.bind(null, app.manifest),
progressCallback.bind(null, { percent: 70, message: 'Unregistering domains' }),
domains.unregisterLocations.bind(null, [ { subdomain: app.location, domain: app.domain } ].concat(app.alternateDomains).concat(app.aliasDomains), progressCallback),
unregisterSubdomains.bind(null, app, [ { subdomain: app.location, domain: app.domain } ].concat(app.alternateDomains)),
progressCallback.bind(null, { percent: 80, message: 'Cleanup icon' }),
removeIcon.bind(null, app),
progressCallback.bind(null, { percent: 90, message: 'Cleanup logs' }),
cleanupLogs.bind(null, app),
@@ -940,7 +999,7 @@ function uninstall(app, args, progressCallback, callback) {
appdb.del.bind(null, app.id)
], function seriesDone(error) {
if (error) {
debugApp(app, 'error uninstalling app:', error);
debugApp(app, 'error uninstalling app: %s', error);
return updateApp(app, { installationState: apps.ISTATE_ERROR, error: makeTaskError(error, app) }, callback.bind(null, error));
}
callback(null);
@@ -957,13 +1016,12 @@ function run(appId, args, progressCallback, callback) {
apps.get(appId, function (error, app) {
if (error) return callback(error);
debugApp(app, `startTask installationState: ${app.installationState} runState: ${app.runState}`);
debugApp(app, 'startTask installationState: %s runState: %s', app.installationState, app.runState);
switch (app.installationState) {
case apps.ISTATE_PENDING_INSTALL:
case apps.ISTATE_PENDING_CLONE:
case apps.ISTATE_PENDING_RESTORE:
case apps.ISTATE_PENDING_IMPORT:
return install(app, args, progressCallback, callback);
case apps.ISTATE_PENDING_CONFIGURE:
return configure(app, args, progressCallback, callback);
+14 -12
View File
@@ -1,10 +1,10 @@
'use strict';
exports = module.exports = {
scheduleTask
scheduleTask: scheduleTask
};
const assert = require('assert'),
let assert = require('assert'),
BoxError = require('./boxerror.js'),
debug = require('debug')('box:apptaskmanager'),
fs = require('fs'),
@@ -12,7 +12,7 @@ const assert = require('assert'),
safe = require('safetydance'),
path = require('path'),
paths = require('./paths.js'),
scheduler = require('./scheduler.js'),
sftp = require('./sftp.js'),
tasks = require('./tasks.js');
let gActiveTasks = { }; // indexed by app id
@@ -36,10 +36,9 @@ function initializeSync() {
}
// callback is called when task is finished
function scheduleTask(appId, taskId, options, callback) {
function scheduleTask(appId, taskId, callback) {
assert.strictEqual(typeof appId, 'string');
assert.strictEqual(typeof taskId, 'string');
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
if (!gInitialized) initializeSync();
@@ -51,7 +50,7 @@ function scheduleTask(appId, taskId, options, callback) {
if (Object.keys(gActiveTasks).length >= TASK_CONCURRENCY) {
debug(`Reached concurrency limit, queueing task id ${taskId}`);
tasks.update(taskId, { percent: 1, message: 'Waiting for other app tasks to complete' }, NOOP_CALLBACK);
gPendingTasks.push({ appId, taskId, options, callback });
gPendingTasks.push({ appId, taskId, callback });
return;
}
@@ -60,7 +59,7 @@ function scheduleTask(appId, taskId, options, callback) {
if (lockError) {
debug(`Could not get lock. ${lockError.message}, queueing task id ${taskId}`);
tasks.update(taskId, { percent: 1, message: waitText(lockError.operation) }, NOOP_CALLBACK);
gPendingTasks.push({ appId, taskId, options, callback });
gPendingTasks.push({ appId, taskId, callback });
return;
}
@@ -70,15 +69,17 @@ function scheduleTask(appId, taskId, options, callback) {
if (!fs.existsSync(path.dirname(logFile))) safe.fs.mkdirSync(path.dirname(logFile)); // ensure directory
scheduler.suspendJobs(appId);
tasks.startTask(taskId, Object.assign(options, { logFile }), function (error, result) {
// TODO: set memory limit for app backup task
tasks.startTask(taskId, { logFile, timeout: 20 * 60 * 60 * 1000 /* 20 hours */, nice: 15 }, function (error, result) {
callback(error, result);
delete gActiveTasks[appId];
locker.unlock(locker.OP_APPTASK); // unlock event will trigger next task
scheduler.resumeJobs(appId);
// post app task hooks
sftp.rebuild(function (error) {
if (error) console.error('Unable to rebuild sftp:', error);
});
});
}
@@ -88,5 +89,6 @@ function startNextTask() {
assert(Object.keys(gActiveTasks).length < TASK_CONCURRENCY);
const t = gPendingTasks.shift();
scheduleTask(t.appId, t.taskId, t.options, t.callback);
scheduleTask(t.appId, t.taskId, t.callback);
}
+4 -3
View File
@@ -3,13 +3,14 @@
exports = module.exports = {
CRON: { userId: null, username: 'cron' },
HEALTH_MONITOR: { userId: null, username: 'healthmonitor' },
APP_TASK: { userId: null, username: 'apptask' },
EXTERNAL_LDAP_TASK: { userId: null, username: 'externalldap' },
EXTERNAL_LDAP_AUTO_CREATE: { userId: null, username: 'externalldap' },
fromRequest
fromRequest: fromRequest
};
function fromRequest(req) {
const ip = req.headers['x-forwarded-for'] || req.connection.remoteAddress || null;
return { ip, username: req.user ? req.user.username : null, userId: req.user ? req.user.id : null };
var ip = req.headers['x-forwarded-for'] || req.connection.remoteAddress || null;
return { ip: ip, username: req.user ? req.user.username : null, userId: req.user ? req.user.id : null };
}
-29
View File
@@ -1,29 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<clientConfig version="1.1">
<emailProvider id="<%= domain %>">
<domain><%= domain %></domain>
<displayName>Cloudron Mail</displayName>
<displayShortName>Cloudron</displayShortName>
<incomingServer type="imap">
<hostname><%= mailFqdn %></hostname>
<port>993</port>
<socketType>SSL</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
</incomingServer>
<outgoingServer type="smtp">
<hostname><%= mailFqdn %></hostname>
<port>587</port>
<socketType>STARTTLS</socketType>
<authentication>password-cleartext</authentication>
<username>%EMAILADDRESS%</username>
<addThisServer>true</addThisServer>
</outgoingServer>
<documentation url="http://cloudron.io/email/#autodiscover">
<descr lang="en">Cloudron Email</descr>
</documentation>
</emailProvider>
</clientConfig>
+5 -20
View File
@@ -1,11 +1,12 @@
'use strict';
const assert = require('assert'),
var assert = require('assert'),
BoxError = require('./boxerror.js'),
database = require('./database.js'),
safe = require('safetydance');
safe = require('safetydance'),
util = require('util');
const BACKUPS_FIELDS = [ 'id', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
var BACKUPS_FIELDS = [ 'id', 'identifier', 'creationTime', 'packageVersion', 'type', 'dependsOn', 'state', 'manifestJson', 'format', 'preserveSecs', 'encryptionVersion' ];
exports = module.exports = {
add,
@@ -17,7 +18,6 @@ exports = module.exports = {
get,
del,
update,
list,
_clear: clear
};
@@ -80,21 +80,6 @@ function getByIdentifierPaged(identifier, page, perPage, callback) {
});
}
function list(page, perPage, callback) {
assert(typeof page === 'number' && page > 0);
assert(typeof perPage === 'number' && perPage > 0);
assert.strictEqual(typeof callback, 'function');
database.query('SELECT ' + BACKUPS_FIELDS + ' FROM backups ORDER BY creationTime DESC LIMIT ?,?',
[ (page-1)*perPage, perPage ], function (error, results) {
if (error) return callback(new BoxError(BoxError.DATABASE_ERROR, error));
results.forEach(function (result) { postProcess(result); });
callback(null, results);
});
}
function get(id, callback) {
assert.strictEqual(typeof id, 'string');
assert.strictEqual(typeof callback, 'function');
@@ -118,7 +103,7 @@ function add(id, data, callback) {
assert.strictEqual(typeof data.type, 'string');
assert.strictEqual(typeof data.identifier, 'string');
assert.strictEqual(typeof data.state, 'string');
assert(Array.isArray(data.dependsOn));
assert(util.isArray(data.dependsOn));
assert.strictEqual(typeof data.manifest, 'object');
assert.strictEqual(typeof data.format, 'string');
assert.strictEqual(typeof callback, 'function');
+80 -147
View File
@@ -1,35 +1,36 @@
'use strict';
exports = module.exports = {
testConfig,
testProviderConfig,
testConfig: testConfig,
testProviderConfig: testProviderConfig,
getByIdentifierAndStatePaged,
get,
get: get,
startBackupTask,
startBackupTask: startBackupTask,
restore,
restore: restore,
backupApp,
downloadApp,
backupApp: backupApp,
downloadApp: downloadApp,
backupBoxAndApps,
backupBoxAndApps: backupBoxAndApps,
upload,
upload: upload,
startCleanupTask,
cleanup,
cleanupCacheFilesSync,
startCleanupTask: startCleanupTask,
cleanup: cleanup,
cleanupCacheFilesSync: cleanupCacheFilesSync,
injectPrivateFields,
removePrivateFields,
injectPrivateFields: injectPrivateFields,
removePrivateFields: removePrivateFields,
configureCollectd,
checkConfiguration: checkConfiguration,
generateEncryptionKeysSync,
isMountProvider,
configureCollectd: configureCollectd,
generateEncryptionKeysSync: generateEncryptionKeysSync,
BACKUP_IDENTIFIER_BOX: 'box',
@@ -47,7 +48,8 @@ exports = module.exports = {
_applyBackupRetentionPolicy: applyBackupRetentionPolicy
};
const apps = require('./apps.js'),
var addons = require('./addons.js'),
apps = require('./apps.js'),
async = require('async'),
assert = require('assert'),
backupdb = require('./backupdb.js'),
@@ -70,7 +72,6 @@ const apps = require('./apps.js'),
progressStream = require('progress-stream'),
safe = require('safetydance'),
shell = require('./shell.js'),
services = require('./services.js'),
settings = require('./settings.js'),
syncer = require('./syncer.js'),
tar = require('tar-fs'),
@@ -95,8 +96,6 @@ function api(provider) {
case 'nfs': return require('./storage/filesystem.js');
case 'cifs': return require('./storage/filesystem.js');
case 'sshfs': return require('./storage/filesystem.js');
case 'mountpoint': return require('./storage/filesystem.js');
case 'ext4': return require('./storage/filesystem.js');
case 's3': return require('./storage/s3.js');
case 'gcs': return require('./storage/gcs.js');
case 'filesystem': return require('./storage/filesystem.js');
@@ -109,17 +108,11 @@ function api(provider) {
case 'backblaze-b2': return require('./storage/s3.js');
case 'linode-objectstorage': return require('./storage/s3.js');
case 'ovh-objectstorage': return require('./storage/s3.js');
case 'ionos-objectstorage': return require('./storage/s3.js');
case 'vultr-objectstorage': return require('./storage/s3.js');
case 'noop': return require('./storage/noop.js');
default: return null;
}
}
function isMountProvider(provider) {
return provider === 'sshfs' || provider === 'cifs' || provider === 'nfs' || provider === 'ext4';
}
function injectPrivateFields(newConfig, currentConfig) {
if ('password' in newConfig) {
if (newConfig.password === constants.SECRET_PLACEHOLDER) {
@@ -145,7 +138,7 @@ function testConfig(backupConfig, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof callback, 'function');
const func = api(backupConfig.provider);
var func = api(backupConfig.provider);
if (!func) return callback(new BoxError(BoxError.BAD_FIELD, 'unknown storage provider', { field: 'provider' }));
if (backupConfig.format !== 'tgz' && backupConfig.format !== 'rsync') return callback(new BoxError(BoxError.BAD_FIELD, 'unknown format', { field: 'format' }));
@@ -556,29 +549,21 @@ function saveFsMetadata(dataLayout, metadataFile, callback) {
// contains paths prefixed with './'
let metadata = {
emptyDirs: [],
execFiles: [],
symlinks: []
execFiles: []
};
// we assume small number of files. spawnSync will raise a ENOBUFS error after maxBuffer
for (let lp of dataLayout.localPaths()) {
const emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (emptyDirs === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding empty dirs: ${safe.error.message}`));
var emptyDirs = safe.child_process.execSync(`find ${lp} -type d -empty\n`, { encoding: 'utf8' });
if (emptyDirs === null) return callback(safe.error);
if (emptyDirs.length) metadata.emptyDirs = metadata.emptyDirs.concat(emptyDirs.trim().split('\n').map((ed) => dataLayout.toRemotePath(ed)));
const execFiles = safe.child_process.execSync(`find ${lp} -type f -executable`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (execFiles === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding executables: ${safe.error.message}`));
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
var execFiles = safe.child_process.execSync(`find ${lp} -type f -executable\n`, { encoding: 'utf8' });
if (execFiles === null) return callback(safe.error);
const symlinks = safe.child_process.execSync(`find ${lp} -type l`, { encoding: 'utf8', maxBuffer: 1024 * 1024 * 30 });
if (symlinks === null) return callback(new BoxError(BoxError.FS_ERROR, `Error finding symlinks: ${safe.error.message}`));
if (symlinks.length) metadata.symlinks = metadata.symlinks.concat(symlinks.trim().split('\n').map((sl) => {
const target = safe.fs.readlinkSync(sl);
return { path: dataLayout.toRemotePath(sl), target };
}));
if (execFiles.length) metadata.execFiles = metadata.execFiles.concat(execFiles.trim().split('\n').map((ef) => dataLayout.toRemotePath(ef)));
}
if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) return callback(new BoxError(BoxError.FS_ERROR, `Error writing fs metadata: ${safe.error.message}`));
if (!safe.fs.writeFileSync(metadataFile, JSON.stringify(metadata, null, 4))) return callback(safe.error);
callback();
}
@@ -706,19 +691,7 @@ function restoreFsMetadata(dataLayout, metadataFile, callback) {
}, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `unable to chmod: ${error.message}`));
async.eachSeries(metadata.symlinks || [], function createSymlink(symlink, iteratorDone) {
if (!symlink.target) return iteratorDone();
// the path may not exist if we had a directory full of symlinks
fs.mkdir(path.dirname(dataLayout.toLocalPath(symlink.path)), { recursive: true }, function (error) {
if (error) return iteratorDone(error);
fs.symlink(symlink.target, dataLayout.toLocalPath(symlink.path), 'file', iteratorDone);
});
}, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, `unable to symlink: ${error.message}`));
callback();
});
callback();
});
});
}
@@ -832,9 +805,7 @@ function restore(backupConfig, backupId, progressCallback, callback) {
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
if (!boxDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`));
const dataLayout = new DataLayout(boxDataDir, []);
const dataLayout = new DataLayout(paths.BOX_DATA_DIR, []);
download(backupConfig, backupId, backupConfig.format, dataLayout, progressCallback, function (error) {
if (error) return callback(error);
@@ -858,7 +829,7 @@ function downloadApp(app, restoreConfig, progressCallback, callback) {
assert.strictEqual(typeof callback, 'function');
const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
if (!appDataDir) return callback(new BoxError(BoxError.FS_ERROR, safe.error.message));
if (!appDataDir) return callback(safe.error);
const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);
const startTime = new Date();
@@ -880,23 +851,15 @@ function runBackupUpload(uploadConfig, progressCallback, callback) {
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
const { backupId, backupConfig, dataLayout, progressTag } = uploadConfig;
const { backupId, format, dataLayout, progressTag } = uploadConfig;
assert.strictEqual(typeof backupId, 'string');
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof format, 'string');
assert.strictEqual(typeof progressTag, 'string');
assert(dataLayout instanceof DataLayout, 'dataLayout must be a DataLayout');
let result = ''; // the script communicates error result as a string
// https://stackoverflow.com/questions/48387040/node-js-recommended-max-old-space-size
const envCopy = Object.assign({}, process.env);
if (backupConfig.memoryLimit && backupConfig.memoryLimit >= 2*1024*1024*1024) {
const heapSize = Math.min((backupConfig.memoryLimit/1024/1024) - 256, 8192);
debug(`runBackupUpload: adjusting heap size to ${heapSize}M`);
envCopy.NODE_OPTIONS = `--max-old-space-size=${heapSize}`;
}
shell.sudo(`backup-${backupId}`, [ BACKUP_UPLOAD_CMD, backupId, backupConfig.format, dataLayout.toString() ], { env: envCopy, preserveEnv: true, ipc: true }, function (error) {
shell.sudo(`backup-${backupId}`, [ BACKUP_UPLOAD_CMD, backupId, format, dataLayout.toString() ], { preserveEnv: true, ipc: true }, function (error) {
if (error && (error.code === null /* signal */ || (error.code !== 0 && error.code !== 50))) { // backuptask crashed
return callback(new BoxError(BoxError.INTERNAL_ERROR, 'Backuptask crashed'));
} else if (error && error.code === 50) { // exited with error
@@ -961,11 +924,11 @@ function uploadBoxSnapshot(backupConfig, progressCallback, callback) {
if (error) return callback(error);
const boxDataDir = safe.fs.realpathSync(paths.BOX_DATA_DIR);
if (!boxDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving boxdata: ${safe.error.message}`));
if (!boxDataDir) return callback(safe.error);
const uploadConfig = {
backupId: 'snapshot/box',
backupConfig,
format: backupConfig.format,
dataLayout: new DataLayout(boxDataDir, []),
progressTag: 'box'
};
@@ -992,7 +955,10 @@ function rotateBoxBackup(backupConfig, tag, options, appBackupIds, progressCallb
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
const backupId = `${tag}/box_v${constants.VERSION}`;
var snapshotInfo = getSnapshotInfo('box');
const snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,''); // add this to filename to make it unique, so it's easy to download them
const backupId = util.format('%s/box_%s_v%s', tag, snapshotTime, constants.VERSION);
const format = backupConfig.format;
debug(`Rotating box backup to id ${backupId}`);
@@ -1067,16 +1033,16 @@ function snapshotApp(app, progressCallback, callback) {
const startTime = new Date();
progressCallback({ message: `Snapshotting app ${app.fqdn}` });
apps.backupConfig(app, function (error) {
if (error) return callback(error);
if (!safe.fs.writeFileSync(path.join(paths.APPS_DATA_DIR, app.id + '/config.json'), JSON.stringify(app))) {
return callback(new BoxError(BoxError.FS_ERROR, 'Error creating config.json: ' + safe.error.message));
}
services.backupAddons(app, app.manifest.addons, function (error) {
if (error) return callback(error);
addons.backupAddons(app, app.manifest.addons, function (error) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error.message));
debugApp(app, `snapshotApp: took ${(new Date() - startTime)/1000} seconds`);
debugApp(app, `snapshotApp: took ${(new Date() - startTime)/1000} seconds`);
return callback(null);
});
return callback(null);
});
}
@@ -1090,10 +1056,11 @@ function rotateAppBackup(backupConfig, app, tag, options, progressCallback, call
const startTime = new Date();
const snapshotInfo = getSnapshotInfo(app.id);
var snapshotInfo = getSnapshotInfo(app.id);
const manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest; // compat
const backupId = `${tag}/app_${app.fqdn}_v${manifest.version}`;
var manifest = snapshotInfo.restoreConfig ? snapshotInfo.restoreConfig.manifest : snapshotInfo.manifest; // compat
const snapshotTime = snapshotInfo.timestamp.replace(/[T.]/g, '-').replace(/[:Z]/g,''); // add this for unique filename which helps when downloading them
const backupId = util.format('%s/app_%s_%s_v%s', tag, app.id, snapshotTime, manifest.version);
const format = backupConfig.format;
debug(`Rotating app backup of ${app.id} to id ${backupId}`);
@@ -1112,7 +1079,7 @@ function rotateAppBackup(backupConfig, app, tag, options, progressCallback, call
backupdb.add(backupId, data, function (error) {
if (error) return callback(error);
const copy = api(backupConfig.provider).copy(backupConfig, getBackupFilePath(backupConfig, `snapshot/app_${app.id}`, format), getBackupFilePath(backupConfig, backupId, format));
var copy = api(backupConfig.provider).copy(backupConfig, getBackupFilePath(backupConfig, `snapshot/app_${app.id}`, format), getBackupFilePath(backupConfig, backupId, format));
copy.on('progress', (message) => progressCallback({ message: `${message} (${app.fqdn})` }));
copy.on('done', function (copyBackupError) {
const state = copyBackupError ? exports.BACKUP_STATE_ERROR : exports.BACKUP_STATE_NORMAL;
@@ -1140,7 +1107,7 @@ function uploadAppSnapshot(backupConfig, app, progressCallback, callback) {
const backupId = util.format('snapshot/app_%s', app.id);
const appDataDir = safe.fs.realpathSync(path.join(paths.APPS_DATA_DIR, app.id));
if (!appDataDir) return callback(new BoxError(BoxError.FS_ERROR, `Error resolving appsdata: ${safe.error.message}`));
if (!appDataDir) return callback(safe.error);
const dataLayout = new DataLayout(appDataDir, app.dataDir ? [{ localDir: app.dataDir, remoteDir: 'data' }] : []);
@@ -1148,7 +1115,7 @@ function uploadAppSnapshot(backupConfig, app, progressCallback, callback) {
const uploadConfig = {
backupId,
backupConfig,
format: backupConfig.format,
dataLayout,
progressTag: app.fqdn
};
@@ -1200,8 +1167,6 @@ function backupApp(app, options, progressCallback, callback) {
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
if (options.snapshotOnly) return snapshotApp(app, progressCallback, callback);
const tag = (new Date()).toISOString().replace(/[T.]/g, '-').replace(/[:Z]/g,'');
debug(`backupApp - Backing up ${app.fqdn} with tag ${tag}`);
@@ -1270,13 +1235,13 @@ function startBackupTask(auditSource, callback) {
eventlog.add(eventlog.ACTION_BACKUP_START, auditSource, { taskId });
tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit }, async function (error, backupId) {
tasks.startTask(taskId, { timeout: 24 * 60 * 60 * 1000 /* 24 hours */, nice: 15, memoryLimit }, function (error, backupId) {
locker.unlock(locker.OP_FULL_BACKUP);
const errorMessage = error ? error.message : '';
const timedOut = error ? error.code === tasks.ETIMEOUT : false;
await safe(eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId }));
eventlog.add(eventlog.ACTION_BACKUP_FINISH, auditSource, { taskId, errorMessage, timedOut, backupId });
});
callback(null, taskId);
@@ -1348,7 +1313,7 @@ function cleanupBackup(backupConfig, backup, progressCallback, callback) {
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
const backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);
var backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);
function done(error) {
if (error) {
@@ -1452,48 +1417,6 @@ function cleanupBoxBackups(backupConfig, progressCallback, callback) {
});
}
function cleanupMissingBackups(backupConfig, progressCallback, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof progressCallback, 'function');
assert.strictEqual(typeof callback, 'function');
let page = 1, perPage = 1000, more = false, missingBackupIds = [];
if (constants.TEST) return callback(null, missingBackupIds);
async.doWhilst(function (whilstCallback) {
backupdb.list(page, perPage, function (error, result) {
if (error) return whilstCallback(error);
async.eachSeries(result, function (backup, next) {
let backupFilePath = getBackupFilePath(backupConfig, backup.id, backup.format);
if (backup.format === 'rsync') backupFilePath = backupFilePath + '/'; // add trailing slash to indicate directory
api(backupConfig.provider).exists(backupConfig, backupFilePath, function (error, exists) {
if (error || exists) return next();
progressCallback({ message: `Removing missing backup ${backup.id}`});
backupdb.del(backup.id, function (error) {
if (error) debug(`cleanupBackup: error removing ${backup.id} from database`, error);
missingBackupIds.push(backup.id);
next();
});
});
}, function () {
more = result.length === perPage;
whilstCallback();
});
});
}, function (testDone) { return testDone(null, more); }, function (error) {
if (error) return callback(error);
return callback(null, missingBackupIds);
});
}
function cleanupCacheFilesSync() {
var files = safe.fs.readdirSync(path.join(paths.BACKUP_INFO_DIR));
if (!files) return;
@@ -1565,18 +1488,12 @@ function cleanup(progressCallback, callback) {
cleanupAppBackups(backupConfig, referencedAppBackupIds, progressCallback, function (error, removedAppBackupIds) {
if (error) return callback(error);
progressCallback({ percent: 70, message: 'Cleaning missing backups' });
progressCallback({ percent: 90, message: 'Cleaning snapshots' });
cleanupMissingBackups(backupConfig, progressCallback, function (error, missingBackupIds) {
cleanupSnapshots(backupConfig, function (error) {
if (error) return callback(error);
progressCallback({ percent: 90, message: 'Cleaning snapshots' });
cleanupSnapshots(backupConfig, function (error) {
if (error) return callback(error);
callback(null, { removedBoxBackupIds, removedAppBackupIds, missingBackupIds });
});
callback(null, { removedBoxBackupIds, removedAppBackupIds });
});
});
});
@@ -1588,13 +1505,12 @@ function startCleanupTask(auditSource, callback) {
tasks.add(tasks.TASK_CLEAN_BACKUPS, [], function (error, taskId) {
if (error) return callback(error);
tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackupIds, removedAppBackupIds, missingBackupIds }
tasks.startTask(taskId, {}, (error, result) => { // result is { removedBoxBackups, removedAppBackups }
eventlog.add(eventlog.ACTION_BACKUP_CLEANUP_FINISH, auditSource, {
taskId,
errorMessage: error ? error.message : null,
removedBoxBackupIds: result ? result.removedBoxBackupIds : [],
removedAppBackupIds: result ? result.removedAppBackupIds : [],
missingBackupIds: result ? result.missingBackupIds : []
removedBoxBackups: result ? result.removedBoxBackups : [],
removedAppBackups: result ? result.removedAppBackups : []
});
});
@@ -1602,6 +1518,23 @@ function startCleanupTask(auditSource, callback) {
});
}
function checkConfiguration(callback) {
assert.strictEqual(typeof callback, 'function');
settings.getBackupConfig(function (error, backupConfig) {
if (error) return callback(error);
let message = '';
if (backupConfig.provider === 'noop') {
message = 'Cloudron backups are disabled. Please ensure this server is backed up using alternate means. See https://docs.cloudron.io/backups/#storage-providers for more information.';
} else if (backupConfig.provider === 'filesystem' && !backupConfig.externalDisk) {
message = 'Cloudron backups are currently on the same disk as the Cloudron server instance. This is dangerous and can lead to complete data loss if the disk fails. See https://docs.cloudron.io/backups/#storage-providers for storing backups in an external location.';
}
callback(null, message);
});
}
function configureCollectd(backupConfig, callback) {
assert.strictEqual(typeof backupConfig, 'object');
assert.strictEqual(typeof callback, 'function');
-102
View File
@@ -1,102 +0,0 @@
/* jslint node:true */
'use strict';
exports = module.exports = {
get,
set,
del,
initSecrets,
ACME_ACCOUNT_KEY: 'acme_account_key',
ADDON_TURN_SECRET: 'addon_turn_secret',
DHPARAMS: 'dhparams',
SFTP_PUBLIC_KEY: 'sftp_public_key',
SFTP_PRIVATE_KEY: 'sftp_private_key',
CERT_PREFIX: 'cert',
_clear: clear
};
const assert = require('assert'),
BoxError = require('./boxerror.js'),
constants = require('./constants.js'),
crypto = require('crypto'),
database = require('./database.js'),
debug = require('debug')('box:blobs'),
paths = require('./paths.js'),
safe = require('safetydance');
const BLOBS_FIELDS = [ 'id', 'value' ].join(',');
// Fetch the raw blob value stored under `id`, or null when no such row exists.
async function get(id) {
    assert.strictEqual(typeof id, 'string');

    const rows = await database.query(`SELECT ${BLOBS_FIELDS} FROM blobs WHERE id = ?`, [ id ]);

    return rows.length === 0 ? null : rows[0].value;
}
// Upsert the blob `value` (a Buffer, or null) under `id`.
async function set(id, value) {
    assert.strictEqual(typeof id, 'string');
    assert(value === null || Buffer.isBuffer(value));

    const query = 'INSERT INTO blobs (id, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value)';
    await database.query(query, [ id, value ]);
}
// Remove the blob stored under `id`. No error when the id does not exist.
async function del(id) {
    assert.strictEqual(typeof id, 'string'); // validate input like get()/set() do

    await database.query('DELETE FROM blobs WHERE id=?', [ id ]);
}
// Test helper (_clear export): wipe every row from the blobs table.
async function clear() {
    const query = 'DELETE FROM blobs';
    await database.query(query);
}
// Generate (or load) the long-lived secrets the box needs and persist them as
// blobs: the ACME account key, the TURN shared secret, the TLS dhparams and
// the SFTP host key pair. On-disk copies are re-created from the database copy
// when missing, so the blobs table acts as the source of truth.
// Throws BoxError (OPENSSL_ERROR/FS_ERROR) when generation or persistence fails.
async function initSecrets() {
    // ACME account key: 4096 bit RSA key generated via openssl on first run
    let acmeAccountKey = await get(exports.ACME_ACCOUNT_KEY);
    if (!acmeAccountKey) {
        acmeAccountKey = safe.child_process.execSync('openssl genrsa 4096');
        if (!acmeAccountKey) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate acme account key: ${safe.error.message}`);
        await set(exports.ACME_ACCOUNT_KEY, acmeAccountKey);
    }
    // shared secret for the TURN addon
    let turnSecret = await get(exports.ADDON_TURN_SECRET);
    if (!turnSecret) {
        turnSecret = 'a' + crypto.randomBytes(15).toString('hex'); // prefix with a to ensure string starts with a letter
        await set(exports.ADDON_TURN_SECRET, Buffer.from(turnSecret));
    }
    // dhparams generation is skipped entirely under test (it is very slow)
    if (!constants.TEST) {
        let dhparams = await get(exports.DHPARAMS);
        if (!dhparams) {
            debug('initSecrets: generating dhparams.pem. this takes forever');
            dhparams = safe.child_process.execSync('openssl dhparam 2048');
            if (!dhparams) throw new BoxError(BoxError.OPENSSL_ERROR, safe.error);
            if (!safe.fs.writeFileSync(paths.DHPARAMS_FILE, dhparams)) throw new BoxError(BoxError.FS_ERROR, `Could not save dhparams.pem: ${safe.error.message}`);
            await set(exports.DHPARAMS, dhparams);
        } else if (!safe.fs.existsSync(paths.DHPARAMS_FILE)) {
            // blob exists but the on-disk copy is gone; restore the file
            if (!safe.fs.writeFileSync(paths.DHPARAMS_FILE, dhparams)) throw new BoxError(BoxError.FS_ERROR, `Could not save dhparams.pem: ${safe.error.message}`);
        }
    }
    // SFTP host keys: regenerated together when either half is missing in the db
    let sftpPrivateKey = await get(exports.SFTP_PRIVATE_KEY);
    let sftpPublicKey = await get(exports.SFTP_PUBLIC_KEY);
    if (!sftpPrivateKey || !sftpPublicKey) {
        debug('initSecrets: generate sftp keys');
        if (constants.TEST) {
            // remove stale key files under test before regenerating
            safe.fs.unlinkSync(paths.SFTP_PUBLIC_KEY_FILE);
            safe.fs.unlinkSync(paths.SFTP_PRIVATE_KEY_FILE);
        }
        if (!safe.child_process.execSync(`ssh-keygen -m PEM -t rsa -f "${paths.SFTP_KEYS_DIR}/ssh_host_rsa_key" -q -N ""`)) throw new BoxError(BoxError.OPENSSL_ERROR, `Could not generate sftp ssh keys: ${safe.error.message}`);
        sftpPublicKey = safe.fs.readFileSync(paths.SFTP_PUBLIC_KEY_FILE);
        await set(exports.SFTP_PUBLIC_KEY, sftpPublicKey);
        sftpPrivateKey = safe.fs.readFileSync(paths.SFTP_PRIVATE_KEY_FILE);
        await set(exports.SFTP_PRIVATE_KEY, sftpPrivateKey);
    } else if (!safe.fs.existsSync(paths.SFTP_PUBLIC_KEY_FILE) || !safe.fs.existsSync(paths.SFTP_PRIVATE_KEY_FILE)) {
        // keys exist in the db but not on disk; restore the files
        if (!safe.fs.writeFileSync(paths.SFTP_PUBLIC_KEY_FILE, sftpPublicKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp public key: ${safe.error.message}`);
        if (!safe.fs.writeFileSync(paths.SFTP_PRIVATE_KEY_FILE, sftpPrivateKey)) throw new BoxError(BoxError.FS_ERROR, `Could not save sftp private key: ${safe.error.message}`);
    }
}
+5 -7
View File
@@ -9,17 +9,17 @@ const assert = require('assert'),
exports = module.exports = BoxError;
function BoxError(reason, errorOrMessage, override) {
function BoxError(reason, errorOrMessage, details) {
assert.strictEqual(typeof reason, 'string');
assert(errorOrMessage instanceof Error || typeof errorOrMessage === 'string' || typeof errorOrMessage === 'undefined');
assert(typeof override === 'object' || typeof override === 'undefined');
assert(typeof details === 'object' || typeof details === 'undefined');
Error.call(this);
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.reason = reason;
this.details = {};
this.details = details || {};
if (typeof errorOrMessage === 'undefined') {
this.message = reason;
@@ -28,7 +28,7 @@ function BoxError(reason, errorOrMessage, override) {
} else { // error object
this.message = errorOrMessage.message;
this.nestedError = errorOrMessage;
_.extend(this, override); // copy enumerable properties
_.extend(this.details, errorOrMessage); // copy enumerable properties
}
}
util.inherits(BoxError, Error);
@@ -47,14 +47,13 @@ BoxError.DOCKER_ERROR = 'Docker Error';
BoxError.EXTERNAL_ERROR = 'External Error'; // use this for external API errors
BoxError.FEATURE_DISABLED = 'Feature Disabled';
BoxError.FS_ERROR = 'FileSystem Error';
BoxError.INACTIVE = 'Inactive'; // service/volume/mount
BoxError.INACTIVE = 'Inactive';
BoxError.INTERNAL_ERROR = 'Internal Error';
BoxError.INVALID_CREDENTIALS = 'Invalid Credentials';
BoxError.IPTABLES_ERROR = 'IPTables Error';
BoxError.LICENSE_ERROR = 'License Error';
BoxError.LOGROTATE_ERROR = 'Logrotate Error';
BoxError.MAIL_ERROR = 'Mail Error';
BoxError.MOUNT_ERROR = 'Mount Error';
BoxError.NETWORK_ERROR = 'Network Error';
BoxError.NGINX_ERROR = 'Nginx Error';
BoxError.NOT_FOUND = 'Not found';
@@ -91,7 +90,6 @@ BoxError.toHttpError = function (error) {
case BoxError.EXTERNAL_ERROR:
case BoxError.NETWORK_ERROR:
case BoxError.FS_ERROR:
case BoxError.MOUNT_ERROR:
case BoxError.MAIL_ERROR:
case BoxError.DOCKER_ERROR:
case BoxError.ADDONS_ERROR:
-18
View File
@@ -1,18 +0,0 @@
'use strict';
exports = module.exports = {
renderFooter
};
const assert = require('assert'),
constants = require('./constants.js');
// Render a footer template by substituting the %YEAR% and %VERSION% placeholders.
function renderFooter(footer) {
    assert.strictEqual(typeof footer, 'string');

    const replacements = {
        '%YEAR%': new Date().getFullYear(),
        '%VERSION%': constants.VERSION
    };

    let rendered = footer;
    for (const token of Object.keys(replacements)) {
        rendered = rendered.split(token).join(replacements[token]);
    }
    return rendered;
}
+628
View File
@@ -0,0 +1,628 @@
'use strict';
var assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
crypto = require('crypto'),
debug = require('debug')('box:cert/acme2'),
domains = require('../domains.js'),
fs = require('fs'),
path = require('path'),
paths = require('../paths.js'),
request = require('request'),
safe = require('safetydance'),
util = require('util'),
_ = require('underscore');
const CA_PROD_DIRECTORY_URL = 'https://acme-v02.api.letsencrypt.org/directory',
CA_STAGING_DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory';
exports = module.exports = {
getCertificate: getCertificate,
// testing
_name: 'acme',
_getChallengeSubdomain: getChallengeSubdomain
};
// http://jose.readthedocs.org/en/latest/
// https://www.ietf.org/proceedings/92/slides/slides-92-acme-1.pdf
// https://community.letsencrypt.org/t/list-of-client-implementations/2103
// ACME v2 client state. `options` selects the CA endpoint (prod vs staging),
// the contact email and the authorization mode (http-01 vs dns-01).
function Acme2(options) {
    assert.strictEqual(typeof options, 'object');

    this.email = options.email;
    this.performHttpAuthorization = Boolean(options.performHttpAuthorization);
    this.wildcard = Boolean(options.wildcard);
    this.caDirectory = options.prod ? CA_PROD_DIRECTORY_URL : CA_STAGING_DIRECTORY_URL;

    // filled in lazily by acmeFlow()/getDirectory()/registerUser()
    this.accountKeyPem = null; // Buffer
    this.keyId = null;         // account url returned by the CA on registration
    this.directory = {};       // parsed ACME directory document
}
// Convert a standard base64 string to the url-safe alphabet used by JOSE
// ('+' -> '-', '/' -> '_') and strip the '=' padding.
function urlBase64Encode(string) {
    const substitutions = { '+': '-', '/': '_', '=': '' };
    return string.replace(/[+/=]/g, (char) => substitutions[char]);
}
// base64url-encode a string or Buffer (jose encoding).
function b64(str) {
    // Buffer.isBuffer replaces the long-deprecated util.isBuffer
    const buf = Buffer.isBuffer(str) ? str : Buffer.from(str);
    return urlBase64Encode(buf.toString('base64'));
}
// Extract the RSA modulus from a PEM encoded key as a Buffer.
// Returns null when openssl fails or produces unexpected output.
function getModulus(pem) {
    assert(util.isBuffer(pem));

    const output = safe.child_process.execSync('openssl rsa -modulus -noout', { input: pem, encoding: 'utf8' });
    if (!output) return null;

    const modulusMatch = /Modulus=([0-9a-fA-F]+)$/m.exec(output);

    return modulusMatch ? Buffer.from(modulusMatch[1], 'hex') : null;
}
// Send a JWS-signed POST to `url` with the given (string) payload, per ACME.
// A fresh anti-replay nonce is fetched from the directory's newNonce endpoint
// for every request. Yields the raw response; a JSON body is parsed in place.
Acme2.prototype.sendSignedRequest = function (url, payload, callback) {
    assert.strictEqual(typeof url, 'string');
    assert.strictEqual(typeof payload, 'string');
    assert.strictEqual(typeof callback, 'function');
    assert(util.isBuffer(this.accountKeyPem));

    const that = this;

    let header = {
        url: url,
        alg: 'RS256'
    };

    // keyId is null when registering account
    if (this.keyId) {
        header.kid = this.keyId; // existing account: identify by account url
    } else {
        // no account yet: embed the full public key as a JWK
        header.jwk = {
            e: b64(Buffer.from([0x01, 0x00, 0x01])), // exponent - 65537
            kty: 'RSA',
            n: b64(getModulus(this.accountKeyPem))
        };
    }

    var payload64 = b64(payload);

    request.get(this.directory.newNonce, { json: true, timeout: 30000 }, function (error, response) {
        if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error sending signed request: ${error.message}`));
        if (response.statusCode !== 204) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching nonce : ' + response.statusCode));

        const nonce = response.headers['Replay-Nonce'.toLowerCase()];
        if (!nonce) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'No nonce in response'));
        debug('sendSignedRequest: using nonce %s for url %s', nonce, url);

        // protected header = header + nonce, base64url encoded
        var protected64 = b64(JSON.stringify(_.extend({ }, header, { nonce: nonce })));

        // the signature covers "<protected>.<payload>"
        var signer = crypto.createSign('RSA-SHA256');
        signer.update(protected64 + '.' + payload64, 'utf8');
        var signature64 = urlBase64Encode(signer.sign(that.accountKeyPem, 'base64'));

        var data = {
            protected: protected64,
            payload: payload64,
            signature: signature64
        };

        request.post(url, { headers: { 'Content-Type': 'application/jose+json', 'User-Agent': 'acme-cloudron' }, body: JSON.stringify(data), timeout: 30000 }, function (error, response) {
            if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error sending signed request: ${error.message}`)); // network error
            // we don't set json: true in request because it ends up mangling the content-type
            if (response.headers['content-type'] === 'application/json') response.body = safe.JSON.parse(response.body);
            callback(null, response);
        });
    });
};
// POST-as-GET: fetch a resource with an authenticated POST carrying an empty
// payload. https://tools.ietf.org/html/rfc8555#section-6.3
Acme2.prototype.postAsGet = function (url, callback) {
    assert.strictEqual(typeof url, 'string');        // validate args like the other prototype methods
    assert.strictEqual(typeof callback, 'function');

    this.sendSignedRequest(url, '', callback);
};
// Set the contact email on an existing ACME account.
Acme2.prototype.updateContact = function (registrationUri, callback) {
    assert.strictEqual(typeof registrationUri, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`updateContact: registrationUri: ${registrationUri} email: ${this.email}`);

    const self = this;
    // https://github.com/ietf-wg-acme/acme/issues/30
    const payload = JSON.stringify({ contact: [ 'mailto:' + this.email ] });

    this.sendSignedRequest(registrationUri, payload, function (error, result) {
        if (error) return callback(error);
        if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to update contact. Expecting 200, got %s %s', result.statusCode, result.text)));

        debug(`updateContact: contact of user updated to ${self.email}`);

        callback();
    });
};
// Create (or look up) the ACME account for our key and remember its key id.
Acme2.prototype.registerUser = function (callback) {
    assert.strictEqual(typeof callback, 'function');

    debug('registerUser: registering user');

    const self = this;
    const payload = JSON.stringify({ termsOfServiceAgreed: true });

    this.sendSignedRequest(this.directory.newAccount, payload, function (error, result) {
        if (error) return callback(error);

        const okCodes = [ 200, 201 ]; // 200 if already exists. 201 for new accounts
        if (!okCodes.includes(result.statusCode)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to register new account. Expecting 200 or 201, got %s %s', result.statusCode, result.text)));

        debug(`registerUser: user registered keyid: ${result.headers.location}`);

        self.keyId = result.headers.location;
        self.updateContact(result.headers.location, callback);
    });
};
// Create a new certificate order for `domain`. Yields the parsed order
// document and its location url.
Acme2.prototype.newOrder = function (domain, callback) {
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug('newOrder: %s', domain);

    const payload = JSON.stringify({ identifiers: [ { type: 'dns', value: domain } ] });

    this.sendSignedRequest(this.directory.newOrder, payload, function (error, result) {
        if (error) return callback(error);
        if (result.statusCode === 403) return callback(new BoxError(BoxError.ACCESS_DENIED, `Forbidden sending signed request: ${result.body.detail}`));
        if (result.statusCode !== 201) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to register user. Expecting 201, got %s %s', result.statusCode, result.text)));

        debug('newOrder: created order %s %j', domain, result.body);

        const order = result.body;
        const orderUrl = result.headers.location;

        // sanity check the order document before handing it to the caller
        if (!Array.isArray(order.authorizations)) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid authorizations in order'));
        if (typeof order.finalize !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid finalize in order'));
        if (typeof orderUrl !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'invalid order location in order header'));

        callback(null, order, orderUrl);
    });
};
// Poll the order resource until it leaves the pending/processing states.
// Retries every 20s for up to 15 attempts; yields the certificate url on success.
Acme2.prototype.waitForOrder = function (orderUrl, callback) {
    assert.strictEqual(typeof orderUrl, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`waitForOrder: ${orderUrl}`);

    const that = this;

    async.retry({ times: 15, interval: 20000 }, function (retryCallback) {
        debug('waitForOrder: getting status');

        that.postAsGet(orderUrl, function (error, result) {
            if (error) {
                debug('waitForOrder: network error getting uri %s', orderUrl);
                return retryCallback(error);
            }
            if (result.statusCode !== 200) {
                debug('waitForOrder: invalid response code getting uri %s', result.statusCode);
                return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
            }

            debug('waitForOrder: status is "%s" %j', result.body.status, result.body); // fixed unbalanced quote in format string

            if (result.body.status === 'pending' || result.body.status === 'processing') return retryCallback(new BoxError(BoxError.TRY_AGAIN, `Request is in ${result.body.status} state`));
            else if (result.body.status === 'valid' && result.body.certificate) return retryCallback(null, result.body.certificate);
            else return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unexpected status or invalid response: ' + result.body));
        });
    }, callback);
};
// Build the key authorization for a challenge token: "<token>.<jwk thumbprint>"
// where the thumbprint is the base64url sha256 of our account public key JWK.
Acme2.prototype.getKeyAuthorization = function (token) {
    assert(util.isBuffer(this.accountKeyPem));

    const jwk = {
        e: b64(Buffer.from([0x01, 0x00, 0x01])), // Exponent - 65537
        kty: 'RSA',
        n: b64(getModulus(this.accountKeyPem))
    };

    const digest = crypto.createHash('sha256').update(JSON.stringify(jwk)).digest('base64');
    const thumbprint = urlBase64Encode(digest);

    return token + '.' + thumbprint;
};
// Tell the CA that the challenge response is in place and can be validated.
Acme2.prototype.notifyChallengeReady = function (challenge, callback) {
    assert.strictEqual(typeof challenge, 'object'); // { type, status, url, token }
    assert.strictEqual(typeof callback, 'function');

    debug('notifyChallengeReady: %s was met', challenge.url);

    const payload = JSON.stringify({
        resource: 'challenge',
        keyAuthorization: this.getKeyAuthorization(challenge.token)
    });

    this.sendSignedRequest(challenge.url, payload, function (error, result) {
        if (error) return callback(error);
        if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to notify challenge. Expecting 200, got %s %s', result.statusCode, result.text)));

        callback();
    });
};
// Poll the challenge resource until the CA reports it valid.
Acme2.prototype.waitForChallenge = function (challenge, callback) {
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug('waitingForChallenge: %j', challenge);

    const self = this;

    async.retry({ times: 15, interval: 20000 }, function (retryCallback) {
        debug('waitingForChallenge: getting status');

        self.postAsGet(challenge.url, function (error, result) {
            if (error) {
                debug('waitForChallenge: network error getting uri %s', challenge.url);
                return retryCallback(error);
            }
            if (result.statusCode !== 200) {
                debug('waitForChallenge: invalid response code getting uri %s', result.statusCode);
                return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Bad response code:' + result.statusCode));
            }

            debug('waitForChallenge: status is "%s" %j', result.body.status, result.body);

            switch (result.body.status) {
            case 'pending': return retryCallback(new BoxError(BoxError.TRY_AGAIN));
            case 'valid': return retryCallback();
            default: return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, 'Unexpected status: ' + result.body.status));
            }
        });
    }, function retryFinished(error) {
        // async.retry will pass 'undefined' as second arg making it unusable with async.waterfall()
        callback(error);
    });
};
// Finalize the order by submitting the CSR (DER encoded) for signing.
// https://community.letsencrypt.org/t/public-beta-rate-limits/4772 for rate limits
Acme2.prototype.signCertificate = function (domain, finalizationUrl, csrDer, callback) {
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof finalizationUrl, 'string');
    assert(util.isBuffer(csrDer));
    assert.strictEqual(typeof callback, 'function');

    debug('signCertificate: sending sign request');

    const payload = JSON.stringify({ csr: b64(csrDer) });

    this.sendSignedRequest(finalizationUrl, payload, function (error, result) {
        if (error) return callback(error);
        // 429 means we reached the cert limit for this domain
        if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to sign certificate. Expecting 200, got %s %s', result.statusCode, result.text)));

        return callback(null);
    });
};
// Create (or reuse) the EC private key for `hostname` and generate a fresh
// CSR. Yields the CSR in DER form. Wildcard names are stored with a '_.' prefix.
Acme2.prototype.createKeyAndCsr = function (hostname, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof callback, 'function');

    const certName = hostname.replace('*.', '_.');
    const csrFile = path.join(paths.APP_CERTS_DIR, `${certName}.csr`);
    const privateKeyFile = path.join(paths.APP_CERTS_DIR, `${certName}.key`);

    if (!safe.fs.existsSync(privateKeyFile)) {
        const key = safe.child_process.execSync('openssl ecparam -genkey -name secp384r1'); // openssl ecparam -list_curves
        if (!key) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
        if (!safe.fs.writeFileSync(privateKeyFile, key)) return callback(new BoxError(BoxError.FS_ERROR, safe.error));
        debug('createKeyAndCsr: key file saved at %s', privateKeyFile);
    } else {
        // in some old releases, csr file was corrupt. so always regenerate it
        debug('createKeyAndCsr: reuse the key for renewal at %s', privateKeyFile);
    }

    const csrDer = safe.child_process.execSync(`openssl req -new -key ${privateKeyFile} -outform DER -subj /CN=${hostname}`);
    if (!csrDer) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
    if (!safe.fs.writeFileSync(csrFile, csrDer)) return callback(new BoxError(BoxError.FS_ERROR, safe.error)); // bookkeeping

    debug('createKeyAndCsr: csr file (DER) saved at %s', csrFile);

    callback(null, csrDer);
};
// Fetch the signed certificate chain from `certUrl` and save it under
// APP_CERTS_DIR. Retries while the CA responds with 202.
Acme2.prototype.downloadCertificate = function (hostname, certUrl, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof certUrl, 'string');
    assert.strictEqual(typeof callback, 'function');

    const self = this;

    async.retry({ times: 5, interval: 20000 }, function (retryCallback) {
        debug('downloadCertificate: downloading certificate');

        self.postAsGet(certUrl, function (error, result) {
            if (error) return retryCallback(new BoxError(BoxError.NETWORK_ERROR, `Network error when downloading certificate: ${error.message}`));
            if (result.statusCode === 202) return retryCallback(new BoxError(BoxError.TRY_AGAIN, 'Retry downloading certificate'));
            if (result.statusCode !== 200) return retryCallback(new BoxError(BoxError.EXTERNAL_ERROR, util.format('Failed to get cert. Expecting 200, got %s %s', result.statusCode, result.text)));

            const fullChainPem = result.body; // buffer
            const certName = hostname.replace('*.', '_.');
            const certificateFile = path.join(paths.APP_CERTS_DIR, `${certName}.cert`);

            if (!safe.fs.writeFileSync(certificateFile, fullChainPem)) return retryCallback(new BoxError(BoxError.FS_ERROR, safe.error));

            debug('downloadCertificate: cert file for %s saved at %s', hostname, certificateFile);

            retryCallback(null);
        });
    }, callback);
};
// Write the http-01 key authorization into the well-known challenges
// directory where nginx serves it. Yields the chosen challenge object.
Acme2.prototype.prepareHttpChallenge = function (hostname, domain, authorization, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorization, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug('acmeFlow: challenges: %j', authorization);

    const challenge = authorization.challenges.find(function (c) { return c.type === 'http-01'; });
    if (!challenge) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'no http challenges'));

    debug('prepareHttpChallenge: preparing for challenge %j', challenge);

    const keyAuthorization = this.getKeyAuthorization(challenge.token);
    const challengeFile = path.join(paths.ACME_CHALLENGES_DIR, challenge.token);

    debug('prepareHttpChallenge: writing %s to %s', keyAuthorization, challengeFile);

    fs.writeFile(challengeFile, keyAuthorization, function (error) {
        if (error) return callback(new BoxError(BoxError.FS_ERROR, error));
        callback(null, challenge);
    });
};
// Remove the well-known http-01 response file for this challenge token.
Acme2.prototype.cleanupHttpChallenge = function (hostname, domain, challenge, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof callback, 'function');

    const challengeFile = path.join(paths.ACME_CHALLENGES_DIR, challenge.token);

    debug('cleanupHttpChallenge: unlinking %s', challengeFile);

    fs.unlink(challengeFile, callback);
};
// Compute the subdomain (relative to zone `domain`) where the dns-01 TXT
// record for `hostname` must be created.
function getChallengeSubdomain(hostname, domain) {
    let challengeSubdomain;

    if (hostname === domain) { // naked domain
        challengeSubdomain = '_acme-challenge';
    } else {
        const relative = hostname.slice(0, -domain.length - 1); // strip the '.<domain>' suffix
        if (hostname.includes('*')) { // wildcard
            challengeSubdomain = relative ? relative.replace('*', '_acme-challenge') : '_acme-challenge';
        } else {
            challengeSubdomain = '_acme-challenge.' + relative;
        }
    }

    debug(`getChallengeSubdomain: challenge subdomain for hostname ${hostname} at domain ${domain} is ${challengeSubdomain}`);

    return challengeSubdomain;
}
// Create the dns-01 TXT record and wait for it to be visible in DNS.
// Yields the chosen challenge object.
Acme2.prototype.prepareDnsChallenge = function (hostname, domain, authorization, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorization, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug('acmeFlow: challenges: %j', authorization);

    const challenge = authorization.challenges.find(function (c) { return c.type === 'dns-01'; });
    if (!challenge) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'no dns challenges'));

    // the TXT value is the base64url sha256 digest of the key authorization
    const keyAuthorization = this.getKeyAuthorization(challenge.token);
    const txtValue = urlBase64Encode(crypto.createHash('sha256').update(keyAuthorization).digest('base64'));
    const challengeSubdomain = getChallengeSubdomain(hostname, domain);

    debug(`prepareDnsChallenge: update ${challengeSubdomain} with ${txtValue}`);

    domains.upsertDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], function (error) {
        if (error) return callback(error);

        domains.waitForDnsRecord(challengeSubdomain, domain, 'TXT', txtValue, { times: 200 }, function (error) {
            if (error) return callback(error);

            callback(null, challenge);
        });
    });
};
// Remove the dns-01 TXT record created by prepareDnsChallenge().
Acme2.prototype.cleanupDnsChallenge = function (hostname, domain, challenge, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof callback, 'function');

    // recompute the same TXT value the preparation step published
    const keyAuthorization = this.getKeyAuthorization(challenge.token);
    const txtValue = urlBase64Encode(crypto.createHash('sha256').update(keyAuthorization).digest('base64'));
    const challengeSubdomain = getChallengeSubdomain(hostname, domain);

    debug(`cleanupDnsChallenge: remove ${challengeSubdomain} with ${txtValue}`);

    domains.removeDnsRecords(challengeSubdomain, domain, 'TXT', [ `"${txtValue}"` ], function (error) {
        if (error) return callback(error);

        callback(null);
    });
};
// Fetch the authorization document and set up whichever challenge type this
// client is configured for (http-01 or dns-01).
Acme2.prototype.prepareChallenge = function (hostname, domain, authorizationUrl, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof authorizationUrl, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`prepareChallenge: http: ${this.performHttpAuthorization}`);

    const self = this;

    this.postAsGet(authorizationUrl, function (error, response) {
        if (error) return callback(error);
        if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code getting authorization : ' + response.statusCode));

        const authorization = response.body;
        const prepare = self.performHttpAuthorization ? self.prepareHttpChallenge : self.prepareDnsChallenge;

        prepare.call(self, hostname, domain, authorization, callback);
    });
};
// Tear down whatever the matching prepare*Challenge() call set up.
Acme2.prototype.cleanupChallenge = function (hostname, domain, challenge, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof challenge, 'object');
    assert.strictEqual(typeof callback, 'function');

    debug(`cleanupChallenge: http: ${this.performHttpAuthorization}`);

    const cleanup = this.performHttpAuthorization ? this.cleanupHttpChallenge : this.cleanupDnsChallenge;

    cleanup.call(this, hostname, domain, challenge, callback);
};
// Run the full ACME issuance flow for `hostname`: load/create the account key,
// register the account, create an order and, for each authorization, perform
// the challenge dance (prepare -> notify -> wait), then finalize the order and
// download the signed certificate. The challenge is always cleaned up, even
// when an intermediate step failed.
Acme2.prototype.acmeFlow = function (hostname, domain, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof callback, 'function');

    // the account key lives on disk; generate a 4096 bit RSA key on first run
    if (!fs.existsSync(paths.ACME_ACCOUNT_KEY_FILE)) {
        debug('getCertificate: generating acme account key on first run');
        this.accountKeyPem = safe.child_process.execSync('openssl genrsa 4096');
        if (!this.accountKeyPem) return callback(new BoxError(BoxError.OPENSSL_ERROR, safe.error));
        safe.fs.writeFileSync(paths.ACME_ACCOUNT_KEY_FILE, this.accountKeyPem);
    } else {
        debug('getCertificate: using existing acme account key');
        this.accountKeyPem = fs.readFileSync(paths.ACME_ACCOUNT_KEY_FILE);
    }

    var that = this;

    this.registerUser(function (error) {
        if (error) return callback(error);

        that.newOrder(hostname, function (error, order, orderUrl) {
            if (error) return callback(error);

            // authorizations are processed strictly one at a time
            async.eachSeries(order.authorizations, function (authorizationUrl, iteratorCallback) {
                debug(`acmeFlow: authorizing ${authorizationUrl}`);

                that.prepareChallenge(hostname, domain, authorizationUrl, function (error, challenge) {
                    if (error) return iteratorCallback(error);

                    // each waterfall step feeds its result to the next: the csr
                    // to signCertificate, the certificate url to downloadCertificate
                    async.waterfall([
                        that.notifyChallengeReady.bind(that, challenge),
                        that.waitForChallenge.bind(that, challenge),
                        that.createKeyAndCsr.bind(that, hostname),
                        that.signCertificate.bind(that, hostname, order.finalize),
                        that.waitForOrder.bind(that, orderUrl),
                        that.downloadCertificate.bind(that, hostname)
                    ], function (error) {
                        // best-effort cleanup; the original error (if any) wins
                        that.cleanupChallenge(hostname, domain, challenge, function (cleanupError) {
                            if (cleanupError) debug('acmeFlow: ignoring error when cleaning up challenge:', cleanupError);
                            iteratorCallback(error);
                        });
                    });
                });
            }, callback);
        });
    });
};
// Fetches and caches the ACME directory document, which maps operation names
// (newNonce, newOrder, newAccount) to the CA's endpoint URLs. Must succeed
// before any other ACME call can be made.
Acme2.prototype.getDirectory = function (callback) {
    assert.strictEqual(typeof callback, 'function'); // consistency with the other prototype methods

    const that = this;

    request.get(this.caDirectory, { json: true, timeout: 30000 }, function (error, response) {
        if (error) return callback(new BoxError(BoxError.NETWORK_ERROR, `Network error getting directory: ${error.message}`));
        if (response.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, 'Invalid response code when fetching directory : ' + response.statusCode));

        // body may be null/non-object if the CA returned non-JSON; guard before
        // dereferencing. JSON.stringify so the error message shows the actual
        // payload instead of "[object Object]".
        if (!response.body ||
            typeof response.body.newNonce !== 'string' ||
            typeof response.body.newOrder !== 'string' ||
            typeof response.body.newAccount !== 'string') return callback(new BoxError(BoxError.EXTERNAL_ERROR, `Invalid response body : ${JSON.stringify(response.body)}`));

        that.directory = response.body;

        callback(null);
    });
};
// Obtains a certificate for `hostname` (widened to a wildcard name when this
// instance is configured for wildcard certs) and calls back with the paths of
// the resulting cert and key files.
Acme2.prototype.getCertificate = function (hostname, domain, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`getCertificate: start acme flow for ${hostname} from ${this.caDirectory}`);

    if (hostname !== domain && this.wildcard) { // bare domain is not part of wildcard SAN
        hostname = domains.makeWildcard(hostname);
        debug(`getCertificate: will get wildcard cert for ${hostname}`);
    }

    const self = this;

    this.getDirectory(function (error) {
        if (error) return callback(error);

        self.acmeFlow(hostname, domain, function (error) {
            if (error) return callback(error);

            // wildcard certs are stored with '_.' in place of '*.' on disk
            const certName = hostname.replace('*.', '_.');
            const certFilePath = path.join(paths.APP_CERTS_DIR, `${certName}.cert`);
            const keyFilePath = path.join(paths.APP_CERTS_DIR, `${certName}.key`);

            callback(null, certFilePath, keyFilePath);
        });
    });
};
// Module-level entry point: runs the ACME flow for hostname/domain, retrying
// up to three times with a fresh Acme2 instance per attempt.
function getCertificate(hostname, domain, options, callback) {
    assert.strictEqual(typeof hostname, 'string');
    assert.strictEqual(typeof domain, 'string');
    assert.strictEqual(typeof options, 'object');
    assert.strictEqual(typeof callback, 'function');

    let attempt = 0;

    async.retry({ times: 3, interval: 0 }, function (retryCallback) {
        attempt += 1;
        debug(`getCertificate: attempt ${attempt}`);

        const acme = new Acme2(options || { });
        acme.getCertificate(hostname, domain, retryCallback);
    }, callback);
}
+1 -1
View File
@@ -5,7 +5,7 @@ let assert = require('assert'),
path = require('path');
exports = module.exports = {
getChanges
getChanges: getChanges
};
function getChanges(version) {
+62 -76
View File
@@ -17,19 +17,18 @@ exports = module.exports = {
setDashboardDomain,
updateDashboardDomain,
renewCerts,
syncDnsRecords,
runSystemChecks
};
const apps = require('./apps.js'),
var addons = require('./addons.js'),
apps = require('./apps.js'),
appstore = require('./appstore.js'),
assert = require('assert'),
async = require('async'),
auditSource = require('./auditsource.js'),
backups = require('./backups.js'),
BoxError = require('./boxerror.js'),
branding = require('./branding.js'),
constants = require('./constants.js'),
cron = require('./cron.js'),
debug = require('debug')('box:cloudron'),
@@ -43,7 +42,6 @@ const apps = require('./apps.js'),
platform = require('./platform.js'),
reverseProxy = require('./reverseproxy.js'),
safe = require('safetydance'),
services = require('./services.js'),
settings = require('./settings.js'),
shell = require('./shell.js'),
spawn = require('child_process').spawn,
@@ -52,14 +50,16 @@ const apps = require('./apps.js'),
tasks = require('./tasks.js'),
users = require('./users.js');
const REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh');
var REBOOT_CMD = path.join(__dirname, 'scripts/reboot.sh');
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
async function initialize() {
function initialize(callback) {
assert.strictEqual(typeof callback, 'function');
runStartupTasks();
await notifyUpdate();
notifyUpdate(callback);
}
function uninitialize(callback) {
@@ -71,37 +71,42 @@ function uninitialize(callback) {
], callback);
}
function onActivated(options, callback) {
assert.strictEqual(typeof options, 'object');
function onActivated(callback) {
assert.strictEqual(typeof callback, 'function');
debug('onActivated: running post activation tasks');
// Starting the platform after a user is available means:
// 1. mail bounces can now be sent to the cloudron owner
// 2. the restore code path can run without sudo (since mail/ is non-root)
async.series([
platform.start.bind(null, options),
platform.start,
cron.startJobs,
function checkBackupConfiguration(done) {
backups.checkConfiguration(function (error, message) {
if (error) return done(error);
notifications.alert(notifications.ALERT_BACKUP_CONFIG, 'Backup configuration is unsafe', message, done);
});
},
// disable responding to api calls via IP to not leak domain info. this is carefully placed as the last item, so it buys
// the UI some time to query the dashboard domain in the restore code path
(done) => setTimeout(() => reverseProxy.writeDefaultConfig({ activated :true }, done), 30000)
], callback);
}
async function notifyUpdate() {
function notifyUpdate(callback) {
assert.strictEqual(typeof callback, 'function');
const version = safe.fs.readFileSync(paths.VERSION_FILE, 'utf8');
if (version === constants.VERSION) return;
if (version === constants.VERSION) return callback();
await eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION });
eventlog.add(eventlog.ACTION_UPDATE_FINISH, auditSource.CRON, { errorMessage: '', oldVersion: version || 'dev', newVersion: constants.VERSION }, function (error) {
if (error) return callback(error);
return new Promise((resolve, reject) => {
tasks.setCompletedByType(tasks.TASK_UPDATE, { error: null }, function (error) {
if (error && error.reason !== BoxError.NOT_FOUND) return reject(error); // when hotfixing, task may not exist
if (error && error.reason !== BoxError.NOT_FOUND) return callback(error); // when hotfixing, task may not exist
safe.fs.writeFileSync(paths.VERSION_FILE, constants.VERSION, 'utf8');
resolve();
callback();
});
});
}
@@ -123,9 +128,9 @@ function runStartupTasks() {
// always generate webadmin config since we have no versioning mechanism for the ejs
function (callback) {
if (!settings.dashboardDomain()) return callback();
if (!settings.adminDomain()) return callback();
reverseProxy.writeDashboardConfig(settings.dashboardDomain(), callback);
reverseProxy.writeDashboardConfig(settings.adminDomain(), callback);
},
// check activation state and start the platform
@@ -141,7 +146,7 @@ function runStartupTasks() {
return reverseProxy.writeDefaultConfig({ activated: false }, callback);
}
onActivated({}, callback);
onActivated(callback);
});
}
];
@@ -157,10 +162,6 @@ function runStartupTasks() {
function getConfig(callback) {
assert.strictEqual(typeof callback, 'function');
const release = safe.fs.readFileSync('/etc/lsb-release', 'utf-8');
if (release === null) return callback(new BoxError(BoxError.FS_ERROR, safe.error.message));
const ubuntuVersion = release.match(/DISTRIB_DESCRIPTION="(.*)"/)[1];
settings.getAll(function (error, allSettings) {
if (error) return callback(error);
@@ -168,14 +169,13 @@ function getConfig(callback) {
callback(null, {
apiServerOrigin: settings.apiServerOrigin(),
webServerOrigin: settings.webServerOrigin(),
adminDomain: settings.dashboardDomain(),
adminFqdn: settings.dashboardFqdn(),
adminDomain: settings.adminDomain(),
adminFqdn: settings.adminFqdn(),
mailFqdn: settings.mailFqdn(),
version: constants.VERSION,
ubuntuVersion,
isDemo: settings.isDemo(),
cloudronName: allSettings[settings.CLOUDRON_NAME_KEY],
footer: branding.renderFooter(allSettings[settings.FOOTER_KEY] || constants.FOOTER),
footer: allSettings[settings.FOOTER_KEY] || constants.FOOTER,
features: appstore.getFeatures(),
profileLocked: allSettings[settings.DIRECTORY_CONFIG_KEY].lockUserProfiles,
mandatory2FA: allSettings[settings.DIRECTORY_CONFIG_KEY].mandatory2FA
@@ -183,52 +183,53 @@ function getConfig(callback) {
});
}
async function reboot() {
await notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', '');
function reboot(callback) {
notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', '', function (error) {
if (error) debug('reboot: failed to clear reboot notification.', error);
const [error] = await safe(shell.promises.sudo('reboot', [ REBOOT_CMD ], {}));
if (error) debug('reboot: could not reboot', error);
shell.sudo('reboot', [ REBOOT_CMD ], {}, callback);
});
}
async function isRebootRequired() {
function isRebootRequired(callback) {
assert.strictEqual(typeof callback, 'function');
// https://serverfault.com/questions/92932/how-does-ubuntu-keep-track-of-the-system-restart-required-flag-in-motd
return fs.existsSync('/var/run/reboot-required');
callback(null, fs.existsSync('/var/run/reboot-required'));
}
// called from cron.js
function runSystemChecks(callback) {
assert.strictEqual(typeof callback, 'function');
debug('runSystemChecks: checking status');
async.parallel([
checkMailStatus,
checkRebootRequired,
checkUbuntuVersion
checkRebootRequired
], callback);
}
function checkMailStatus(callback) {
assert.strictEqual(typeof callback, 'function');
mail.checkConfiguration(async function (error, message) {
debug('checking mail status');
mail.checkConfiguration(function (error, message) {
if (error) return callback(error);
await safe(notifications.alert(notifications.ALERT_MAIL_STATUS, 'Email is not configured properly', message));
callback();
notifications.alert(notifications.ALERT_MAIL_STATUS, 'Email is not configured properly', message, callback);
});
}
async function checkRebootRequired() {
const rebootRequired = await isRebootRequired();
await notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', rebootRequired ? 'To finish ubuntu security updates, a reboot is necessary.' : '');
}
function checkRebootRequired(callback) {
assert.strictEqual(typeof callback, 'function');
async function checkUbuntuVersion() {
const isXenial = fs.readFileSync('/etc/lsb-release', 'utf-8').includes('16.04');
if (!isXenial) return;
debug('checking if reboot required');
await notifications.alert(notifications.ALERT_UPDATE_UBUNTU, 'Ubuntu upgrade required', 'Ubuntu 16.04 has reached end of life and will not receive security and maintenance updates. Please follow https://docs.cloudron.io/guides/upgrade-ubuntu-18/ to upgrade to Ubuntu 18 at the earliest.');
isRebootRequired(function (error, rebootRequired) {
if (error) return callback(error);
notifications.alert(notifications.ALERT_REBOOT, 'Reboot Required', rebootRequired ? 'To finish ubuntu security updates, a reboot is necessary.' : '', callback);
});
}
function getLogs(unit, options, callback) {
@@ -289,7 +290,7 @@ function prepareDashboardDomain(domain, auditSource, callback) {
domains.get(domain, function (error, domainObject) {
if (error) return callback(error);
const fqdn = domains.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const fqdn = domains.fqdn(constants.ADMIN_LOCATION, domainObject);
apps.getAll(function (error, result) {
if (error) return callback(error);
@@ -297,7 +298,7 @@ function prepareDashboardDomain(domain, auditSource, callback) {
const conflict = result.filter(app => app.fqdn === fqdn);
if (conflict.length) return callback(new BoxError(BoxError.BAD_STATE, 'Dashboard location conflicts with an existing app'));
tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.DASHBOARD_LOCATION, domain, auditSource ], function (error, taskId) {
tasks.add(tasks.TASK_SETUP_DNS_AND_CERT, [ constants.ADMIN_LOCATION, domain, auditSource ], function (error, taskId) {
if (error) return callback(error);
tasks.startTask(taskId, {}, NOOP_CALLBACK);
@@ -322,14 +323,12 @@ function setDashboardDomain(domain, auditSource, callback) {
reverseProxy.writeDashboardConfig(domain, function (error) {
if (error) return callback(error);
const fqdn = domains.fqdn(constants.DASHBOARD_LOCATION, domainObject);
const fqdn = domains.fqdn(constants.ADMIN_LOCATION, domainObject);
settings.setDashboardLocation(domain, fqdn, function (error) {
settings.setAdminLocation(domain, fqdn, function (error) {
if (error) return callback(error);
appstore.updateCloudron({ domain }, NOOP_CALLBACK);
eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain, fqdn });
eventlog.add(eventlog.ACTION_DASHBOARD_DOMAIN_UPDATE, auditSource, { domain: domain, fqdn: fqdn });
callback(null);
});
@@ -350,7 +349,7 @@ function updateDashboardDomain(domain, auditSource, callback) {
setDashboardDomain(domain, auditSource, function (error) {
if (error) return callback(error);
services.rebuildService('turn', NOOP_CALLBACK); // to update the realm variable
addons.rebuildService('turn', NOOP_CALLBACK); // to update the realm variable
callback(null);
});
@@ -361,7 +360,7 @@ function renewCerts(options, auditSource, callback) {
assert.strictEqual(typeof auditSource, 'object');
assert.strictEqual(typeof callback, 'function');
tasks.add(tasks.TASK_CHECK_CERTS, [ options, auditSource ], function (error, taskId) {
tasks.add(tasks.TASK_RENEW_CERTS, [ options, auditSource ], function (error, taskId) {
if (error) return callback(error);
tasks.startTask(taskId, {}, NOOP_CALLBACK);
@@ -380,17 +379,17 @@ function setupDnsAndCert(subdomain, domain, auditSource, progressCallback, callb
domains.get(domain, function (error, domainObject) {
if (error) return callback(error);
const dashboardFqdn = domains.fqdn(subdomain, domainObject);
const adminFqdn = domains.fqdn(subdomain, domainObject);
sysinfo.getServerIp(function (error, ip) {
if (error) return callback(error);
async.series([
(done) => { progressCallback({ message: `Updating DNS of ${dashboardFqdn}` }); done(); },
(done) => { progressCallback({ message: `Updating DNS of ${adminFqdn}` }); done(); },
domains.upsertDnsRecords.bind(null, subdomain, domain, 'A', [ ip ]),
(done) => { progressCallback({ message: `Waiting for DNS of ${dashboardFqdn}` }); done(); },
(done) => { progressCallback({ message: `Waiting for DNS of ${adminFqdn}` }); done(); },
domains.waitForDnsRecord.bind(null, subdomain, domain, 'A', ip, { interval: 30000, times: 50000 }),
(done) => { progressCallback({ message: `Getting certificate of ${dashboardFqdn}` }); done(); },
(done) => { progressCallback({ message: `Getting certificate of ${adminFqdn}` }); done(); },
reverseProxy.ensureCertificate.bind(null, domains.fqdn(subdomain, domainObject), domain, auditSource)
], function (error) {
if (error) return callback(error);
@@ -400,16 +399,3 @@ function setupDnsAndCert(subdomain, domain, auditSource, progressCallback, callb
});
});
}
function syncDnsRecords(options, callback) {
assert.strictEqual(typeof options, 'object');
assert.strictEqual(typeof callback, 'function');
tasks.add(tasks.TASK_SYNC_DNS_RECORDS, [ options ], function (error, taskId) {
if (error) return callback(error);
tasks.startTask(taskId, {}, NOOP_CALLBACK);
callback(null, taskId);
});
}
-9
View File
@@ -1,9 +0,0 @@
<Plugin python>
<Module du>
<Path>
Instance "<%= volumeId %>"
Dir "<%= hostPath %>"
</Path>
</Module>
</Plugin>
+6 -14
View File
@@ -22,30 +22,22 @@ exports = module.exports = {
'admins', 'users' // ldap code uses 'users' pseudo group
],
DASHBOARD_LOCATION: 'my',
ADMIN_LOCATION: 'my',
PORT: CLOUDRON ? 3000 : 5454,
INTERNAL_SMTP_PORT: 2525, // this value comes from the mail container
AUTHWALL_PORT: 3001,
SYSADMIN_PORT: 3001, // unused
LDAP_PORT: 3002,
DOCKER_PROXY_PORT: 3003,
NGINX_DEFAULT_CONFIG_FILE_NAME: 'default.conf',
DEFAULT_TOKEN_EXPIRATION_MSECS: 365 * 24 * 60 * 60 * 1000, // 1 year
DEFAULT_TOKEN_EXPIRATION_DAYS: 365,
DEFAULT_TOKEN_EXPIRATION: 365 * 24 * 60 * 60 * 1000, // 1 year
DEFAULT_MEMORY_LIMIT: (256 * 1024 * 1024), // see also client.js
DEMO_USERNAME: 'cloudron',
DEMO_BLACKLISTED_APPS: [
'com.github.cloudtorrent',
'net.alltubedownload.cloudronapp',
'com.adguard.home.cloudronapp',
'com.transmissionbt.cloudronapp',
'io.github.sickchill.cloudronapp',
'to.couchpota.cloudronapp'
],
DEMO_BLACKLISTED_APPS: [ 'com.github.cloudtorrent' ],
AUTOUPDATE_PATTERN_NEVER: 'never',
@@ -56,8 +48,8 @@ exports = module.exports = {
SUPPORT_EMAIL: 'support@cloudron.io',
FOOTER: '&copy; %YEAR% &nbsp; [Cloudron](https://cloudron.io) &nbsp; &nbsp; &nbsp; [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)',
FOOTER: '&copy; 2020 &nbsp; [Cloudron](https://cloudron.io) &nbsp; &nbsp; &nbsp; [Forum <i class="fa fa-comments"></i>](https://forum.cloudron.io)',
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '6.0.1-test'
VERSION: process.env.BOX_ENV === 'cloudron' ? fs.readFileSync(path.join(__dirname, '../VERSION'), 'utf8').trim() : '5.1.1-test'
};
+8 -7
View File
@@ -1,10 +1,10 @@
'use strict';
exports = module.exports = {
sendFailureLogs
sendFailureLogs: sendFailureLogs
};
const assert = require('assert'),
var assert = require('assert'),
auditSource = require('./auditsource.js'),
eventlog = require('./eventlog.js'),
safe = require('safetydance'),
@@ -38,7 +38,7 @@ function sendFailureLogs(unitName, callback) {
return callback();
}
collectLogs(unitName, async function (error, logs) {
collectLogs(unitName, function (error, logs) {
if (error) {
console.error('Failed to collect logs.', error);
logs = util.format('Failed to collect logs.', error);
@@ -49,11 +49,12 @@ function sendFailureLogs(unitName, callback) {
if (!safe.fs.writeFileSync(path.join(paths.CRASH_LOG_DIR, `${crashId}.log`), logs)) console.log(`Failed to stash logs to ${crashId}.log:`, safe.error);
[error] = await safe(eventlog.add(eventlog.ACTION_PROCESS_CRASH, auditSource.HEALTH_MONITOR, { processName: unitName, crashId: crashId }));
if (error) console.log(`Error sending crashlog. Logs stashed at ${crashId}.log`);
eventlog.add(eventlog.ACTION_PROCESS_CRASH, auditSource.HEALTH_MONITOR, { processName: unitName, crashId: crashId }, function (error) {
if (error) console.log(`Error sending crashlog. Logs stashed at ${crashId}.log`);
safe.fs.writeFileSync(CRASH_LOG_TIMESTAMP_FILE, String(Date.now()));
safe.fs.writeFileSync(CRASH_LOG_TIMESTAMP_FILE, String(Date.now()));
callback();
callback();
});
});
}
+8 -12
View File
@@ -16,7 +16,7 @@ exports = module.exports = {
DEFAULT_AUTOUPDATE_PATTERN,
};
const appHealthMonitor = require('./apphealthmonitor.js'),
var appHealthMonitor = require('./apphealthmonitor.js'),
apps = require('./apps.js'),
assert = require('assert'),
async = require('async'),
@@ -33,10 +33,9 @@ const appHealthMonitor = require('./apphealthmonitor.js'),
settings = require('./settings.js'),
system = require('./system.js'),
updater = require('./updater.js'),
updateChecker = require('./updatechecker.js'),
_ = require('underscore');
updateChecker = require('./updatechecker.js');
const gJobs = {
var gJobs = {
autoUpdater: null,
backup: null,
updateChecker: null,
@@ -52,7 +51,7 @@ const gJobs = {
appHealthMonitor: null
};
const NOOP_CALLBACK = function (error) { if (error) debug(error); };
var NOOP_CALLBACK = function (error) { if (error) debug(error); };
// cron format
// Seconds: 0-59
@@ -65,8 +64,6 @@ const NOOP_CALLBACK = function (error) { if (error) debug(error); };
function startJobs(callback) {
assert.strictEqual(typeof callback, 'function');
debug('startJobs: starting cron jobs');
const randomTick = Math.floor(60*Math.random());
gJobs.systemChecks = new CronJob({
cronTime: '00 30 2 * * *', // once a day. if you change this interval, change the notification messages with correct duration
@@ -101,7 +98,7 @@ function startJobs(callback) {
gJobs.cleanupEventlog = new CronJob({
cronTime: '00 */30 * * * *', // every 30 minutes
onTick: eventlog.cleanup.bind(null, { creationTime: new Date(Date.now() - 60 * 60 * 24 * 10 * 1000) }), // 10 days ago
onTick: eventlog.cleanup,
start: true
});
@@ -199,10 +196,9 @@ function autoupdatePatternChanged(pattern, tz) {
return;
}
const appUpdateInfo = _.omit(updateInfo, 'box');
if (Object.keys(appUpdateInfo).length > 0) {
debug('Starting app update to %j', appUpdateInfo);
apps.autoupdateApps(appUpdateInfo, auditSource.CRON, NOOP_CALLBACK);
if (updateInfo.apps && Object.keys(updateInfo.apps).length > 0) {
debug('Starting app update to %j', updateInfo.apps);
apps.autoupdateApps(updateInfo.apps, auditSource.CRON, NOOP_CALLBACK);
} else {
debug('No app auto updates available');
}
+29 -43
View File
@@ -1,18 +1,18 @@
'use strict';
exports = module.exports = {
initialize,
uninitialize,
query,
transaction,
initialize: initialize,
uninitialize: uninitialize,
query: query,
transaction: transaction,
importFromFile,
exportToFile,
importFromFile: importFromFile,
exportToFile: exportToFile,
_clear: clear
};
const assert = require('assert'),
var assert = require('assert'),
async = require('async'),
BoxError = require('./boxerror.js'),
child_process = require('child_process'),
@@ -45,8 +45,6 @@ function initialize(callback) {
// https://github.com/mysqljs/mysql#pool-options
gConnectionPool = mysql.createPool({
connectionLimit: 5,
acquireTimeout: 60000,
connectTimeout: 60000,
host: gDatabase.hostname,
user: gDatabase.username,
password: gDatabase.password,
@@ -88,52 +86,40 @@ function clear(callback) {
}
function query() {
assert.notStrictEqual(gConnectionPool, null);
const args = Array.prototype.slice.call(arguments);
const callback = args[args.length - 1];
assert.strictEqual(typeof callback, 'function');
return new Promise((resolve, reject) => {
let args = Array.prototype.slice.call(arguments);
const callback = typeof args[args.length - 1] === 'function' ? args.pop() : null;
if (constants.TEST && !gConnectionPool) return callback(new BoxError(BoxError.DATABASE_ERROR, 'database.js not initialized'));
args.push(function queryCallback(error, result) {
if (error) return callback ? callback(error) : reject(new BoxError(BoxError.DATABASE_ERROR, error, { code: error.code, sqlMessage: error.sqlMessage }));
callback ? callback(null, result) : resolve(result);
});
gConnectionPool.query.apply(gConnectionPool, args); // this is same as getConnection/query/release
});
gConnectionPool.query.apply(gConnectionPool, args); // this is same as getConnection/query/release
}
function transaction(queries) {
assert(Array.isArray(queries));
function transaction(queries, callback) {
assert(util.isArray(queries));
assert.strictEqual(typeof callback, 'function');
const args = Array.prototype.slice.call(arguments);
const callback = typeof args[args.length - 1] === 'function' ? once(args.pop()) : null;
callback = once(callback);
return new Promise((resolve, reject) => {
gConnectionPool.getConnection(function (error, connection) {
if (error) return callback ? callback(error) : reject(new BoxError(BoxError.DATABASE_ERROR, error, { code: error.code, sqlMessage: error.sqlMessage }));
gConnectionPool.getConnection(function (error, connection) {
if (error) return callback(error);
const releaseConnection = (error) => {
connection.release();
callback ? callback(error) : reject(new BoxError(BoxError.DATABASE_ERROR, error, { code: error.code, sqlMessage: error.sqlMessage }));
};
const releaseConnection = (error) => { connection.release(); callback(error); };
connection.beginTransaction(function (error) {
if (error) return releaseConnection(error);
connection.beginTransaction(function (error) {
if (error) return releaseConnection(error);
async.mapSeries(queries, function iterator(query, done) {
connection.query(query.query, query.args, done);
}, function seriesDone(error, results) {
async.mapSeries(queries, function iterator(query, done) {
connection.query(query.query, query.args, done);
}, function seriesDone(error, results) {
if (error) return connection.rollback(() => releaseConnection(error));
connection.commit(function (error) {
if (error) return connection.rollback(() => releaseConnection(error));
connection.commit(function (error) {
if (error) return connection.rollback(() => releaseConnection(error));
connection.release();
connection.release();
callback ? callback(null, results) : resolve(results);
});
callback(null, results);
});
});
});
+2 -2
View File
@@ -110,7 +110,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -206,7 +206,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
+12 -12
View File
@@ -1,16 +1,16 @@
'use strict';
exports = module.exports = {
removePrivateFields,
injectPrivateFields,
upsert,
get,
del,
wait,
verifyDnsConfig
removePrivateFields: removePrivateFields,
injectPrivateFields: injectPrivateFields,
upsert: upsert,
get: get,
del: del,
wait: wait,
verifyDnsConfig: verifyDnsConfig
};
const assert = require('assert'),
var assert = require('assert'),
async = require('async'),
BoxError = require('../boxerror.js'),
constants = require('../constants.js'),
@@ -22,7 +22,7 @@ const assert = require('assert'),
util = require('util'),
waitForDns = require('./waitfordns.js');
const DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
var DIGITALOCEAN_ENDPOINT = 'https://api.digitalocean.com';
function formatError(response) {
return util.format('DigitalOcean DNS error [%s] %j', response.statusCode, response.body);
@@ -69,7 +69,7 @@ function getInternal(dnsConfig, zoneName, name, type, callback) {
iteratorDone();
});
}, function (testDone) { return testDone(null, !!nextPage); }, function (error) {
}, function () { return !!nextPage; }, function (error) {
debug('getInternal:', error, JSON.stringify(matchingRecords));
if (error) return callback(error);
@@ -82,7 +82,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -185,7 +185,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
+2 -2
View File
@@ -39,7 +39,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -98,7 +98,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
+2 -2
View File
@@ -73,7 +73,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -144,7 +144,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
+2 -2
View File
@@ -45,7 +45,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -118,7 +118,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
+2 -2
View File
@@ -34,7 +34,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
// Result: none
@@ -57,7 +57,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
// Result: none
+14 -17
View File
@@ -1,13 +1,13 @@
'use strict';
exports = module.exports = {
removePrivateFields,
injectPrivateFields,
upsert,
get,
del,
wait,
verifyDnsConfig
removePrivateFields: removePrivateFields,
injectPrivateFields: injectPrivateFields,
upsert: upsert,
get: get,
del: del,
wait: wait,
verifyDnsConfig: verifyDnsConfig
};
let async = require('async'),
@@ -99,7 +99,7 @@ function getZoneRecords(dnsConfig, zoneName, name, type, callback) {
iteratorDone();
});
}, function (testDone) { return testDone(null, more); }, function (error) {
}, function () { return more; }, function (error) {
debug('getZoneRecords:', error, JSON.stringify(records));
if (error) return callback(error);
@@ -119,10 +119,9 @@ function get(domainObject, location, type, callback) {
zoneName = domainObject.zoneName,
name = domains.getName(domainObject, location, type) || '';
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
getZoneRecords(dnsConfig, zoneName, name, type, function (error, { records }) {
if (error) return callback(error);
const { records } = result;
var tmp = records.map(function (record) { return record.target; });
debug('get: %j', tmp);
@@ -135,7 +134,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -144,10 +143,9 @@ function upsert(domainObject, location, type, values, callback) {
debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
getZoneRecords(dnsConfig, zoneName, name, type, function (error, { zoneId, records }) {
if (error) return callback(error);
const { zoneId, records } = result;
let i = 0, recordIds = []; // used to track available records to update instead of create
async.eachSeries(values, function (value, iteratorCallback) {
@@ -217,17 +215,16 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
zoneName = domainObject.zoneName,
name = domains.getName(domainObject, location, type) || '';
getZoneRecords(dnsConfig, zoneName, name, type, function (error, result) {
getZoneRecords(dnsConfig, zoneName, name, type, function (error, { zoneId, records }) {
if (error) return callback(error);
const { zoneId, records } = result;
if (records.length === 0) return callback(null);
var tmp = records.filter(function (record) { return values.some(function (value) { return value === record.target; }); });
@@ -290,7 +287,7 @@ function verifyDnsConfig(domainObject, callback) {
if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));
if (nameservers.map(function (n) { return n.toLowerCase(); }).indexOf('ns1.linode.com') === -1) {
debug('verifyDnsConfig: %j does not contains linode NS', nameservers);
debug('verifyDnsConfig: %j does not contains DO NS', nameservers);
return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Linode', { field: 'nameservers' }));
}
+2 -2
View File
@@ -31,7 +31,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
debug('upsert: %s for zone %s of type %s with values %j', location, domainObject.zoneName, type, values);
@@ -52,7 +52,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
return callback();
+32 -23
View File
@@ -16,10 +16,10 @@ var assert = require('assert'),
debug = require('debug')('box:dns/namecheap'),
dns = require('../native-dns.js'),
domains = require('../domains.js'),
querystring = require('querystring'),
safe = require('safetydance'),
superagent = require('superagent'),
sysinfo = require('../sysinfo.js'),
util = require('util'),
waitForDns = require('./waitfordns.js'),
xml2js = require('xml2js');
@@ -50,9 +50,11 @@ function getQuery(dnsConfig, callback) {
});
}
function getZone(dnsConfig, zoneName, callback) {
function getInternal(dnsConfig, zoneName, subdomain, type, callback) {
assert.strictEqual(typeof dnsConfig, 'object');
assert.strictEqual(typeof zoneName, 'string');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert.strictEqual(typeof callback, 'function');
getQuery(dnsConfig, function (error, query) {
@@ -87,7 +89,7 @@ function getZone(dnsConfig, zoneName, callback) {
});
}
function setZone(dnsConfig, zoneName, hosts, callback) {
function setInternal(dnsConfig, zoneName, hosts, callback) {
assert.strictEqual(typeof dnsConfig, 'object');
assert.strictEqual(typeof zoneName, 'string');
assert(Array.isArray(hosts));
@@ -114,10 +116,7 @@ function setZone(dnsConfig, zoneName, hosts, callback) {
}
});
// namecheap recommends sending as POSTDATA with > 10 records
const qs = querystring.stringify(query);
superagent.post(ENDPOINT).set('Content-Type', 'application/x-www-form-urlencoded').send(qs).end(function (error, result) {
superagent.post(ENDPOINT).query(query).end(function (error, result) {
if (error) return callback(new BoxError(BoxError.EXTERNAL_ERROR, error));
var parser = new xml2js.Parser();
@@ -145,7 +144,7 @@ function upsert(domainObject, subdomain, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config;
@@ -155,17 +154,17 @@ function upsert(domainObject, subdomain, type, values, callback) {
debug('upsert: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
getZone(dnsConfig, zoneName, function (error, result) {
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
if (error) return callback(error);
// Array to keep track of records that need to be inserted
let toInsert = [];
for (let i = 0; i < values.length; i++) {
for (var i = 0; i < values.length; i++) {
let curValue = values[i];
let wasUpdate = false;
for (let j = 0; j < result.length; j++) {
for (var j = 0; j < result.length; j++) {
let curHost = result[j];
if (curHost.Type === type && curHost.Name === subdomain) {
@@ -199,9 +198,9 @@ function upsert(domainObject, subdomain, type, values, callback) {
}
}
const hosts = result.concat(toInsert);
let toUpsert = result.concat(toInsert);
setZone(dnsConfig, zoneName, hosts, callback);
setInternal(dnsConfig, zoneName, toUpsert, callback);
});
}
@@ -216,16 +215,16 @@ function get(domainObject, subdomain, type, callback) {
subdomain = domains.getName(domainObject, subdomain, type) || '@';
getZone(dnsConfig, zoneName, function (error, result) {
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
if (error) return callback(error);
// We need to filter hosts to ones with this subdomain and type
const actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
let actualHosts = result.filter((host) => host.Type === type && host.Name === subdomain);
// We only return the value string
const tmp = actualHosts.map(function (record) { return record.Address; });
var tmp = actualHosts.map(function (record) { return record.Address; });
debug(`get: subdomain: ${subdomain} type:${type} value:${JSON.stringify(tmp)}`);
debug('get: %j', tmp);
return callback(null, tmp);
});
@@ -235,7 +234,7 @@ function del(domainObject, subdomain, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof subdomain, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config;
@@ -245,19 +244,29 @@ function del(domainObject, subdomain, type, values, callback) {
debug('del: %s for zone %s of type %s with values %j', subdomain, zoneName, type, values);
getZone(dnsConfig, zoneName, function (error, result) {
getInternal(dnsConfig, zoneName, subdomain, type, function (error, result) {
if (error) return callback(error);
if (result.length === 0) return callback();
const originalLength = result.length;
for (let i = 0; i < values.length; i++) {
let removed = false;
for (var i = 0; i < values.length; i++) {
let curValue = values[i];
result = result.filter(curHost => curHost.Type !== type || curHost.Name !== subdomain || curHost.Address !== curValue);
for (var j = 0; j < result.length; j++) {
let curHost = result[i];
if (curHost.Type === type && curHost.Name === subdomain && curHost.Address === curValue) {
removed = true;
result.splice(i, 1); // Remove element from result array
}
}
}
if (result.length !== originalLength) return setZone(dnsConfig, zoneName, result, callback);
// Only set hosts if we actually removed a host
if (removed) return setInternal(dnsConfig, zoneName, result, callback);
callback();
});
+2 -2
View File
@@ -157,7 +157,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -200,7 +200,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
-303
View File
@@ -1,303 +0,0 @@
'use strict';

// Public interface of the Netcup DNS backend. Every DNS backend exposes
// this same set of functions so domains.js can dispatch to them uniformly.
exports = module.exports = {
    removePrivateFields,
    injectPrivateFields,
    upsert,
    get,
    del,
    wait,
    verifyDnsConfig
};
var assert = require('assert'),
BoxError = require('../boxerror.js'),
constants = require('../constants.js'),
debug = require('debug')('box:dns/netcup'),
dns = require('../native-dns.js'),
domains = require('../domains.js'),
superagent = require('superagent'),
util = require('util'),
waitForDns = require('./waitfordns.js');
// Netcup CCP webservice endpoint; the ?JSON suffix selects the JSON (not SOAP) flavour
var API_ENDPOINT = 'https://ccp.netcup.net/run/webservice/servers/endpoint.php?JSON';
// Render a failed Netcup API response as a human-readable string.
// Prefers the structured error fields in the parsed body; falls back
// to the raw HTTP status code and response text.
function formatError(response) {
    if (!response.body) return util.format('Netcup DNS error [%s] %s', response.statusCode, response.text);

    return util.format('Netcup DNS error [%s] %s', response.body.statuscode, response.body.longmessage);
}
// Mask secrets in-place so the domain object can be returned to clients.
// NOTE(review): only `token` is masked here, but this backend's config holds
// apiKey/apiPassword/customerNumber (see verifyDnsConfig) — confirm whether
// those should be masked too (must be changed together with injectPrivateFields).
function removePrivateFields(domainObject) {
    const config = domainObject.config;
    config.token = constants.SECRET_PLACEHOLDER;
    return domainObject;
}
// Restore a secret the client echoed back as the placeholder, using the
// value from the currently stored config. Mutates newConfig in place.
// NOTE(review): only `token` is handled — keep in sync with removePrivateFields.
function injectPrivateFields(newConfig, currentConfig) {
    const isPlaceholder = newConfig.token === constants.SECRET_PLACEHOLDER;
    if (isPlaceholder) newConfig.token = currentConfig.token;
}
// Authenticate against the Netcup API.
// Yields the apisessionid string that must accompany every subsequent request.
function login(dnsConfig, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof callback, 'function');

    const payload = {
        action: 'login',
        param: {
            apikey: dnsConfig.apiKey,
            apipassword: dnsConfig.apiPassword,
            customernumber: dnsConfig.customerNumber
        }
    };

    superagent.post(API_ENDPOINT).send(payload).end(function (error, result) {
        // no response at all means a transport-level failure
        if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
        if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));

        callback(null, result.body.responsedata.apisessionid);
    });
}
// Fetch every DNS record of the given zone.
// Yields an array of Netcup record objects (hostname/type/destination, …);
// an absent record set is normalized to an empty array.
function getAllRecords(dnsConfig, apiSessionId, zoneName, callback) {
    assert.strictEqual(typeof dnsConfig, 'object');
    assert.strictEqual(typeof apiSessionId, 'string');
    assert.strictEqual(typeof zoneName, 'string');
    assert.strictEqual(typeof callback, 'function');

    debug(`getAllRecords: getting dns records of ${zoneName}`);

    const payload = {
        action: 'infoDnsRecords',
        param: {
            apikey: dnsConfig.apiKey,
            apisessionid: apiSessionId,
            customernumber: dnsConfig.customerNumber,
            domainname: zoneName,
        }
    };

    superagent.post(API_ENDPOINT).send(payload).end(function (error, result) {
        if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
        if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));

        const records = result.body.responsedata.dnsrecords || [];
        debug('getAllRecords:', JSON.stringify(records));
        callback(null, records);
    });
}
// Create or update DNS records for `location`/`type` so they carry `values`.
// Existing records matching the name/type are re-used (one per value); any
// surplus values become new records. MX values are passed as "<prio> <host>".
//
// Fix: the previous implementation looked up the existing record with
// find(hostname, type) for EVERY value, so multiple values always hit the
// SAME record object and all but the last value were lost. We now keep a
// pool of the existing matching records and consume one per value.
function upsert(domainObject, location, type, values, callback) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(Array.isArray(values));
    assert.strictEqual(typeof callback, 'function');

    const dnsConfig = domainObject.config,
        zoneName = domainObject.zoneName,
        name = domains.getName(domainObject, location, type) || '@';

    debug('upsert: %s for zone %s of type %s with values %j', name, zoneName, type, values);

    login(dnsConfig, function (error, apiSessionId) {
        if (error) return callback(error);

        getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, result) {
            if (error) return callback(error);

            // pool of existing records for this name/type; each value consumes
            // at most one so multiple values do not clobber the same record
            const existing = result.filter(function (r) { return r.hostname === name && r.type === type; });

            let records = [];
            values.forEach(function (value) {
                // remove possible quotation (e.g. quoted TXT values)
                if (value.charAt(0) === '"') value = value.slice(1);
                if (value.charAt(value.length - 1) === '"') value = value.slice(0, -1);

                let priority = null;
                if (type === 'MX') { // split "<priority> <host>" into its parts
                    priority = parseInt(value.split(' ')[0], 10);
                    value = value.split(' ')[1];
                }

                let record = existing.shift(); // re-use an existing record if one is left
                if (!record) record = { hostname: name, type: type, destination: value, deleterecord: false };
                else record.destination = value;
                if (priority !== null) record.priority = priority;

                records.push(record);
            });

            const data = {
                action: 'updateDnsRecords',
                param: {
                    apikey: dnsConfig.apiKey,
                    apisessionid: apiSessionId,
                    customernumber: dnsConfig.customerNumber,
                    domainname: zoneName,
                    dnsrecordset: {
                        dnsrecords: records
                    }
                }
            };

            debug('upserting', JSON.stringify(data));

            superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
                if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
                if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
                // the API reports application-level failures with HTTP 200 + statuscode
                if (result.body.statuscode !== 2000) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));

                debug('upsert:', result.body);

                callback(null);
            });
        });
    });
}
// Return the destination values of all records matching location and type.
// Yields an array of strings (possibly empty).
function get(domainObject, location, type, callback) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert.strictEqual(typeof callback, 'function');

    const dnsConfig = domainObject.config,
        zoneName = domainObject.zoneName,
        name = domains.getName(domainObject, location, type) || '@';

    debug('get: %s for zone %s of type %s', name, zoneName, type);

    login(dnsConfig, function (error, apiSessionId) {
        if (error) return callback(error);

        getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, allRecords) {
            if (error) return callback(error);

            // keep only records of this name/type and strip them down to their values
            const matches = allRecords.filter(function (r) { return r.hostname === name && r.type === type; });
            callback(null, matches.map(function (r) { return r.destination; }));
        });
    });
}
// Delete the records of location/type whose destination matches one of
// `values`. Records are removed by re-submitting them with deleterecord=true.
// A no-op (calls back immediately) when nothing matches.
function del(domainObject, location, type, values, callback) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert(Array.isArray(values));
    assert.strictEqual(typeof callback, 'function');

    const dnsConfig = domainObject.config,
        zoneName = domainObject.zoneName,
        name = domains.getName(domainObject, location, type) || '@';

    debug('del: %s for zone %s of type %s with values %j', name, zoneName, type, values);

    login(dnsConfig, function (error, apiSessionId) {
        if (error) return callback(error);

        getAllRecords(dnsConfig, apiSessionId, zoneName, function (error, result) {
            if (error) return callback(error);

            const toDelete = [];
            for (let value of values) {
                // remove possible quotation (e.g. quoted TXT values)
                if (value.charAt(0) === '"') value = value.slice(1);
                if (value.charAt(value.length - 1) === '"') value = value.slice(0, -1);

                const record = result.find(function (r) { return r.hostname === name && r.type === type && r.destination === value; });
                if (!record) continue;

                record.deleterecord = true; // marks the record for removal on update
                toDelete.push(record);
            }

            if (toDelete.length === 0) return callback(null);

            const data = {
                action: 'updateDnsRecords',
                param: {
                    apikey: dnsConfig.apiKey,
                    apisessionid: apiSessionId,
                    customernumber: dnsConfig.customerNumber,
                    domainname: zoneName,
                    dnsrecordset: {
                        dnsrecords: toDelete
                    }
                }
            };

            superagent.post(API_ENDPOINT).send(data).end(function (error, result) {
                if (error && !error.response) return callback(new BoxError(BoxError.NETWORK_ERROR, error.message));
                if (result.statusCode !== 200) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));
                // application-level failures arrive with HTTP 200 + statuscode
                if (result.body.statuscode !== 2000) return callback(new BoxError(BoxError.EXTERNAL_ERROR, formatError(result)));

                debug('del:', result.body.responsedata);

                callback(null);
            });
        });
    });
}
// Block until the record at location/type resolves to `value` via the
// zone's nameservers. `options` controls polling, e.g. { interval, times }.
function wait(domainObject, location, type, value, options, callback) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof location, 'string');
    assert.strictEqual(typeof type, 'string');
    assert.strictEqual(typeof value, 'string');
    assert(options && typeof options === 'object'); // { interval: 5000, times: 50000 }
    assert.strictEqual(typeof callback, 'function');

    waitForDns(domains.fqdn(location, domainObject), domainObject.zoneName, type, value, options, callback);
}
// Validate the Netcup credentials for this domain: check the credential
// fields are present, confirm the zone is actually served by Netcup
// nameservers, then round-trip a throw-away A record to prove the
// credentials can write. Yields the sanitized credentials on success.
function verifyDnsConfig(domainObject, callback) {
    assert.strictEqual(typeof domainObject, 'object');
    assert.strictEqual(typeof callback, 'function');

    const dnsConfig = domainObject.config,
        zoneName = domainObject.zoneName;

    // shape checks on the credential fields
    if (!dnsConfig.customerNumber || typeof dnsConfig.customerNumber !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'customerNumber must be a non-empty string', { field: 'customerNumber' }));
    if (!dnsConfig.apiKey || typeof dnsConfig.apiKey !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiKey must be a non-empty string', { field: 'apiKey' }));
    if (!dnsConfig.apiPassword || typeof dnsConfig.apiPassword !== 'string') return callback(new BoxError(BoxError.BAD_FIELD, 'apiPassword must be a non-empty string', { field: 'apiPassword' }));

    const credentials = {
        customerNumber: dnsConfig.customerNumber,
        apiKey: dnsConfig.apiKey,
        apiPassword: dnsConfig.apiPassword,
    };

    if (process.env.BOX_ENV === 'test') return callback(null, credentials); // this shouldn't be here

    dns.resolve(zoneName, 'NS', { timeout: 5000 }, function (error, nameservers) {
        if (error && error.code === 'ENOTFOUND') return callback(new BoxError(BoxError.BAD_FIELD, 'Unable to resolve nameservers for this domain', { field: 'nameservers' }));
        if (error || !nameservers) return callback(new BoxError(BoxError.BAD_FIELD, error ? error.message : 'Unable to get nameservers', { field: 'nameservers' }));

        const allNetcup = nameservers.every(function (n) { return n.toLowerCase().indexOf('dns.netcup.net') !== -1; });
        if (!allNetcup) {
            debug('verifyDnsConfig: %j does not contains Netcup NS', nameservers);
            return callback(new BoxError(BoxError.BAD_FIELD, 'Domain nameservers are not set to Netcup', { field: 'nameservers' }));
        }

        // round-trip a throw-away record under a name no real app uses
        const testLocation = 'cloudrontestdns', testIp = '127.0.0.1';
        upsert(domainObject, testLocation, 'A', [ testIp ], function (error) {
            if (error) return callback(error);
            debug('verifyDnsConfig: Test A record added');

            del(domainObject, testLocation, 'A', [ testIp ], function (error) {
                if (error) return callback(error);
                debug('verifyDnsConfig: Test A record removed again');

                callback(null, credentials);
            });
        });
    });
}
+2 -2
View File
@@ -26,7 +26,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
debug('upsert: %s for zone %s of type %s with values %j', location, domainObject.zoneName, type, values);
@@ -47,7 +47,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
return callback();
+2 -2
View File
@@ -97,7 +97,7 @@ function upsert(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,
@@ -178,7 +178,7 @@ function del(domainObject, location, type, values, callback) {
assert.strictEqual(typeof domainObject, 'object');
assert.strictEqual(typeof location, 'string');
assert.strictEqual(typeof type, 'string');
assert(Array.isArray(values));
assert(util.isArray(values));
assert.strictEqual(typeof callback, 'function');
const dnsConfig = domainObject.config,

Some files were not shown because too many files have changed in this diff Show More